diff --git a/.circleci/config.yml b/.circleci/config.yml index 9bcd6e8d8..acd447f69 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -1,12 +1,15 @@ version: 2.1 orbs: - go: gotest/tools@0.0.9 + go: gotest/tools@0.0.13 executors: golang: docker: - - image: circleci/golang:1.13 + - image: circleci/golang:1.14.6 resource_class: 2xlarge + ubuntu: + docker: + - image: ubuntu:19.10 commands: install-deps: @@ -24,6 +27,8 @@ commands: description: is a darwin build environment? type: boolean steps: + - checkout + - git_fetch_all_tags - checkout - when: condition: << parameters.linux >> @@ -37,16 +42,36 @@ commands: - restore_cache: name: Restore parameters cache keys: - - 'v20-1k-lotus-params' + - 'v25-2k-lotus-params' paths: - /var/tmp/filecoin-proof-parameters/ - - run: ./lotus fetch-params --proving-params 1024 + - run: ./lotus fetch-params 2048 - save_cache: name: Save parameters cache - key: 'v20-1k-lotus-params' + key: 'v25-2k-lotus-params' paths: - /var/tmp/filecoin-proof-parameters/ - + install_ipfs: + steps: + - run: | + apt update + apt install -y wget + wget https://github.com/ipfs/go-ipfs/releases/download/v0.4.22/go-ipfs_v0.4.22_linux-amd64.tar.gz + wget https://github.com/ipfs/go-ipfs/releases/download/v0.4.22/go-ipfs_v0.4.22_linux-amd64.tar.gz.sha512 + if [ "$(sha512sum go-ipfs_v0.4.22_linux-amd64.tar.gz)" != "$(cat go-ipfs_v0.4.22_linux-amd64.tar.gz.sha512)" ] + then + echo "ipfs failed checksum check" + exit 1 + fi + tar -xf go-ipfs_v0.4.22_linux-amd64.tar.gz + mv go-ipfs/ipfs /usr/local/bin/ipfs + chmod +x /usr/local/bin/ipfs + git_fetch_all_tags: + steps: + - run: + name: fetch all tags + command: | + git fetch --all jobs: mod-tidy-check: @@ -54,7 +79,6 @@ jobs: steps: - install-deps - prepare - - go/mod-download - go/mod-tidy-check build-all: @@ -62,23 +86,34 @@ jobs: steps: - install-deps - prepare - - go/mod-download - run: sudo apt-get update - run: sudo apt-get install npm - - restore_cache: - name: restore go mod cache - key: 
v1-go-deps-{{ arch }}-{{ checksum "/home/circleci/project/go.mod" }} - run: command: make buildall - store_artifacts: path: lotus - store_artifacts: - path: lotus-storage-miner + path: lotus-miner + - store_artifacts: + path: lotus-worker + - run: mkdir linux && mv lotus lotus-miner lotus-worker linux/ + - persist_to_workspace: + root: "." + paths: + - linux + + build-debug: + executor: golang + steps: + - install-deps + - prepare + - run: + command: make debug test: &test description: | Run tests with gotestsum. - parameters: + parameters: &test-params executor: type: executor default: golang @@ -90,13 +125,16 @@ jobs: type: string default: "./..." description: Import paths of packages to be tested. + winpost-test: + type: string + default: "0" test-suite-name: type: string default: unit description: Test suite name to report to CircleCI. gotestsum-format: type: string - default: short + default: pkgname-and-test-fails description: gotestsum format. https://github.com/gotestyourself/gotestsum#format coverage: type: string @@ -112,30 +150,34 @@ jobs: steps: - install-deps - prepare - - go/mod-download - - restore_cache: - name: restore go mod cache - key: v1-go-deps-{{ arch }}-{{ checksum "/home/circleci/project/go.mod" }} - run: command: make deps lotus no_output_timeout: 30m - download-params - go/install-gotestsum: gobin: $HOME/.local/bin + version: 0.5.2 - run: name: go test environment: - GOTESTSUM_JUNITFILE: /tmp/test-reports/<< parameters.test-suite-name >>/junit.xml - GOTESTSUM_FORMAT: << parameters.gotestsum-format >> + LOTUS_TEST_WINDOW_POST: << parameters.winpost-test >> + SKIP_CONFORMANCE: "1" command: | mkdir -p /tmp/test-reports/<< parameters.test-suite-name >> - gotestsum -- \ + mkdir -p /tmp/test-artifacts + gotestsum \ + --format << parameters.gotestsum-format >> \ + --junitfile /tmp/test-reports/<< parameters.test-suite-name >>/junit.xml \ + --jsonfile /tmp/test-artifacts/<< parameters.test-suite-name >>.json \ + -- \ << parameters.coverage >> \ 
<< parameters.go-test-flags >> \ << parameters.packages >> no_output_timeout: 30m - store_test_results: path: /tmp/test-reports + - store_artifacts: + path: /tmp/test-artifacts/<< parameters.test-suite-name >>.json - when: condition: << parameters.codecov-upload >> steps: @@ -145,16 +187,92 @@ jobs: shell: /bin/bash -eo pipefail command: | bash <(curl -s https://codecov.io/bash) - - save_cache: - name: save go mod cache - key: v1-go-deps-{{ arch }}-{{ checksum "/home/circleci/project/go.mod" }} - paths: - - "~/go/pkg" - - "~/go/src/github.com" - - "~/go/src/golang.org" - test-short: + test-short: <<: *test + test-window-post: + <<: *test + test-conformance: + description: | + Run tests using a corpus of interoperable test vectors for Filecoin + implementations to test their correctness and compliance with the Filecoin + specifications. + parameters: + <<: *test-params + vectors-branch: + type: string + default: "" + description: | + Branch on github.com/filecoin-project/test-vectors to checkout and + test with. If empty (the default) the commit defined by the git + submodule is used. + executor: << parameters.executor >> + steps: + - install-deps + - prepare + - run: + command: make deps lotus + no_output_timeout: 30m + - download-params + - when: + condition: + not: + equal: [ "", << parameters.vectors-branch >> ] + steps: + - run: + name: checkout vectors branch + command: | + cd extern/test-vectors + git fetch + git checkout origin/<< parameters.vectors-branch >> + - go/install-gotestsum: + gobin: $HOME/.local/bin + version: 0.5.2 + - run: + name: install statediff globally + command: | + ## statediff is optional; we succeed even if compilation fails. 
+ mkdir -p /tmp/statediff + git clone https://github.com/filecoin-project/statediff.git /tmp/statediff + cd /tmp/statediff + go install ./cmd/statediff || exit 0 + - run: + name: go test + environment: + SKIP_CONFORMANCE: "0" + command: | + mkdir -p /tmp/test-reports + mkdir -p /tmp/test-artifacts + gotestsum \ + --format pkgname-and-test-fails \ + --junitfile /tmp/test-reports/junit.xml \ + -- \ + -v -coverpkg ./chain/vm/,github.com/filecoin-project/specs-actors/... -coverprofile=/tmp/conformance.out ./conformance/ + go tool cover -html=/tmp/conformance.out -o /tmp/test-artifacts/conformance-coverage.html + no_output_timeout: 30m + - store_test_results: + path: /tmp/test-reports + - store_artifacts: + path: /tmp/test-artifacts/conformance-coverage.html + build-lotus-soup: + description: | + Compile `lotus-soup` Testground test plan using the current version of Lotus. + parameters: + <<: *test-params + executor: << parameters.executor >> + steps: + - install-deps + - prepare + - run: cd extern/oni && git submodule sync + - run: cd extern/oni && git submodule update --init + - run: cd extern/filecoin-ffi && make + - run: + name: "replace lotus, filecoin-ffi, blst and fil-blst deps" + command: cd extern/oni/lotus-soup && go mod edit -replace github.com/filecoin-project/lotus=../../../ && go mod edit -replace github.com/filecoin-project/filecoin-ffi=../../filecoin-ffi && go mod edit -replace github.com/supranational/blst=../../fil-blst/blst && go mod edit -replace github.com/filecoin-project/fil-blst=../../fil-blst + - run: + name: "build lotus-soup testplan" + command: pushd extern/oni/lotus-soup && go build -tags=testground . 
+ build-macos: description: build darwin lotus binary @@ -168,8 +286,8 @@ jobs: - run: name: Install go command: | - curl -O https://dl.google.com/go/go1.13.4.darwin-amd64.pkg && \ - sudo installer -pkg go1.13.4.darwin-amd64.pkg -target / + curl -O https://dl.google.com/go/go1.14.2.darwin-amd64.pkg && \ + sudo installer -pkg go1.14.2.darwin-amd64.pkg -target / - run: name: Install pkg-config command: HOMEBREW_NO_AUTO_UPDATE=1 brew install pkg-config @@ -181,21 +299,26 @@ jobs: - run: name: Install jq command: | - mkdir $HOME/.bin - curl --location https://github.com/stedolan/jq/releases/download/jq-1.6/jq-osx-amd64 --output $HOME/.bin/jq - chmod +x $HOME/.bin/jq + curl --location https://github.com/stedolan/jq/releases/download/jq-1.6/jq-osx-amd64 --output /usr/local/bin/jq + chmod +x /usr/local/bin/jq - restore_cache: - name: restore go mod and cargo cache + name: restore cargo cache key: v3-go-deps-{{ arch }}-{{ checksum "~/go/src/github.com/filecoin-project/lotus/go.sum" }} - install-deps - - go/mod-download - run: command: make build no_output_timeout: 30m - store_artifacts: path: lotus - store_artifacts: - path: lotus-storage-miner + path: lotus-miner + - store_artifacts: + path: lotus-worker + - run: mkdir darwin && mv lotus lotus-miner lotus-worker darwin/ + - persist_to_workspace: + root: "." + paths: + - darwin - save_cache: name: save cargo cache key: v3-go-deps-{{ arch }}-{{ checksum "~/go/src/github.com/filecoin-project/lotus/go.sum" }} @@ -203,6 +326,35 @@ jobs: - "~/.rustup" - "~/.cargo" + gofmt: + executor: golang + steps: + - install-deps + - prepare + - run: + command: "! go fmt ./... 2>&1 | read" + + cbor-gen-check: + executor: golang + steps: + - install-deps + - prepare + - run: make deps + - run: go install golang.org/x/tools/cmd/goimports + - run: go install github.com/hannahhoward/cbor-gen-for + - run: go generate ./... 
+ - run: git --no-pager diff + - run: git --no-pager diff --quiet + + docs-check: + executor: golang + steps: + - install-deps + - prepare + - run: make docsgen + - run: git --no-pager diff + - run: git --no-pager diff --quiet + lint: &lint description: | Run golangci-lint. @@ -212,7 +364,7 @@ jobs: default: golang golangci-lint-version: type: string - default: 1.17.1 + default: 1.27.0 concurrency: type: string default: '2' @@ -228,7 +380,6 @@ jobs: steps: - install-deps - prepare - - go/mod-download - run: command: make deps no_output_timeout: 30m @@ -238,25 +389,91 @@ jobs: - run: name: Lint command: | - $HOME/.local/bin/golangci-lint run -v \ + $HOME/.local/bin/golangci-lint run -v --timeout 2m \ --concurrency << parameters.concurrency >> << parameters.args >> - lint-changes: - <<: *lint - lint-all: <<: *lint + publish: + description: publish binary artifacts + executor: ubuntu + steps: + - run: + name: Install git jq curl + command: apt update && apt install -y git jq curl + - checkout + - git_fetch_all_tags + - checkout + - install_ipfs + - attach_workspace: + at: "." + - run: + name: Create bundles + command: ./scripts/build-bundle.sh + - run: + name: Publish release + command: ./scripts/publish-release.sh + workflows: version: 2.1 ci: jobs: - - lint-changes: - args: "--new-from-rev origin/master" + - lint-all: + concurrency: "16" # expend all docker 2xlarge CPUs. 
+ - mod-tidy-check + - gofmt + - cbor-gen-check + - docs-check - test: codecov-upload: true + test-suite-name: full + - test-window-post: + go-test-flags: "-run=TestWindowedPost" + winpost-test: "1" + test-suite-name: window-post - test-short: go-test-flags: "--timeout 10m --short" - - mod-tidy-check - - build-all - - build-macos + test-suite-name: short + filters: + tags: + only: + - /^v\d+\.\d+\.\d+$/ + - test-conformance: + test-suite-name: conformance + packages: "./conformance" + - test-conformance: + name: test-conformance-bleeding-edge + test-suite-name: conformance-bleeding-edge + packages: "./conformance" + vectors-branch: master + - build-lotus-soup + - build-debug + - build-all: + requires: + - test-short + filters: + tags: + only: + - /^v\d+\.\d+\.\d+$/ + - build-macos: + requires: + - test-short + filters: + branches: + ignore: + - /.*/ + tags: + only: + - /^v\d+\.\d+\.\d+$/ + - publish: + requires: + - build-all + - build-macos + filters: + branches: + ignore: + - /.*/ + tags: + only: + - /^v\d+\.\d+\.\d+$/ diff --git a/.codecov.yml b/.codecov.yml index cf409a6b6..1551f2276 100644 --- a/.codecov.yml +++ b/.codecov.yml @@ -1,3 +1,5 @@ comment: off ignore: - "cbor_gen.go" +github_checks: + annotations: false diff --git a/.dockerignore b/.dockerignore new file mode 120000 index 000000000..3e4e48b0b --- /dev/null +++ b/.dockerignore @@ -0,0 +1 @@ +.gitignore \ No newline at end of file diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS new file mode 100644 index 000000000..49e461d00 --- /dev/null +++ b/.github/CODEOWNERS @@ -0,0 +1,15 @@ +## filecoin-project/lotus CODEOWNERS +## Refer to https://docs.github.com/en/github/creating-cloning-and-archiving-repositories/about-code-owners. +## +## These users or groups will be automatically assigned as reviewers every time +## a PR is submitted that modifies code in the specified locations. +## +## The Lotus repo configuration requires that at least ONE codeowner approves +## the PR before merging. 
+ +### Global owners. +* @magik6k @whyrusleeping @Kubuxu + +### Conformance testing. +conformance/ @raulk +extern/test-vectors @raulk diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md index a1baede0d..1ded8c36b 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.md +++ b/.github/ISSUE_TEMPLATE/bug_report.md @@ -21,7 +21,7 @@ A clear and concise description of what you expected to happen. **Screenshots** If applicable, add screenshots to help explain your problem. -**Version (run `lotus --version`):** +**Version (run `lotus version`):** **Additional context** Add any other context about the problem here. diff --git a/.github/ISSUE_TEMPLATE/sealingfailed.md b/.github/ISSUE_TEMPLATE/sealingfailed.md new file mode 100644 index 000000000..ae14c3262 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/sealingfailed.md @@ -0,0 +1,43 @@ +--- +name: Sealing Issues +about: Create a report for help with sealing (commit) failures. +title: '' +labels: 'sealing' +assignees: '' + +--- + +Please provide all the information requested here to help us troubleshoot "commit failed" issues. +If the information requested is missing, we will probably have to just ask you to provide it anyway, +before we can help debug. + +**Describe the problem** + +A brief description of the problem you encountered while proving (sealing) a sector. + +Including what commands you ran, and a description of your setup, is very helpful. + +**Sectors status** + +The output of `lotus-miner sectors status --log ` for the failed sector(s). + +**Lotus miner logs** + +Please go through the logs of your miner, and include screenshots of any error-like messages you find. 
+ +Alternatively please upload full log files and share a link here + +**Lotus miner diagnostic info** + +Please collect the following diagnostic information, and share a link here + +* lotus-miner diagnostic info `lotus-miner info all > allinfo` + +** Code modifications ** + +If you have modified parts of lotus, please describe which areas were modified, +and the scope of those modifications + +**Version** + +The output of `lotus --version`. diff --git a/.github/labels.yml b/.github/labels.yml new file mode 100644 index 000000000..7102f1311 --- /dev/null +++ b/.github/labels.yml @@ -0,0 +1,248 @@ +### +### Special magic GitHub labels +### https://help.github.com/en/github/building-a-strong-community/encouraging-helpful-contributions-to-your-project-with-labels +# +- name: "good first issue" + color: 7057ff + description: "Good for newcomers" +- name: "help wanted" + color: 008672 + description: "Extra attention is needed" + +### +### Goals +# +- name: goal/incentives + color: ff004d + description: "Incentinet" + +### +### Areas +# +- name: area/ux + color: 00A4E0 + description: "Area: UX" +- name: area/chain/vm + color: 00A4E2 + description: "Area: Chain/VM" +- name: area/chain/sync + color: 00A4E4 + description: "Area: Chain/Sync" +- name: area/chain/misc + color: 00A4E6 + description: "Area: Chain/Misc" +- name: area/markets + color: 00A4E8 + description: "Area: Markets" +- name: area/sealing/fsm + color: 0bb1ed + description: "Area: Sealing/FSM" +- name: area/sealing/storage + color: 0EB4F0 + description: "Area: Sealing/Storage" +- name: area/proving + color: 0EB4F0 + description: "Area: Proving" +- name: area/mining + color: 10B6F2 + description: "Area: Mining" +- name: area/client/storage + color: 13B9F5 + description: "Area: Client/Storage" +- name: area/client/retrieval + color: 15BBF7 + description: "Area: Client/Retrieval" +- name: area/wallet + color: 15BBF7 + description: "Area: Wallet" +- name: area/payment-channel + color: ff6767 + description: "Area: 
Payment Channel" +- name: area/multisig + color: fff0ff + description: "Area: Multisig" +- name: area/networking + color: 273f8a + description: "Area: Networking" + +### +### Kinds +# +- name: kind/bug + color: c92712 + description: "Kind: Bug" +- name: kind/chore + color: fcf0b5 + description: "Kind: Chore" +- name: kind/feature + color: FFF3B8 + description: "Kind: Feature" +- name: kind/improvement + color: FFF5BA + description: "Kind: Improvement" +- name: kind/test + color: FFF8BD + description: "Kind: Test" +- name: kind/question + color: FFFDC2 + description: "Kind: Question" +- name: kind/enhancement + color: FFFFC5 + description: "Kind: Enhancement" +- name: kind/discussion + color: FFFFC7 + description: "Kind: Discussion" + +### +### Difficulties +# +- name: dif/trivial + color: b2b7ff + description: "Can be confidently tackled by newcomers, who are widely unfamiliar with lotus" +- name: dif/easy + color: 7886d7 + description: "An existing lotus user should be able to pick this up" +- name: dif/medium + color: 6574cd + description: "Prior development experience with lotus is likely helpful" +- name: dif/hard + color: 5661b3 + description: "Suggests that having worked on the specific component affected by this issue is important" +- name: dif/expert + color: 2f365f + description: "Requires extensive knowledge of the history, implications, ramifications of the issue" + +### +### Efforts +# +- name: effort/minutes + color: e8fffe + description: "Effort: Minutes" +- name: effort/hours + color: a0f0ed + description: "Effort: Hours" +- name: effort/day + color: 64d5ca + description: "Effort: One Day" +- name: effort/days + color: 4dc0b5 + description: "Effort: Multiple Days" +- name: effort/week + color: 38a89d + description: "Effort: One Week" +- name: effort/weeks + color: 20504f + description: "Effort: Multiple Weeks" + +### +### Impacts +# +- name: impact/regression + color: f1f5f8 + description: "Impact: Regression" +- name: impact/api-breakage + color: 
ECF0F3 + description: "Impact: API Breakage" +- name: impact/quality + color: E7EBEE + description: "Impact: Quality" +- name: impact/dx + color: E2E6E9 + description: "Impact: Developer Experience" +- name: impact/test-flakiness + color: DDE1E4 + description: "Impact: Test Flakiness" +- name: impact/consensus + color: b20014 + description: "Impact: Consensus" + +### +### Topics +# +- name: topic/interoperability + color: bf0f73 + description: "Topic: Interoperability" +- name: topic/specs + color: CC1C80 + description: "Topic: Specs" +- name: topic/docs + color: D9298D + description: "Topic: Documentation" +- name: topic/architecture + color: E53599 + description: "Topic: Architecture" + +### +### Priorities +### +- name: P0 + color: dd362a + description: "P0: Critical Blocker" +- name: P1 + color: ce8048 + description: "P1: Must be resolved" +- name: P2 + color: dbd81a + description: "P2: Should be resolved" +- name: P3 + color: 9fea8f + description: "P3: Might get resolved" + +### +### Hints +# +#- name: hint/good-first-issue +# color: 7057ff +# description: "Hint: Good First Issue" +#- name: hint/help-wanted +# color: 008672 +# description: "Hint: Help Wanted" +- name: hint/needs-decision + color: 33B9A5 + description: "Hint: Needs Decision" +- name: hint/needs-triage + color: 1AA08C + description: "Hint: Needs Triage" +- name: hint/needs-analysis + color: 26AC98 + description: "Hint: Needs Analysis" +- name: hint/needs-author-input + color: 33B9A5 + description: "Hint: Needs Author Input" +- name: hint/needs-team-input + color: 40C6B2 + description: "Hint: Needs Team Input" +- name: hint/needs-community-input + color: 4DD3BF + description: "Hint: Needs Community Input" +- name: hint/needs-review + color: 5AE0CC + description: "Hint: Needs Review" + +### +### Statuses +# +- name: status/done + color: edb3a6 + description: "Status: Done" +- name: status/deferred + color: E0A699 + description: "Status: Deferred" +- name: status/in-progress + color: D49A8D + 
description: "Status: In Progress" +- name: status/blocked + color: C78D80 + description: "Status: Blocked" +- name: status/inactive + color: BA8073 + description: "Status: Inactive" +- name: status/waiting + color: AD7366 + description: "Status: Waiting" +- name: status/rotten + color: 7A4033 + description: "Status: Rotten" +- name: status/discarded + color: 6D3326 + description: "Status: Discarded / Won't fix" diff --git a/.github/workflows/label-syncer.yml b/.github/workflows/label-syncer.yml new file mode 100644 index 000000000..a94b0edb6 --- /dev/null +++ b/.github/workflows/label-syncer.yml @@ -0,0 +1,17 @@ + +name: Label syncer +on: + push: + paths: + - '.github/labels.yml' + branches: + - master +jobs: + build: + name: Sync labels + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@1.0.0 + - uses: micnncim/action-label-syncer@v1.0.0 + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/.gitignore b/.gitignore index ce84e704d..05f762d8d 100644 --- a/.gitignore +++ b/.gitignore @@ -1,19 +1,24 @@ /lotus -/lotus-storage-miner -/lotus-seal-worker +/lotus-miner +/lotus-worker /lotus-seed -/pond -/townhall -/fountain -/stats -/bench +/lotus-health +/lotus-chainwatch +/lotus-shed +/lotus-pond +/lotus-townhall +/lotus-fountain +/lotus-stats +/lotus-bench +/lotus-gateway +/lotus-pcr /bench.json /lotuspond/front/node_modules /lotuspond/front/build /cmd/lotus-townhall/townhall/node_modules /cmd/lotus-townhall/townhall/build +/cmd/lotus-townhall/townhall/package-lock.json extern/filecoin-ffi/rust/target -**/*.h **/*.a **/*.pc /**/*/.DS_STORE @@ -23,11 +28,14 @@ build/paramfetch.sh /vendor /blocks.dot /blocks.svg -/chainwatch /chainwatch.db +/bundle +/darwin +/linux *-fuzz.zip /chain/types/work_msg/ bin/ipget bin/tmp/* .idea +scratchpad diff --git a/.gitmodules b/.gitmodules index a655f05b9..35f5a3d3f 100644 --- a/.gitmodules +++ b/.gitmodules @@ -2,3 +2,15 @@ path = extern/filecoin-ffi url = https://github.com/filecoin-project/filecoin-ffi.git branch 
= master +[submodule "extern/serialization-vectors"] + path = extern/serialization-vectors + url = https://github.com/filecoin-project/serialization-vectors +[submodule "extern/test-vectors"] + path = extern/test-vectors + url = https://github.com/filecoin-project/test-vectors.git +[submodule "extern/fil-blst"] + path = extern/fil-blst + url = https://github.com/filecoin-project/fil-blst.git +[submodule "extern/oni"] + path = extern/oni + url = https://github.com/filecoin-project/oni diff --git a/.golangci.yml b/.golangci.yml index f873557aa..8bdba64f0 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -1,7 +1,8 @@ linters: disable-all: true enable: - - vet + - gofmt + - govet - goimports - misspell - goconst @@ -21,16 +22,51 @@ issues: - "func name will be used as test\\.Test.* by other packages, and that stutters; consider calling this" - "Potential file inclusion via variable" - "should have( a package)? comment" + - "Error return value of `logging.SetLogLevel` is not checked" + - "comment on exported" + - "(func|method) \\w+ should be \\w+" + - "(type|var|struct field|(method|func) parameter) `\\w+` should be `\\w+`" + - "(G306|G301|G307|G108|G302|G204|G104)" + - "don't use ALL_CAPS in Go names" + - "string .* has .* occurrences, make it a constant" + - "a blank import should be only in a main or test package, or have a comment justifying it" + - "package comment should be of the form" exclude-use-default: false exclude-rules: + - path: lotuspond + linters: + - errcheck + - path: node/modules/lp2p linters: - golint - - path: ".*_test.go" + + - path: build/params_.*\.go + linters: + - golint + + - path: api/apistruct/struct.go + linters: + - golint + + - path: .*_test.go linters: - gosec + - path: chain/vectors/gen/.* + linters: + - gosec + + - path: cmd/lotus-bench/.* + linters: + - gosec + + - path: api/test/.* + text: "context.Context should be the first parameter" + linters: + - golint + linters-settings: goconst: min-occurrences: 6 diff --git a/CHANGELOG.md 
b/CHANGELOG.md index 6ba83a258..ac687675e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,7 +1,569 @@ -# lotus changelog +# Lotus changelog -## 0.1.0 / 2019-12-11 +# 0.8.0 / 2020-09-26 -We are very excited to release **lotus** 0.1.0. This is our testnet release. To install lotus and join the testnet, please visit [docs.lotu.sh](docs.lotu.sh). Please file bug reports as [issues](https://github.com/filecoin-project/lotus/issues). +This consensus-breaking release of Lotus introduces an upgrade to the network. The changes that break consensus are: -A huge thank you to all contributors for this testnet release! \ No newline at end of file +- Upgrading to specs-actors v0.9.11, which reduces WindowPoSt faults per [FIP 0002](https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0002.md) to reduce cost for honest miners with occasional faults (see https://github.com/filecoin-project/specs-actors/pull/1181) +- Revisions to some cryptoeconomics and network params + +This release also updates go-fil-markets to fix an incompatibility issue between v0.7.2 and earlier versions. 
+ +## Changes + +#### Dependencies + +- Update spec actors to 0.9.11 (https://github.com/filecoin-project/lotus/pull/4039) +- Update markets to 0.6.3 (https://github.com/filecoin-project/lotus/pull/4013) + +#### Core Lotus + +- Network upgrade (https://github.com/filecoin-project/lotus/pull/4039) +- Fix AddSupportedProofTypes (https://github.com/filecoin-project/lotus/pull/4033) +- Return an error when we fail to find a sector when checking sector expiration (https://github.com/filecoin-project/lotus/pull/4026) +- Batch blockstore copies after block validation (https://github.com/filecoin-project/lotus/pull/3980) +- Remove a misleading miner actor abstraction (https://github.com/filecoin-project/lotus/pull/3977) +- Fix out-of-bounds when loading all sector infos (https://github.com/filecoin-project/lotus/pull/3976) +- Fix break condition in the miner (https://github.com/filecoin-project/lotus/pull/3953) + +#### UX + +- Correct helptext around miners setting ask (https://github.com/filecoin-project/lotus/pull/4009) +- Make sync wait nicer (https://github.com/filecoin-project/lotus/pull/3991) + +#### Tooling and validation + +- Small adjustments following network upgradability changes (https://github.com/filecoin-project/lotus/pull/3996) +- Add some more big pictures stats to stateroot stat (https://github.com/filecoin-project/lotus/pull/3995) +- Add some actors policy setters for testing (https://github.com/filecoin-project/lotus/pull/3975) + +## Contributors + +The following contributors had 5 or more commits go into this release. +We are grateful for every contribution! 
+ +| Contributor | Commits | Lines ± | +|--------------------|---------|---------------| +| arajasek | 66 | +3140/-1261 | +| Stebalien | 64 | +3797/-3434 | +| magik6k | 48 | +1892/-976 | +| raulk | 40 | +2412/-1549 | +| vyzo | 22 | +287/-196 | +| alanshaw | 15 | +761/-146 | +| whyrusleeping | 15 | +736/-52 | +| hannahhoward | 14 | +1237/837- | +| anton | 6 | +32/-8 | +| travisperson | 5 | +502/-6 | +| Frank | 5 | +78/-39 | +| Jennifer | 5 | +148/-41 | + +# 0.7.2 / 2020-09-23 + +This optional release of Lotus introduces a major refactor around how a Lotus node interacts with code from the specs-actors repo. We now use interfaces to read the state of actors, which is required to be able to reason about different versions of actors code at the same time. + +Additionally, this release introduces various improvements to the sync process, as well as changes to better the overall UX experience. + +## Changes + +#### Core Lotus + +- Network upgrade support (https://github.com/filecoin-project/lotus/pull/3781) +- Upgrade markets to `v0.6.2` (https://github.com/filecoin-project/lotus/pull/3974) +- Validate chain sync response indices when fetching messages (https://github.com/filecoin-project/lotus/pull/3939) +- Add height diff to sync wait (https://github.com/filecoin-project/lotus/pull/3926) +- Replace Requires with Wants (https://github.com/filecoin-project/lotus/pull/3898) +- Update state diffing for market actor (https://github.com/filecoin-project/lotus/pull/3889) +- Parallel fetch for sync (https://github.com/filecoin-project/lotus/pull/3887) +- Fix SectorState (https://github.com/filecoin-project/lotus/pull/3881) + +#### User Experience + +- Add basic deal stats api server for spacerace slingshot (https://github.com/filecoin-project/lotus/pull/3963) +- When doing `sectors update-state`, show a list of existing states if user inputs an invalid one (https://github.com/filecoin-project/lotus/pull/3944) +- Fix `lotus-miner storage find` error 
(https://github.com/filecoin-project/lotus/pull/3927) +- Log shutdown method for lotus daemon and miner (https://github.com/filecoin-project/lotus/pull/3925) +- Update build and setup instruction link (https://github.com/filecoin-project/lotus/pull/3919) +- Add an option to hide removed sectors from `sectors list` output (https://github.com/filecoin-project/lotus/pull/3903) + +#### Testing and validation + +- Add init.State#Remove() for testing (https://github.com/filecoin-project/lotus/pull/3971) +- lotus-shed: add consensus check command (https://github.com/filecoin-project/lotus/pull/3933) +- Add keyinfo verify and jwt token command to lotus-shed (https://github.com/filecoin-project/lotus/pull/3914) +- Fix conformance gen (https://github.com/filecoin-project/lotus/pull/3892) + +# 0.7.1 / 2020-09-17 + +This optional release of Lotus introduces some critical fixes to the window PoSt process. It also upgrades some core dependencies, and introduces many improvements to the mining process, deal-making cycle, and overall User Experience. 
+ +## Changes + +#### Some notable improvements: + +- Correctly construct params for `SubmitWindowedPoSt` messages (https://github.com/filecoin-project/lotus/pull/3909) +- Skip sectors correctly for Window PoSt (https://github.com/filecoin-project/lotus/pull/3839) +- Split window PoST submission into multiple messages (https://github.com/filecoin-project/lotus/pull/3689) +- Improve journal coverage (https://github.com/filecoin-project/lotus/pull/2455) +- Allow retrievals while sealing (https://github.com/filecoin-project/lotus/pull/3778) +- Don't prune locally published messages (https://github.com/filecoin-project/lotus/pull/3772) +- Add get-ask, set-ask retrieval commands (https://github.com/filecoin-project/lotus/pull/3886) +- Consistently name winning and window post in logs (https://github.com/filecoin-project/lotus/pull/3873)) +- Add auto flag to mpool replace (https://github.com/filecoin-project/lotus/pull/3752)) + +#### Dependencies + +- Upgrade markets to `v0.6.1` (https://github.com/filecoin-project/lotus/pull/3906) +- Upgrade specs-actors to `v0.9.10` (https://github.com/filecoin-project/lotus/pull/3846) +- Upgrade badger (https://github.com/filecoin-project/lotus/pull/3739) + +# 0.7.0 / 2020-09-10 + +This consensus-breaking release of Lotus is designed to test a network upgrade on the space race testnet. The changes that break consensus are: + +- Upgrading the Drand network used from the test Drand network to the League of Entropy main drand network. This is the same Drand network that will be used in the Filecoin mainnet. +- Upgrading to specs-actors v0.9.8, which adds a new method to the Multisig actor. 
+ +## Changes + +#### Core Lotus + +- Fix IsAncestorOf (https://github.com/filecoin-project/lotus/pull/3717) +- Update to specs-actors v0.9.8 (https://github.com/filecoin-project/lotus/pull/3725) +- Increase chain throughput by 20% (https://github.com/filecoin-project/lotus/pull/3732) +- Updare to go-libp2p-pubsub `master` (https://github.com/filecoin-project/lotus/pull/3735) +- Drand upgrade (https://github.com/filecoin-project/lotus/pull/3670) +- Multisig API additions (https://github.com/filecoin-project/lotus/pull/3590) + +#### Storage Miner + +- Increase the number of times precommit2 is attempted before moving back to precommit1 (https://github.com/filecoin-project/lotus/pull/3720) + +#### Message pool + +- Relax mpool add strictness checks for local pushes (https://github.com/filecoin-project/lotus/pull/3724) + + +#### Maintenance + +- Fix devnets (https://github.com/filecoin-project/lotus/pull/3712) +- Fix(chainwatch): compare prev miner with cur miner (https://github.com/filecoin-project/lotus/pull/3715) +- CI: fix statediff build; make optional (https://github.com/filecoin-project/lotus/pull/3729) +- Feat: Chaos abort (https://github.com/filecoin-project/lotus/pull/3733) + +## Contributors + +The following contributors had commits go into this release. +We are grateful for every contribution! + +| Contributor | Commits | Lines ± | +|--------------------|---------|---------------| +| arajasek | 28 | +1144/-239 | +| Kubuxu | 19 | +452/-261 | +| whyrusleeping | 13 | +456/-87 | +| vyzo | 11 | +318/-20 | +| raulk | 10 | +1289/-350 | +| magik6k | 6 | +188/-55 | +| dirkmc | 3 | +31/-8 | +| alanshaw | 3 | +176/-37 | +| Stebalien | 2 | +9/-12 | +| lanzafame | 1 | +1/-1 | +| frrist | 1 | +1/-1 | +| mishmosh | 1 | +1/-1 | +| nonsense | 1 | +1/-0 | + +# 0.6.2 / 2020-09-09 + +This release introduces some critical fixes to message selection and gas estimation logic. 
It also adds the ability for nodes to mark a certain tipset as checkpointed, as well as various minor improvements and bugfixes. + +## Changes + +#### Messagepool + +- Warn when optimal selection fails to pack a block and we fall back to random selection (https://github.com/filecoin-project/lotus/pull/3708) +- Add basic command for printing gas performance of messages in the mpool (https://github.com/filecoin-project/lotus/pull/3701) +- Adjust optimal selection to always try to fill blocks (https://github.com/filecoin-project/lotus/pull/3685) +- Fix very minor bug in repub baseFeeLowerBound (https://github.com/filecoin-project/lotus/pull/3663) +- Add an auto flag to mpool replace (https://github.com/filecoin-project/lotus/pull/3676) +- Fix mpool optimal selection packing failure (https://github.com/filecoin-project/lotus/pull/3698) + +#### Core Lotus + +- Don't use latency as initial estimate for blocksync (https://github.com/filecoin-project/lotus/pull/3648) +- Add niceSleep 1 second when drand errors (https://github.com/filecoin-project/lotus/pull/3664) +- Fix isChainNearSync check in block validator (https://github.com/filecoin-project/lotus/pull/3650) +- Add peer to peer manager before fetching the tipset (https://github.com/filecoin-project/lotus/pull/3667) +- Add StageFetchingMessages to sync status (https://github.com/filecoin-project/lotus/pull/3668) +- Pass tipset through upgrade logic (https://github.com/filecoin-project/lotus/pull/3673) +- Allow nodes to mark tipsets as checkpointed (https://github.com/filecoin-project/lotus/pull/3680) +- Remove hard-coded late-fee in window PoSt (https://github.com/filecoin-project/lotus/pull/3702) +- Gas: Fix median calc (https://github.com/filecoin-project/lotus/pull/3686) + +#### Storage + +- Storage manager: bail out with an error if unsealed cid is undefined (https://github.com/filecoin-project/lotus/pull/3655) +- Storage: return true from Sealer.ReadPiece() on success 
(https://github.com/filecoin-project/lotus/pull/3657) + +#### Maintenance + +- Resolve lotus, test-vectors, statediff dependency cycle (https://github.com/filecoin-project/lotus/pull/3688) +- Paych: add docs on how to use paych status (https://github.com/filecoin-project/lotus/pull/3690) +- Initial CODEOWNERS (https://github.com/filecoin-project/lotus/pull/3691) + +# 0.6.1 / 2020-09-08 + +This optional release introduces a minor improvement to the sync process, ensuring nodes don't fall behind and then resync. + +## Changes + +- Update `test-vectors` (https://github.com/filecoin-project/lotus/pull/3645) +- Revert "only subscribe to pubsub topics once we are synced" (https://github.com/filecoin-project/lotus/pull/3643) + +# 0.6.0 / 2020-09-07 + +This consensus-breaking release of Lotus is designed to test a network upgrade on the space race testnet. The changes that break consensus are: + +- Tweaking of some cryptoecon parameters in specs-actors 0.9.7 (https://github.com/filecoin-project/specs-actors/releases/tag/v0.9.7) +- Rebalancing FIL distribution to make testnet FIL scarce, which prevents base fee spikes and sets better expectations for mainnet + +This release also introduces many improvements to Lotus! Among them are a new version of go-fil-markets that supports non-blocking retrieval, various spam reduction measures in the messagepool and p2p logic, and UX improvements to payment channels, dealmaking, and state inspection. 
+ +## Changes + +#### Core Lotus and dependencies + +- Implement faucet funds reallocation logic (https://github.com/filecoin-project/lotus/pull/3632) +- Network upgrade: Upgrade to correct fork threshold (https://github.com/filecoin-project/lotus/pull/3628) +- Update to specs 0.9.7 and markets 0.6.0 (https://github.com/filecoin-project/lotus/pull/3627) +- Network upgrade: Perform base fee tamping (https://github.com/filecoin-project/lotus/pull/3623) +- Chain events: if cache best() is nil, return chain head (https://github.com/filecoin-project/lotus/pull/3611) +- Update to specs actors v0.9.6 (https://github.com/filecoin-project/lotus/pull/3603) + +#### Messagepool + +- Temporarily allow negative chains (https://github.com/filecoin-project/lotus/pull/3625) +- Improve publish/republish logic (https://github.com/filecoin-project/lotus/pull/3592) +- Fix selection bug; priority messages were not included if other chains were negative (https://github.com/filecoin-project/lotus/pull/3580) +- Add defensive check for minimum GasFeeCap for inclusion within the next 20 blocks (https://github.com/filecoin-project/lotus/pull/3579) +- Add additional info about gas premium (https://github.com/filecoin-project/lotus/pull/3578) +- Fix GasPremium capping logic (https://github.com/filecoin-project/lotus/pull/3552) + +#### Payment channels + +- Get available funds by address or by from/to (https://github.com/filecoin-project/lotus/pull/3547) +- Create `lotus paych status` command (https://github.com/filecoin-project/lotus/pull/3523) +- Rename CLI command from "paych get" to "paych add-funds" (https://github.com/filecoin-project/lotus/pull/3520) + +#### Peer-to-peer + +- Only subscribe to pubsub topics once we are synced (https://github.com/filecoin-project/lotus/pull/3602) +- Reduce mpool add failure log spam (https://github.com/filecoin-project/lotus/pull/3562) +- Republish messages even if the chains have negative performance(https://github.com/filecoin-project/lotus/pull/3557) +- 
Adjust gossipsub gossip factor (https://github.com/filecoin-project/lotus/pull/3556) +- Integrate pubsub Random Early Drop (https://github.com/filecoin-project/lotus/pull/3518) + +#### Miscellaneous + +- Fix panic in OnDealExpiredSlashed (https://github.com/filecoin-project/lotus/pull/3553) +- Robustify state manager against holes in actor method numbers (https://github.com/filecoin-project/lotus/pull/3538) + +#### UX + +- VM: Fix an error message (https://github.com/filecoin-project/lotus/pull/3608) +- Documentation: Batch replacement, update lotus-storage-miner to lotus-miner (https://github.com/filecoin-project/lotus/pull/3571) +- CLI: Robust actor lookup (https://github.com/filecoin-project/lotus/pull/3535) +- Add agent flag to net peers (https://github.com/filecoin-project/lotus/pull/3534) +- Add watch option to storage-deals list (https://github.com/filecoin-project/lotus/pull/3527) + +#### Testing & tooling + +- Decommission chain-validation (https://github.com/filecoin-project/lotus/pull/3606) +- Metrics: add expected height metric (https://github.com/filecoin-project/lotus/pull/3586) +- PCR: Use current tipset during refund (https://github.com/filecoin-project/lotus/pull/3570) +- Lotus-shed: Add math command (https://github.com/filecoin-project/lotus/pull/3568) +- PCR: Add tipset aggregation (https://github.com/filecoin-project/lotus/pull/3565)- Fix broken paych tests (https://github.com/filecoin-project/lotus/pull/3551) +- Make chain export ~1000x times faster (https://github.com/filecoin-project/lotus/pull/3533) +- Chainwatch: Stop SyncIncomingBlocks from leaking into chainwatch processing; No panics during processing (https://github.com/filecoin-project/lotus/pull/3526) +- Conformance: various changes (https://github.com/filecoin-project/lotus/pull/3521) + +# 0.5.10 / 2020-09-03 + +This patch includes a crucial fix to the message pool selection logic, strongly disfavouring messages that might cause a miner penalty. 
+ +## Changes + +- Fix calculation of GasReward in messagepool (https://github.com/filecoin-project/lotus/pull/3528) + +# 0.5.9 / 2020-09-03 + +This patch includes a hotfix to the `GasEstimateFeeCap` method, capping the estimated fee to a reasonable level by default. + +## Changes + +- Added target height to sync wait (https://github.com/filecoin-project/lotus/pull/3502) +- Disable codecov annotations (https://github.com/filecoin-project/lotus/pull/3514) +- Cap fees to reasonable level by default (https://github.com/filecoin-project/lotus/pull/3516) +- Add APIs and command to inspect bandwidth usage (https://github.com/filecoin-project/lotus/pull/3497) +- Track expected nonce in mpool, ignore messages with large nonce gaps (https://github.com/filecoin-project/lotus/pull/3450) + +# 0.5.8 / 2020-09-02 + +This patch includes some bugfixes to the sector sealing process, and updates go-fil-markets. It also improves the performance of blocksync, adds a method to export chain state trees, and improves chainwatch. 
+ +## Changes + +- Upgrade markets to v0.5.9 (https://github.com/filecoin-project/lotus/pull/3496) +- Improve blocksync to load fewer messages: (https://github.com/filecoin-project/lotus/pull/3494) +- Fix a panic in the ffi-wrapper's `ReadPiece` (https://github.com/filecoin-project/lotus/pull/3492/files) +- Fix a deadlock in the sealing scheduler (https://github.com/filecoin-project/lotus/pull/3489) +- Add test vectors for tipset tests (https://github.com/filecoin-project/lotus/pull/3485/files) +- Improve the advance-block debug command (https://github.com/filecoin-project/lotus/pull/3476) +- Add toggle for message processing to Lotus PCR (https://github.com/filecoin-project/lotus/pull/3470) +- Allow exporting recent chain state trees (https://github.com/filecoin-project/lotus/pull/3463) +- Remove height from chain rand (https://github.com/filecoin-project/lotus/pull/3458) +- Disable GC on chain badger datastore (https://github.com/filecoin-project/lotus/pull/3457) +- Account for `GasPremium` in `GasEstimateFeeCap` (https://github.com/filecoin-project/lotus/pull/3456) +- Update go-libp2p-pubsub to `master` (https://github.com/filecoin-project/lotus/pull/3455) +- Chainwatch improvements (https://github.com/filecoin-project/lotus/pull/3442) + +# 0.5.7 / 2020-08-31 + +This patch release includes some bugfixes and enhancements to the sector lifecycle and message pool logic. 
+ +## Changes + +- Rebuild unsealed infos on miner restart (https://github.com/filecoin-project/lotus/pull/3401) +- CLI to attach storage paths to workers (https://github.com/filecoin-project/lotus/pull/3405) +- Do not select negative performing message chains for inclusion (https://github.com/filecoin-project/lotus/pull/3392) +- Remove a redundant error-check (https://github.com/filecoin-project/lotus/pull/3421) +- Correctly move unsealed sectors in `FinalizeSectors` (https://github.com/filecoin-project/lotus/pull/3424) +- Improve worker selection logic (https://github.com/filecoin-project/lotus/pull/3425) +- Don't use context to close bitswap (https://github.com/filecoin-project/lotus/pull/3430) +- Correctly estimate gas premium when there is only one message on chain (https://github.com/filecoin-project/lotus/pull/3428) + +# 0.5.6 / 2020-08-29 + +Hotfix release that fixes a panic in the sealing scheduler (https://github.com/filecoin-project/lotus/pull/3389). + +# 0.5.5 + +This patch release introduces a large number of improvements to the sealing process. +It also updates go-fil-markets to +[version 0.5.8](https://github.com/filecoin-project/go-fil-markets/releases/tag/v0.5.8), +and go-libp2p-pubsub to [v0.3.5](https://github.com/libp2p/go-libp2p-pubsub/releases/tag/v0.3.5). + +#### Downstream upgrades + +- Upgrades markets to v0.5.8 (https://github.com/filecoin-project/lotus/pull/3384) +- Upgrades go-libp2p-pubsub to v0.3.5 (https://github.com/filecoin-project/lotus/pull/3305) + +#### Sector sealing + +- The following improvements were introduced in https://github.com/filecoin-project/lotus/pull/3350. + + - Allow `lotus-miner sectors remove` to remove a sector in any state. + - Create a separate state in the storage FSM dedicated to submitting the Commit message. + - Recovery for when the Deal IDs of deals in a sector get changed in a reorg. 
+ - Auto-retry sending Precommit and Commit messages if they run out of gas + - Auto-retry sector remove tasks when they fail + - Compact worker windows, and allow their tasks to be executed in any order + +- Don't simply skip PoSt for bad sectors (https://github.com/filecoin-project/lotus/pull/3323) + +#### Message Pool + +- Spam Protection: Track required funds for pending messages (https://github.com/filecoin-project/lotus/pull/3313) + +#### Chainwatch + +- Add more power and reward metrics (https://github.com/filecoin-project/lotus/pull/3367) +- Fix raciness in sector deal table (https://github.com/filecoin-project/lotus/pull/3275) +- Parallelize miner processing (https://github.com/filecoin-project/lotus/pull/3380) +- Accept Lotus API and token (https://github.com/filecoin-project/lotus/pull/3337) + +# 0.5.4 + +A patch release, containing a few nice bugfixes and improvements: + +- Fix parsing of peer ID in `lotus-miner actor set-peer-id` (@whyrusleeping) +- Update dependencies, fixing several bugs (@Stebalien) +- Fix remaining linter warnings (@Stebalien) +- Use safe string truncation (@Ingar) +- Allow tweaking of blocksync message window size (@whyrusleeping) +- Add some additional gas stats to metrics (@Kubuxu) +- Fix an edge case bug in message selection, add many tests (@vyzo) + +# 0.5.3 + +Yet another hotfix release. +A lesson for readers, having people who have been awake for 12+ hours review +your hotfix PR is not a good idea. Find someone who has slept recently +enough to give you good code review, otherwise you'll end up quickly bumping +versions again. + +- Fixed a bug in the mempool that was introduced in v0.5.2 + +# 0.5.2 / 2020-08-24 + +This is a hotfix release. + +- Fix message selection to not include messages that are invalid for block + inclusion. +- Improve SelectMessage handling of the case where the message pool's tipset + differs from our mining base. + +# 0.5.1 / 2020-08-24 + +The Space Race release! 
+This release contains the genesis car file and bootstrap peers for the space +race network. + +Additionally, we included two small fixes to genesis creation: +- Randomize ticket value in genesis generation +- Correctly set t099 (burnt funds actor) to have valid account actor state + +# 0.5.0 / 2020-08-20 + +This version of Lotus will be used for the incentivized testnet Space Race competition, +and can be considered mainnet-ready code. It includes some protocol +changes, upgrades of core dependencies, and various bugfixes and UX/performance improvements. + +## Highlights + +Among the highlights included in this release are: + +- Gas changes: We implemented EIP-1559 and introduced real gas values. +- Deal-making: We now support "Committed Capacity" sectors, "fast-retrieval" deals, +and the packing of multiple deals into a single sector. +- Renamed features: We renamed some of the binaries, environment variables, and default +paths associated with a Lotus node. + +### Gas changes + +We made some significant changes to the mechanics of gas in this release. + +#### Network fee + +We implemented something similar to +[Ethereum's EIP-1559](https://github.com/ethereum/EIPs/blob/master/EIPS/eip-1559.md). +The `Message` structure had three changes: +- The `GasPrice` field has been removed +- A new `GasFeeCap` field has been added, which controls the maximum cost +the sender incurs for the message +- A new `GasPremium` field has been added, which controls the reward a miner +earns for including the message + +A sender will never be charged more than `GasFeeCap * GasLimit`. +A miner will typically earn `GasPremium * GasLimit` as a reward. + +The `Blockheader` structure has one new field, called `ParentBaseFee`. +Informally speaking,the `ParentBaseFee` +is increased when blocks are densely packed with messages, and decreased otherwise. + +The `ParentBaseFee` is used when calculating how much a sender burns when executing a message. 
_Burning_ simply refers to sending attoFIL to a dedicated, unreachable account. +A message causes `ParentBaseFee * GasUsed` attoFIL to be burnt. + +#### Real gas values + +This release also includes our first "real" gas costs for primitive operations. +The costs were designed to account for both the _time_ that message execution takes, +as well as the _space_ a message adds to the state tree. + +## Deal-making changes + +There are three key changes to the deal-making process. + +#### Committed Capacity sectors + +Miners can now pledge "Committed Capacity" (CC) sectors, which are explicitly +stated as containing junk data, and must not include any deals. Miners can do this +to increase their storage power, and win block rewards from this pledged storage. + +They can mark these sectors as "upgradable" with `lotus-miner sectors mark-for-upgrade`. +If the miner receives and accepts one or more storage deals, the sector that includes +those deals will _replace_ the CC sector. This is intended to maximize the amount of useful +storage on the Filecoin network. + +#### Fast-retrieval deals + +Clients can now include a `fast-retrieval` flag when proposing deals with storage miners. +If set to true, the miner will include an extra copy of the deal data. This +data can be quickly served in a retrieval deal, since it will not need to be unsealed. + +#### Multiple deals per sector + +Miners can now pack multiple deals into a single sector, so long as all the deals +fit into the sector capacity. This should increase the packing efficiency of miners. + +### Renamed features + +To improve the user experience, we updated several names to maintain +standard prefixing, and to better reflect the meaning of the features being referenced. + +In particular, the Lotus miner binary is now called `lotus-miner`, the default +path for miner data is now `~/.lotusminer`, and the environment variable +that sets the path for miner data is now `$LOTUS_MINER_PATH`. 
A full list of renamed +features can be found [here](https://github.com/filecoin-project/lotus/issues/2304). + +## Changelog + +#### Downstream upgrades +- Upgrades markets to v0.5.6 (https://github.com/filecoin-project/lotus/pull/3058) +- Upgrades specs-actors to v0.9.3 (https://github.com/filecoin-project/lotus/pull/3151) + +#### Core protocol +- Introduces gas values, replacing placeholders (https://github.com/filecoin-project/lotus/pull/2343) +- Implements EIP-1559, introducing a network base fee, message gas fee cap, and message gas fee premium (https://github.com/filecoin-project/lotus/pull/2874) +- Implements Poisson Sortition for elections (https://github.com/filecoin-project/lotus/pull/2084) + +#### Deal-making lifecycle +- Introduces "Committed Capacity" sectors (https://github.com/filecoin-project/lotus/pull/2220) +- Introduces "fast-retrieval" flag for deals (https://github.com/filecoin-project/lotus/pull/2323) +- Supports packing multiple deals into one sector (https://github.com/filecoin-project/storage-fsm/pull/38) + +#### Enhancements + +- Optimized message pool selection logic (https://github.com/filecoin-project/lotus/pull/2838) +- Window-based scheduling of sealing tasks (https://github.com/filecoin-project/sector-storage/pull/67) +- Faster window PoSt (https://github.com/filecoin-project/lotus/pull/2209/files) +- Refactors the payment channel manager (https://github.com/filecoin-project/lotus/pull/2640) +- Refactors blocksync (https://github.com/filecoin-project/lotus/pull/2715/files) + +#### UX + +- Provide status updates for data-transfer (https://github.com/filecoin-project/lotus/pull/3162, https://github.com/filecoin-project/lotus/pull/3191) +- Miners can customise asks (https://github.com/filecoin-project/lotus/pull/2046) +- Miners can toggle auto-acceptance of deals (https://github.com/filecoin-project/lotus/pull/1994) +- Miners can maintain a blocklist of piece CIDs (https://github.com/filecoin-project/lotus/pull/2069) + +## Contributors + 
+The following contributors had 10 or more commits go into this release. +We are grateful for every contribution! + +| Contributor | Commits | Lines ± | +|--------------------|---------|---------------| +| magik6k | 361 | +13197/-6136 | +| Kubuxu | 227 | +5670/-2587 | +| arajasek | 120 | +2916/-1264 | +| whyrusleeping | 112 | +3979/-1089 | +| vyzo | 99 | +3343/-1305 | +| dirkmc | 68 | +8732/-3621 | +| laser | 45 | +1489/-501 | +| hannahhoward | 43 | +2654/-990 | +| frrist | 37 | +6630/-4338 | +| schomatis | 28 | +3016/-1368 | +| placer14 | 27 | +824/-350 | +| raulk | 25 | +28718/-29849 | +| mrsmkl | 22 | +560/-368 | +| travisperson | 18 | +1354/-314 | +| nonsense | 16 | +2956/-2842 | +| ingar | 13 | +331/-123 | +| daviddias | 11 | +311/-11 | +| Stebalien | 11 | +1204/-980 | +| RobQuistNL | 10 | +69/-74 | + +# 0.1.0 / 2019-12-11 + +We are very excited to release **lotus** 0.1.0. This is our testnet release. To install lotus and join the testnet, please visit [lotu.sh](lotu.sh). Please file bug reports as [issues](https://github.com/filecoin-project/lotus/issues). + +A huge thank you to all contributors for this testnet release! diff --git a/Makefile b/Makefile index 083a15d70..56ab361ec 100644 --- a/Makefile +++ b/Makefile @@ -3,8 +3,10 @@ SHELL=/usr/bin/env bash all: build .PHONY: all +unexport GOFLAGS + GOVERSION:=$(shell go version | cut -d' ' -f 3 | cut -d. 
-f 2) -ifeq ($(shell expr $(GOVERSION) \< 13), 1) +ifeq ($(shell expr $(GOVERSION) \< 14), 1) $(warning Your Golang version is go 1.$(GOVERSION)) $(error Update Golang to version $(shell grep '^go' go.mod)) endif @@ -14,12 +16,19 @@ MODULES:= CLEAN:= BINS:= -GOFLAGS+=-ldflags="-X "github.com/filecoin-project/lotus/build".CurrentCommit=+git$(subst -,.,$(shell git describe --always --match=NeVeRmAtCh --dirty 2>/dev/null || git rev-parse --short HEAD 2>/dev/null))" + +ldflags=-X=github.com/filecoin-project/lotus/build.CurrentCommit=+git.$(subst -,.,$(shell git describe --always --match=NeVeRmAtCh --dirty 2>/dev/null || git rev-parse --short HEAD 2>/dev/null)) +ifneq ($(strip $(LDFLAGS)),) + ldflags+=-extldflags=$(LDFLAGS) +endif + +GOFLAGS+=-ldflags="$(ldflags)" + ## FFI FFI_PATH:=extern/filecoin-ffi/ -FFI_DEPS:=libfilecoin.a filecoin.pc filecoin.h +FFI_DEPS:=.install-filcrypto FFI_DEPS:=$(addprefix $(FFI_PATH),$(FFI_DEPS)) $(FFI_DEPS): build/.filecoin-install ; @@ -49,7 +58,10 @@ deps: $(BUILD_DEPS) .PHONY: deps debug: GOFLAGS+=-tags=debug -debug: lotus lotus-storage-miner lotus-seal-worker lotus-seed +debug: lotus lotus-miner lotus-worker lotus-seed + +2k: GOFLAGS+=-tags=2k +2k: lotus lotus-miner lotus-worker lotus-seed lotus: $(BUILD_DEPS) rm -f lotus @@ -59,36 +71,49 @@ lotus: $(BUILD_DEPS) .PHONY: lotus BINS+=lotus -lotus-storage-miner: $(BUILD_DEPS) - rm -f lotus-storage-miner - go build $(GOFLAGS) -o lotus-storage-miner ./cmd/lotus-storage-miner - go run github.com/GeertJohan/go.rice/rice append --exec lotus-storage-miner -i ./build -.PHONY: lotus-storage-miner -BINS+=lotus-storage-miner +lotus-miner: $(BUILD_DEPS) + rm -f lotus-miner + go build $(GOFLAGS) -o lotus-miner ./cmd/lotus-storage-miner + go run github.com/GeertJohan/go.rice/rice append --exec lotus-miner -i ./build +.PHONY: lotus-miner +BINS+=lotus-miner -lotus-seal-worker: $(BUILD_DEPS) - rm -f lotus-seal-worker - go build $(GOFLAGS) -o lotus-seal-worker ./cmd/lotus-seal-worker - go run 
github.com/GeertJohan/go.rice/rice append --exec lotus-seal-worker -i ./build -.PHONY: lotus-seal-worker -BINS+=lotus-seal-worker +lotus-worker: $(BUILD_DEPS) + rm -f lotus-worker + go build $(GOFLAGS) -o lotus-worker ./cmd/lotus-seal-worker + go run github.com/GeertJohan/go.rice/rice append --exec lotus-worker -i ./build +.PHONY: lotus-worker +BINS+=lotus-worker lotus-shed: $(BUILD_DEPS) rm -f lotus-shed go build $(GOFLAGS) -o lotus-shed ./cmd/lotus-shed -.PHONY: lotus-seal-worker -BINS+=lotus-seal-worker + go run github.com/GeertJohan/go.rice/rice append --exec lotus-shed -i ./build +.PHONY: lotus-shed +BINS+=lotus-shed -build: lotus lotus-storage-miner lotus-seal-worker +lotus-gateway: $(BUILD_DEPS) + rm -f lotus-gateway + go build $(GOFLAGS) -o lotus-gateway ./cmd/lotus-gateway +.PHONY: lotus-gateway +BINS+=lotus-gateway + +build: lotus lotus-miner lotus-worker @[[ $$(type -P "lotus") ]] && echo "Caution: you have \ an existing lotus binary in your PATH. This may cause problems if you don't run 'sudo make install'" || true .PHONY: build -install: +install: install-daemon install-miner install-worker + +install-daemon: install -C ./lotus /usr/local/bin/lotus - install -C ./lotus-storage-miner /usr/local/bin/lotus-storage-miner - install -C ./lotus-seal-worker /usr/local/bin/lotus-seal-worker + +install-miner: + install -C ./lotus-miner /usr/local/bin/lotus-miner + +install-worker: + install -C ./lotus-worker /usr/local/bin/lotus-worker # TOOLS @@ -106,51 +131,141 @@ benchmarks: @curl -X POST 'http://benchmark.kittyhawk.wtf/benchmark' -d '@bench.json' -u "${benchmark_http_cred}" .PHONY: benchmarks -pond: build - go build -o pond ./lotuspond +lotus-pond: 2k + go build -o lotus-pond ./lotuspond (cd lotuspond/front && npm i && CI=false npm run build) -.PHONY: pond -BINS+=pond +.PHONY: lotus-pond +BINS+=lotus-pond -townhall: - rm -f townhall - go build -o townhall ./cmd/lotus-townhall +lotus-townhall: + rm -f lotus-townhall + go build -o lotus-townhall 
./cmd/lotus-townhall (cd ./cmd/lotus-townhall/townhall && npm i && npm run build) - go run github.com/GeertJohan/go.rice/rice append --exec townhall -i ./cmd/lotus-townhall -i ./build -.PHONY: townhall -BINS+=townhall + go run github.com/GeertJohan/go.rice/rice append --exec lotus-townhall -i ./cmd/lotus-townhall -i ./build +.PHONY: lotus-townhall +BINS+=lotus-townhall -fountain: - rm -f fountain - go build -o fountain ./cmd/lotus-fountain - go run github.com/GeertJohan/go.rice/rice append --exec fountain -i ./cmd/lotus-fountain -.PHONY: fountain -BINS+=fountain +lotus-fountain: + rm -f lotus-fountain + go build -o lotus-fountain ./cmd/lotus-fountain + go run github.com/GeertJohan/go.rice/rice append --exec lotus-fountain -i ./cmd/lotus-fountain -i ./build +.PHONY: lotus-fountain +BINS+=lotus-fountain -chainwatch: - rm -f chainwatch - go build -o chainwatch ./cmd/lotus-chainwatch - go run github.com/GeertJohan/go.rice/rice append --exec chainwatch -i ./cmd/lotus-chainwatch -.PHONY: chainwatch -BINS+=chainwatch +lotus-chainwatch: + rm -f lotus-chainwatch + go build $(GOFLAGS) -o lotus-chainwatch ./cmd/lotus-chainwatch +.PHONY: lotus-chainwatch +BINS+=lotus-chainwatch -bench: - rm -f bench - go build -o bench ./cmd/lotus-bench - go run github.com/GeertJohan/go.rice/rice append --exec bench -i ./build -.PHONY: bench -BINS+=bench +lotus-bench: + rm -f lotus-bench + go build -o lotus-bench ./cmd/lotus-bench + go run github.com/GeertJohan/go.rice/rice append --exec lotus-bench -i ./build +.PHONY: lotus-bench +BINS+=lotus-bench -stats: - rm -f stats - go build -o stats ./tools/stats -.PHONY: stats -BINS+=stats +lotus-stats: + rm -f lotus-stats + go build -o lotus-stats ./cmd/lotus-stats + go run github.com/GeertJohan/go.rice/rice append --exec lotus-stats -i ./build +.PHONY: lotus-stats +BINS+=lotus-stats + +lotus-pcr: + rm -f lotus-pcr + go build $(GOFLAGS) -o lotus-pcr ./cmd/lotus-pcr + go run github.com/GeertJohan/go.rice/rice append --exec lotus-pcr -i ./build 
+.PHONY: lotus-pcr +BINS+=lotus-pcr + +lotus-health: + rm -f lotus-health + go build -o lotus-health ./cmd/lotus-health + go run github.com/GeertJohan/go.rice/rice append --exec lotus-health -i ./build +.PHONY: lotus-health +BINS+=lotus-health + +testground: + go build -tags testground -o /dev/null ./cmd/lotus +.PHONY: testground +BINS+=testground + +install-chainwatch: lotus-chainwatch + install -C ./lotus-chainwatch /usr/local/bin/lotus-chainwatch + +# SYSTEMD + +install-daemon-service: install-daemon + mkdir -p /etc/systemd/system + mkdir -p /var/log/lotus + install -C -m 0644 ./scripts/lotus-daemon.service /etc/systemd/system/lotus-daemon.service + systemctl daemon-reload + @echo + @echo "lotus-daemon service installed. Don't forget to run 'sudo systemctl start lotus-daemon' to start it and 'sudo systemctl enable lotus-daemon' for it to be enabled on startup." + +install-miner-service: install-miner install-daemon-service + mkdir -p /etc/systemd/system + mkdir -p /var/log/lotus + install -C -m 0644 ./scripts/lotus-miner.service /etc/systemd/system/lotus-miner.service + systemctl daemon-reload + @echo + @echo "lotus-miner service installed. Don't forget to run 'sudo systemctl start lotus-miner' to start it and 'sudo systemctl enable lotus-miner' for it to be enabled on startup." + +install-chainwatch-service: install-chainwatch install-daemon-service + mkdir -p /etc/systemd/system + mkdir -p /var/log/lotus + install -C -m 0644 ./scripts/lotus-chainwatch.service /etc/systemd/system/lotus-chainwatch.service + systemctl daemon-reload + @echo + @echo "chainwatch service installed. Don't forget to run 'sudo systemctl start lotus-chainwatch' to start it and 'sudo systemctl enable lotus-chainwatch' for it to be enabled on startup." 
+ +install-main-services: install-miner-service + +install-all-services: install-main-services install-chainwatch-service + +install-services: install-main-services + +clean-daemon-service: clean-miner-service clean-chainwatch-service + -systemctl stop lotus-daemon + -systemctl disable lotus-daemon + rm -f /etc/systemd/system/lotus-daemon.service + systemctl daemon-reload + +clean-miner-service: + -systemctl stop lotus-miner + -systemctl disable lotus-miner + rm -f /etc/systemd/system/lotus-miner.service + systemctl daemon-reload + +clean-chainwatch-service: + -systemctl stop lotus-chainwatch + -systemctl disable lotus-chainwatch + rm -f /etc/systemd/system/lotus-chainwatch.service + systemctl daemon-reload + +clean-main-services: clean-daemon-service + +clean-all-services: clean-main-services + +clean-services: clean-all-services # MISC buildall: $(BINS) +completions: + ./scripts/make-completions.sh lotus + ./scripts/make-completions.sh lotus-miner +.PHONY: completions + +install-completions: + mkdir -p /usr/share/bash-completion/completions /usr/local/share/zsh/site-functions/ + install -C ./scripts/bash-completion/lotus /usr/share/bash-completion/completions/lotus + install -C ./scripts/bash-completion/lotus-miner /usr/share/bash-completion/completions/lotus-miner + install -C ./scripts/zsh-completion/lotus /usr/local/share/zsh/site-functions/_lotus + install -C ./scripts/zsh-completion/lotus-miner /usr/local/share/zsh/site-functions/_lotus-miner + clean: rm -rf $(CLEAN) $(BINS) -$(MAKE) -C $(FFI_PATH) clean @@ -163,6 +278,15 @@ dist-clean: type-gen: go run ./gen/main.go + go generate ./... + +method-gen: + (cd ./lotuspond/front/src/chain && go run ./methodgen.go) + +gen: type-gen method-gen + +docsgen: + go run ./api/docgen > documentation/en/api-methods.md print-%: @echo $*=$($*) diff --git a/README.md b/README.md index b89849496..766317e4f 100644 --- a/README.md +++ b/README.md @@ -1,16 +1,47 @@ -![Lotus](documentation/images/lotus_logo_h.png) +

+ + Project Lotus Logo + +

-# Project Lotus - 莲 +

Project Lotus - 莲

-Lotus is an experimental implementation of the Filecoin Distributed Storage Network. For more details about Filecoin, check out the [Filecoin Spec](https://github.com/filecoin-project/specs). +

+ + + + +
+

-## Development - -All work is tracked via issues. An attempt at keeping an up-to-date view on remaining work is in the [lotus testnet github project board](https://github.com/filecoin-project/lotus/projects/1). +Lotus is an implementation of the Filecoin Distributed Storage Network. For more details about Filecoin, check out the [Filecoin Spec](https://spec.filecoin.io). ## Building & Documentation -For instructions on how to build lotus from source, please visit [https://docs.lotu.sh](https://docs.lotu.sh) or read the source [here](https://github.com/filecoin-project/lotus/tree/master/documentation). +For instructions on how to build lotus from source, please visit [Lotus build and setup instruction](https://docs.filecoin.io/get-started/lotus/installation/#minimal-requirements) or read the source [here](https://github.com/filecoin-project/lotus/tree/master/documentation). + +## Reporting a Vulnerability + +Please send an email to security@filecoin.org. See our [security policy](SECURITY.md) for more details. + +## Development + +The main branches under development at the moment are: +* [`master`](https://github.com/filecoin-project/lotus): current testnet. +* [`next`](https://github.com/filecoin-project/lotus/tree/next): working branch with chain-breaking changes. +* [`ntwk-calibration`](https://github.com/filecoin-project/lotus/tree/ntwk-calibration): devnet running one of `next` commits. + +### Tracker + +All work is tracked via issues. An attempt at keeping an up-to-date view on remaining work towards Mainnet launch can be seen at the [lotus github project board](https://github.com/orgs/filecoin-project/projects/8). The issues labeled with `incentives` are there to identify the issues needed for Space Race launch. 
+ +### Packages + +The lotus Filecoin implementation unfolds into the following packages: + +- [This repo](https://github.com/filecoin-project/lotus) +- [go-fil-markets](https://github.com/filecoin-project/go-fil-markets) which has its own [kanban work tracker available here](https://app.zenhub.com/workspaces/markets-shared-components-5daa144a7046a60001c6e253/board) +- [spec-actors](https://github.com/filecoin-project/specs-actors) which has its own [kanban work tracker available here](https://app.zenhub.com/workspaces/actors-5ee6f3aa87591f0016c05685/board) ## License diff --git a/SECURITY.md b/SECURITY.md new file mode 100644 index 000000000..ecb600deb --- /dev/null +++ b/SECURITY.md @@ -0,0 +1,29 @@ +# Security Policy + +## Reporting a Vulnerability + +For *critical* bugs, please send an email to security@filecoin.org. + +The bug reporting process differs between bugs that are critical and may crash the network, and others that are unlikely to cause problems if malicious parties know about it. For non-critical bugs, please simply file a GitHub [issue](https://github.com/filecoin-project/lotus/issues/new?template=bug_report.md). + +Please try to provide a clear description of any bugs reported, along with how to reproduce the bug if possible. More detailed bug reports (especially those with a PoC included) will help us move forward much faster. Additionally, please avoid reporting bugs that already have open issues. Take a moment to search the issue list of the related GitHub repositories before writing up a new report. + +Here are some examples of bugs we would consider 'critical': + +* If you can spend from a `multisig` wallet you do not control the keys for. +* If you can cause a miner to be slashed without them actually misbehaving. +* If you can maintain power without submitting windowed posts regularly. +* If you can craft a message that causes lotus nodes to panic. +* If you can cause your miner to win significantly more blocks than it should. 
+* If you can craft a message that causes a persistent fork in the network. +* If you can cause the total amount of Filecoin in the network to no longer be 2 billion. + +This is not an exhaustive list, but should provide some idea of what we consider 'critical'. + +## Supported Versions + +* TODO: This should be defined and set up by Mainnet launch. + +| Version | Supported | +| ------- | ------------------ | +| Testnet | :white_check_mark: | diff --git a/api/api_common.go b/api/api_common.go index 81608c59e..f8fcbe8c5 100644 --- a/api/api_common.go +++ b/api/api_common.go @@ -4,31 +4,61 @@ import ( "context" "fmt" - "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/go-jsonrpc/auth" + metrics "github.com/libp2p/go-libp2p-core/metrics" "github.com/libp2p/go-libp2p-core/network" "github.com/libp2p/go-libp2p-core/peer" + protocol "github.com/libp2p/go-libp2p-core/protocol" + + "github.com/filecoin-project/lotus/build" ) -type Permission = string - type Common interface { - // Auth - AuthVerify(ctx context.Context, token string) ([]Permission, error) - AuthNew(ctx context.Context, perms []Permission) ([]byte, error) - // network + // MethodGroup: Auth + + AuthVerify(ctx context.Context, token string) ([]auth.Permission, error) + AuthNew(ctx context.Context, perms []auth.Permission) ([]byte, error) + + // MethodGroup: Net NetConnectedness(context.Context, peer.ID) (network.Connectedness, error) NetPeers(context.Context) ([]peer.AddrInfo, error) NetConnect(context.Context, peer.AddrInfo) error NetAddrsListen(context.Context) (peer.AddrInfo, error) NetDisconnect(context.Context, peer.ID) error + NetFindPeer(context.Context, peer.ID) (peer.AddrInfo, error) + NetPubsubScores(context.Context) ([]PubsubScore, error) + NetAutoNatStatus(context.Context) (NatInfo, error) + NetAgentVersion(ctx context.Context, p peer.ID) (string, error) + + // NetBandwidthStats returns statistics about the nodes total bandwidth + // usage and current rate across all peers 
and protocols. + NetBandwidthStats(ctx context.Context) (metrics.Stats, error) + + // NetBandwidthStatsByPeer returns statistics about the nodes bandwidth + // usage and current rate per peer + NetBandwidthStatsByPeer(ctx context.Context) (map[string]metrics.Stats, error) + + // NetBandwidthStatsByProtocol returns statistics about the nodes bandwidth + // usage and current rate per protocol + NetBandwidthStatsByProtocol(ctx context.Context) (map[protocol.ID]metrics.Stats, error) + + // MethodGroup: Common // ID returns peerID of libp2p node backing this API ID(context.Context) (peer.ID, error) // Version provides information about API provider Version(context.Context) (Version, error) + + LogList(context.Context) ([]string, error) + LogSetLevel(context.Context, string, string) error + + // trigger graceful shutdown + Shutdown(context.Context) error + + Closing(context.Context) (<-chan struct{}, error) } // Version provides various build-time information @@ -50,3 +80,8 @@ type Version struct { func (v Version) String() string { return fmt.Sprintf("%s+api%s", v.Version, v.APIVersion.String()) } + +type NatInfo struct { + Reachability network.Reachability + PublicAddr string +} diff --git a/api/api_full.go b/api/api_full.go index 996d4abbe..6d2d0c7b5 100644 --- a/api/api_full.go +++ b/api/api_full.go @@ -2,87 +2,292 @@ package api import ( "context" + "fmt" "time" + "github.com/filecoin-project/go-state-types/network" + "github.com/ipfs/go-cid" - "github.com/ipfs/go-filestore" "github.com/libp2p/go-libp2p-core/peer" "github.com/filecoin-project/go-address" - "github.com/filecoin-project/lotus/chain/actors" - "github.com/filecoin-project/lotus/chain/store" + "github.com/filecoin-project/go-bitfield" + "github.com/filecoin-project/go-fil-markets/retrievalmarket" + "github.com/filecoin-project/go-fil-markets/storagemarket" + "github.com/filecoin-project/go-multistore" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + 
"github.com/filecoin-project/go-state-types/crypto" + "github.com/filecoin-project/go-state-types/dline" + "github.com/filecoin-project/lotus/chain/actors/builtin/market" + "github.com/filecoin-project/lotus/chain/actors/builtin/miner" + "github.com/filecoin-project/lotus/chain/actors/builtin/paych" + + "github.com/filecoin-project/lotus/chain/actors/builtin" + "github.com/filecoin-project/lotus/chain/actors/builtin/power" "github.com/filecoin-project/lotus/chain/types" + marketevents "github.com/filecoin-project/lotus/markets/loggers" + "github.com/filecoin-project/lotus/node/modules/dtypes" ) // FullNode API is a low-level interface to the Filecoin network full node type FullNode interface { Common - // chain + // MethodGroup: Chain + // The Chain method group contains methods for interacting with the + // blockchain, but that do not require any form of state computation. - // ChainNotify returns channel with chain head updates - // First message is guaranteed to be of len == 1, and type == 'current' - ChainNotify(context.Context) (<-chan []*store.HeadChange, error) + // ChainNotify returns channel with chain head updates. + // First message is guaranteed to be of len == 1, and type == 'current'. + ChainNotify(context.Context) (<-chan []*HeadChange, error) + + // ChainHead returns the current head of the chain. ChainHead(context.Context) (*types.TipSet, error) - ChainGetRandomness(context.Context, types.TipSetKey, int64) ([]byte, error) + + // ChainGetRandomnessFromTickets is used to sample the chain for randomness. + ChainGetRandomnessFromTickets(ctx context.Context, tsk types.TipSetKey, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) (abi.Randomness, error) + + // ChainGetRandomnessFromBeacon is used to sample the beacon for randomness. 
+ ChainGetRandomnessFromBeacon(ctx context.Context, tsk types.TipSetKey, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) (abi.Randomness, error) + + // ChainGetBlock returns the block specified by the given CID. ChainGetBlock(context.Context, cid.Cid) (*types.BlockHeader, error) + // ChainGetTipSet returns the tipset specified by the given TipSetKey. ChainGetTipSet(context.Context, types.TipSetKey) (*types.TipSet, error) - ChainGetBlockMessages(context.Context, cid.Cid) (*BlockMessages, error) - ChainGetParentReceipts(context.Context, cid.Cid) ([]*types.MessageReceipt, error) - ChainGetParentMessages(context.Context, cid.Cid) ([]Message, error) - ChainGetTipSetByHeight(context.Context, uint64, *types.TipSet) (*types.TipSet, error) + + // ChainGetBlockMessages returns messages stored in the specified block. + ChainGetBlockMessages(ctx context.Context, blockCid cid.Cid) (*BlockMessages, error) + + // ChainGetParentReceipts returns receipts for messages in parent tipset of + // the specified block. + ChainGetParentReceipts(ctx context.Context, blockCid cid.Cid) ([]*types.MessageReceipt, error) + + // ChainGetParentMessages returns messages stored in parent tipset of the + // specified block. + ChainGetParentMessages(ctx context.Context, blockCid cid.Cid) ([]Message, error) + + // ChainGetTipSetByHeight looks back for a tipset at the specified epoch. + // If there are no blocks at the specified epoch, a tipset at an earlier epoch + // will be returned. + ChainGetTipSetByHeight(context.Context, abi.ChainEpoch, types.TipSetKey) (*types.TipSet, error) + + // ChainReadObj reads ipld nodes referenced by the specified CID from chain + // blockstore and returns raw bytes. 
ChainReadObj(context.Context, cid.Cid) ([]byte, error) - ChainSetHead(context.Context, *types.TipSet) error + + // ChainDeleteObj deletes node referenced by the given CID + ChainDeleteObj(context.Context, cid.Cid) error + + // ChainHasObj checks if a given CID exists in the chain blockstore. + ChainHasObj(context.Context, cid.Cid) (bool, error) + + // ChainStatObj returns statistics about the graph referenced by 'obj'. + // If 'base' is also specified, then the returned stat will be a diff + // between the two objects. + ChainStatObj(ctx context.Context, obj cid.Cid, base cid.Cid) (ObjStat, error) + + // ChainSetHead forcefully sets current chain head. Use with caution. + ChainSetHead(context.Context, types.TipSetKey) error + + // ChainGetGenesis returns the genesis tipset. ChainGetGenesis(context.Context) (*types.TipSet, error) - ChainTipSetWeight(context.Context, *types.TipSet) (types.BigInt, error) - ChainGetNode(ctx context.Context, p string) (interface{}, error) + + // ChainTipSetWeight computes weight for the specified tipset. + ChainTipSetWeight(context.Context, types.TipSetKey) (types.BigInt, error) + ChainGetNode(ctx context.Context, p string) (*IpldObject, error) + + // ChainGetMessage reads a message referenced by the specified CID from the + // chain blockstore. ChainGetMessage(context.Context, cid.Cid) (*types.Message, error) - // syncer + // ChainGetPath returns a set of revert/apply operations needed to get from + // one tipset to another, for example: + //``` + // to + // ^ + // from tAA + // ^ ^ + // tBA tAB + // ^---*--^ + // ^ + // tRR + //``` + // Would return `[revert(tBA), apply(tAB), apply(tAA)]` + ChainGetPath(ctx context.Context, from types.TipSetKey, to types.TipSetKey) ([]*HeadChange, error) + + // ChainExport returns a stream of bytes with CAR dump of chain data. + // The exported chain data includes the header chain from the given tipset + // back to genesis, the entire genesis state, and the most recent 'nroots' + // state trees. 
+ // If oldmsgskip is set, messages from before the requested roots are also not included. + ChainExport(ctx context.Context, nroots abi.ChainEpoch, oldmsgskip bool, tsk types.TipSetKey) (<-chan []byte, error) + + // MethodGroup: Beacon + // The Beacon method group contains methods for interacting with the random beacon (DRAND) + + // BeaconGetEntry returns the beacon entry for the given filecoin epoch. If + // the entry has not yet been produced, the call will block until the entry + // becomes available + BeaconGetEntry(ctx context.Context, epoch abi.ChainEpoch) (*types.BeaconEntry, error) + + // GasEstimateFeeCap estimates gas fee cap + GasEstimateFeeCap(context.Context, *types.Message, int64, types.TipSetKey) (types.BigInt, error) + + // GasEstimateGasLimit estimates gas used by the message and returns it. + // It fails if message fails to execute. + GasEstimateGasLimit(context.Context, *types.Message, types.TipSetKey) (int64, error) + + // GasEstimateGasPremium estimates what gas price should be used for a + // message to have high likelihood of inclusion in `nblocksincl` epochs. + + GasEstimateGasPremium(_ context.Context, nblocksincl uint64, + sender address.Address, gaslimit int64, tsk types.TipSetKey) (types.BigInt, error) + + // GasEstimateMessageGas estimates gas values for unset message gas fields + GasEstimateMessageGas(context.Context, *types.Message, *MessageSendSpec, types.TipSetKey) (*types.Message, error) + + // MethodGroup: Sync + // The Sync method group contains methods for interacting with and + // observing the lotus sync service. + + // SyncState returns the current status of the lotus sync system. SyncState(context.Context) (*SyncState, error) + + // SyncSubmitBlock can be used to submit a newly created block to the. + // network through this node SyncSubmitBlock(ctx context.Context, blk *types.BlockMsg) error + + // SyncIncomingBlocks returns a channel streaming incoming, potentially not + // yet synced block headers. 
 SyncIncomingBlocks(ctx context.Context) (<-chan *types.BlockHeader, error) + + // SyncCheckpoint marks a block as checkpointed, meaning that it won't ever fork away from it. + SyncCheckpoint(ctx context.Context, tsk types.TipSetKey) error + + // SyncMarkBad marks a block as bad, meaning that it won't ever be synced. + // Use with extreme caution. SyncMarkBad(ctx context.Context, bcid cid.Cid) error - // messages - MpoolPending(context.Context, *types.TipSet) ([]*types.SignedMessage, error) + // SyncUnmarkBad unmarks a block as bad, making it possible to be validated and synced again. + SyncUnmarkBad(ctx context.Context, bcid cid.Cid) error + + // SyncCheckBad checks if a block was marked as bad, and if it was, returns + // the reason. + SyncCheckBad(ctx context.Context, bcid cid.Cid) (string, error) + + // MethodGroup: Mpool + // The Mpool methods are for interacting with the message pool. The message pool + // manages all incoming and outgoing 'messages' going over the network. + + // MpoolPending returns pending mempool messages. + MpoolPending(context.Context, types.TipSetKey) ([]*types.SignedMessage, error) + + // MpoolSelect returns a list of pending messages for inclusion in the next block + MpoolSelect(context.Context, types.TipSetKey, float64) ([]*types.SignedMessage, error) + + // MpoolPush pushes a signed message to mempool. MpoolPush(context.Context, *types.SignedMessage) (cid.Cid, error) - MpoolPushMessage(context.Context, *types.Message) (*types.SignedMessage, error) // get nonce, sign, push + + // MpoolPushMessage atomically assigns a nonce, signs, and pushes a message + // to mempool. + // maxFee is only used when GasFeeCap/GasPremium fields aren't specified + // + // When maxFee is set to 0, MpoolPushMessage will guess appropriate fee + // based on current chain conditions + MpoolPushMessage(ctx context.Context, msg *types.Message, spec *MessageSendSpec) (*types.SignedMessage, error) + + // MpoolGetNonce gets next nonce for the specified sender. 
+ // Note that this method may not be atomic. Use MpoolPushMessage instead. MpoolGetNonce(context.Context, address.Address) (uint64, error) MpoolSub(context.Context) (<-chan MpoolUpdate, error) - // FullNodeStruct + // MpoolClear clears pending messages from the mpool + MpoolClear(context.Context, bool) error - // miner + // MpoolGetConfig returns (a copy of) the current mpool config + MpoolGetConfig(context.Context) (*types.MpoolConfig, error) + // MpoolSetConfig sets the mpool config to (a copy of) the supplied config + MpoolSetConfig(context.Context, *types.MpoolConfig) error - MinerCreateBlock(context.Context, address.Address, *types.TipSet, *types.Ticket, *types.EPostProof, []*types.SignedMessage, uint64, uint64) (*types.BlockMsg, error) + // MethodGroup: Miner + + MinerGetBaseInfo(context.Context, address.Address, abi.ChainEpoch, types.TipSetKey) (*MiningBaseInfo, error) + MinerCreateBlock(context.Context, *BlockTemplate) (*types.BlockMsg, error) // // UX ? - // wallet + // MethodGroup: Wallet - WalletNew(context.Context, string) (address.Address, error) + // WalletNew creates a new address in the wallet with the given sigType. + WalletNew(context.Context, crypto.SigType) (address.Address, error) + // WalletHas indicates whether the given address is in the wallet. WalletHas(context.Context, address.Address) (bool, error) + // WalletList lists all the addresses in the wallet. WalletList(context.Context) ([]address.Address, error) + // WalletBalance returns the balance of the given address at the current head of the chain. WalletBalance(context.Context, address.Address) (types.BigInt, error) - WalletSign(context.Context, address.Address, []byte) (*types.Signature, error) + // WalletSign signs the given bytes using the given address. + WalletSign(context.Context, address.Address, []byte) (*crypto.Signature, error) + // WalletSignMessage signs the given message using the given address. 
 WalletSignMessage(context.Context, address.Address, *types.Message) (*types.SignedMessage, error) + // WalletVerify takes an address, a signature, and some bytes, and indicates whether the signature is valid. + // The address does not have to be in the wallet. + WalletVerify(context.Context, address.Address, []byte, *crypto.Signature) (bool, error) + // WalletDefaultAddress returns the address marked as default in the wallet. + WalletDefaultAddress(context.Context) (address.Address, error) + // WalletSetDefault marks the given address as the default one. + WalletSetDefault(context.Context, address.Address) error + // WalletExport returns the private key of an address in the wallet. + WalletExport(context.Context, address.Address) (*types.KeyInfo, error) + // WalletImport receives a KeyInfo, which includes a private key, and imports it into the wallet. + WalletImport(context.Context, *types.KeyInfo) (address.Address, error) + // WalletDelete deletes an address from the wallet. + WalletDelete(context.Context, address.Address) error // Other - // ClientImport imports file under the specified path into filestore - ClientImport(ctx context.Context, path string) (cid.Cid, error) - ClientStartDeal(ctx context.Context, data cid.Cid, addr address.Address, miner address.Address, epochPrice types.BigInt, blocksDuration uint64) (*cid.Cid, error) + // MethodGroup: Client + // The Client methods all have to do with interacting with the storage and + // retrieval markets as a client + + // ClientImport imports file under the specified path into filestore. + ClientImport(ctx context.Context, ref FileRef) (*ImportRes, error) + // ClientRemoveImport removes file import + ClientRemoveImport(ctx context.Context, importID multistore.StoreID) error + // ClientStartDeal proposes a deal with a miner. + ClientStartDeal(ctx context.Context, params *StartDealParams) (*cid.Cid, error) + // ClientGetDealInfo returns the latest information about a given deal. 
ClientGetDealInfo(context.Context, cid.Cid) (*DealInfo, error) + // ClientListDeals returns information about the deals made by the local client. ClientListDeals(ctx context.Context) ([]DealInfo, error) + // ClientGetDealUpdates returns the status of updated deals + ClientGetDealUpdates(ctx context.Context) (<-chan DealInfo, error) + // ClientHasLocal indicates whether a certain CID is locally stored. ClientHasLocal(ctx context.Context, root cid.Cid) (bool, error) - ClientFindData(ctx context.Context, root cid.Cid) ([]QueryOffer, error) - ClientRetrieve(ctx context.Context, order RetrievalOrder, path string) error - ClientQueryAsk(ctx context.Context, p peer.ID, miner address.Address) (*types.SignedStorageAsk, error) + // ClientFindData identifies peers that have a certain file, and returns QueryOffers (one per peer). + ClientFindData(ctx context.Context, root cid.Cid, piece *cid.Cid) ([]QueryOffer, error) + // ClientMinerQueryOffer returns a QueryOffer for the specific miner and file. + ClientMinerQueryOffer(ctx context.Context, miner address.Address, root cid.Cid, piece *cid.Cid) (QueryOffer, error) + // ClientRetrieve initiates the retrieval of a file, as specified in the order. + ClientRetrieve(ctx context.Context, order RetrievalOrder, ref *FileRef) error + // ClientRetrieveWithEvents initiates the retrieval of a file, as specified in the order, and provides a channel + // of status updates. + ClientRetrieveWithEvents(ctx context.Context, order RetrievalOrder, ref *FileRef) (<-chan marketevents.RetrievalEvent, error) + // ClientQueryAsk returns a signed StorageAsk from the specified miner. + ClientQueryAsk(ctx context.Context, p peer.ID, miner address.Address) (*storagemarket.SignedStorageAsk, error) + // ClientCalcCommP calculates the CommP for a specified file + ClientCalcCommP(ctx context.Context, inpath string) (*CommPRet, error) + // ClientGenCar generates a CAR file for the specified file. 
+ ClientGenCar(ctx context.Context, ref FileRef, outpath string) error + // ClientDealSize calculates real deal data size + ClientDealSize(ctx context.Context, root cid.Cid) (DataSize, error) + // ClientListTransfers returns the status of all ongoing transfers of data + ClientListDataTransfers(ctx context.Context) ([]DataTransferChannel, error) + ClientDataTransferUpdates(ctx context.Context) (<-chan DataTransferChannel, error) + // ClientRetrieveTryRestartInsufficientFunds attempts to restart stalled retrievals on a given payment channel + // which are stuck due to insufficient funds + ClientRetrieveTryRestartInsufficientFunds(ctx context.Context, paymentChannel address.Address) error // ClientUnimport removes references to the specified file from filestore //ClientUnimport(path string) @@ -92,77 +297,244 @@ type FullNode interface { //ClientListAsks() []Ask - // if tipset is nil, we'll use heaviest - StateCall(context.Context, *types.Message, *types.TipSet) (*types.MessageReceipt, error) - StateReplay(context.Context, *types.TipSet, cid.Cid) (*ReplayResults, error) - StateGetActor(ctx context.Context, actor address.Address, ts *types.TipSet) (*types.Actor, error) - StateReadState(ctx context.Context, act *types.Actor, ts *types.TipSet) (*ActorState, error) - StateListMessages(ctx context.Context, match *types.Message, ts *types.TipSet, toht uint64) ([]cid.Cid, error) + // MethodGroup: State + // The State methods are used to query, inspect, and interact with chain state. + // All methods take a TipSetKey as a parameter. The state looked up is the state at that tipset. + // A nil TipSetKey can be provided as a param, this will cause the heaviest tipset in the chain to be used. 
- StateMinerSectors(context.Context, address.Address, *types.TipSet) ([]*ChainSectorInfo, error) - StateMinerProvingSet(context.Context, address.Address, *types.TipSet) ([]*ChainSectorInfo, error) - StateMinerPower(context.Context, address.Address, *types.TipSet) (MinerPower, error) - StateMinerWorker(context.Context, address.Address, *types.TipSet) (address.Address, error) - StateMinerPeerID(ctx context.Context, m address.Address, ts *types.TipSet) (peer.ID, error) - StateMinerElectionPeriodStart(ctx context.Context, actor address.Address, ts *types.TipSet) (uint64, error) - StateMinerSectorSize(context.Context, address.Address, *types.TipSet) (uint64, error) - StatePledgeCollateral(context.Context, *types.TipSet) (types.BigInt, error) - StateWaitMsg(context.Context, cid.Cid) (*MsgWait, error) - StateListMiners(context.Context, *types.TipSet) ([]address.Address, error) - StateListActors(context.Context, *types.TipSet) ([]address.Address, error) - StateMarketBalance(context.Context, address.Address, *types.TipSet) (actors.StorageParticipantBalance, error) - StateMarketParticipants(context.Context, *types.TipSet) (map[string]actors.StorageParticipantBalance, error) - StateMarketDeals(context.Context, *types.TipSet) (map[string]actors.OnChainDeal, error) - StateMarketStorageDeal(context.Context, uint64, *types.TipSet) (*actors.OnChainDeal, error) - StateLookupID(context.Context, address.Address, *types.TipSet) (address.Address, error) + // StateCall runs the given message and returns its result without any persisted changes. + StateCall(context.Context, *types.Message, types.TipSetKey) (*InvocResult, error) + // StateReplay returns the result of executing the indicated message, assuming it was executed in the indicated tipset. + StateReplay(context.Context, types.TipSetKey, cid.Cid) (*InvocResult, error) + // StateGetActor returns the indicated actor's nonce and balance. 
+ StateGetActor(ctx context.Context, actor address.Address, tsk types.TipSetKey) (*types.Actor, error) + // StateReadState returns the indicated actor's state. + StateReadState(ctx context.Context, actor address.Address, tsk types.TipSetKey) (*ActorState, error) + // StateListMessages looks back and returns all messages with a matching to or from address, stopping at the given height. + StateListMessages(ctx context.Context, match *types.Message, tsk types.TipSetKey, toht abi.ChainEpoch) ([]cid.Cid, error) + + // StateNetworkName returns the name of the network the node is synced to + StateNetworkName(context.Context) (dtypes.NetworkName, error) + // StateMinerSectors returns info about the given miner's sectors. If the filter bitfield is nil, all sectors are included. + StateMinerSectors(context.Context, address.Address, *bitfield.BitField, types.TipSetKey) ([]*miner.SectorOnChainInfo, error) + // StateMinerActiveSectors returns info about sectors that a given miner is actively proving. + StateMinerActiveSectors(context.Context, address.Address, types.TipSetKey) ([]*miner.SectorOnChainInfo, error) + // StateMinerProvingDeadline calculates the deadline at some epoch for a proving period + // and returns the deadline-related calculations. 
+ StateMinerProvingDeadline(context.Context, address.Address, types.TipSetKey) (*dline.Info, error) + // StateMinerPower returns the power of the indicated miner + StateMinerPower(context.Context, address.Address, types.TipSetKey) (*MinerPower, error) + // StateMinerInfo returns info about the indicated miner + StateMinerInfo(context.Context, address.Address, types.TipSetKey) (miner.MinerInfo, error) + // StateMinerDeadlines returns all the proving deadlines for the given miner + StateMinerDeadlines(context.Context, address.Address, types.TipSetKey) ([]Deadline, error) + // StateMinerPartitions returns all partitions in the specified deadline + StateMinerPartitions(ctx context.Context, m address.Address, dlIdx uint64, tsk types.TipSetKey) ([]Partition, error) + // StateMinerFaults returns a bitfield indicating the faulty sectors of the given miner + StateMinerFaults(context.Context, address.Address, types.TipSetKey) (bitfield.BitField, error) + // StateAllMinerFaults returns all non-expired Faults that occur within lookback epochs of the given tipset + StateAllMinerFaults(ctx context.Context, lookback abi.ChainEpoch, ts types.TipSetKey) ([]*Fault, error) + // StateMinerRecoveries returns a bitfield indicating the recovering sectors of the given miner + StateMinerRecoveries(context.Context, address.Address, types.TipSetKey) (bitfield.BitField, error) + // StateMinerInitialPledgeCollateral returns the precommit deposit for the specified miner's sector + StateMinerPreCommitDepositForPower(context.Context, address.Address, miner.SectorPreCommitInfo, types.TipSetKey) (types.BigInt, error) + // StateMinerInitialPledgeCollateral returns the initial pledge collateral for the specified miner's sector + StateMinerInitialPledgeCollateral(context.Context, address.Address, miner.SectorPreCommitInfo, types.TipSetKey) (types.BigInt, error) + // StateMinerAvailableBalance returns the portion of a miner's balance that can be withdrawn or spent + 
StateMinerAvailableBalance(context.Context, address.Address, types.TipSetKey) (types.BigInt, error) + // StateSectorPreCommitInfo returns the PreCommit info for the specified miner's sector + StateSectorPreCommitInfo(context.Context, address.Address, abi.SectorNumber, types.TipSetKey) (miner.SectorPreCommitOnChainInfo, error) + // StateSectorGetInfo returns the on-chain info for the specified miner's sector. Returns null in case the sector info isn't found + // NOTE: returned info.Expiration may not be accurate in some cases, use StateSectorExpiration to get accurate + // expiration epoch + StateSectorGetInfo(context.Context, address.Address, abi.SectorNumber, types.TipSetKey) (*miner.SectorOnChainInfo, error) + // StateSectorExpiration returns epoch at which given sector will expire + StateSectorExpiration(context.Context, address.Address, abi.SectorNumber, types.TipSetKey) (*miner.SectorExpiration, error) + // StateSectorPartition finds deadline/partition with the specified sector + StateSectorPartition(ctx context.Context, maddr address.Address, sectorNumber abi.SectorNumber, tok types.TipSetKey) (*miner.SectorLocation, error) + // StateSearchMsg searches for a message in the chain, and returns its receipt and the tipset where it was executed + StateSearchMsg(context.Context, cid.Cid) (*MsgLookup, error) + // StateMsgGasCost searches for a message in the chain, and returns details of the messages gas costs, including the penalty and miner tip + StateMsgGasCost(context.Context, cid.Cid, types.TipSetKey) (*MsgGasCost, error) + // StateWaitMsg looks back in the chain for a message. If not found, it blocks until the + // message arrives on chain, and gets to the indicated confidence depth. 
+ StateWaitMsg(ctx context.Context, cid cid.Cid, confidence uint64) (*MsgLookup, error) + // StateListMiners returns the addresses of every miner that has claimed power in the Power Actor + StateListMiners(context.Context, types.TipSetKey) ([]address.Address, error) + // StateListActors returns the addresses of every actor in the state + StateListActors(context.Context, types.TipSetKey) ([]address.Address, error) + // StateMarketBalance looks up the Escrow and Locked balances of the given address in the Storage Market + StateMarketBalance(context.Context, address.Address, types.TipSetKey) (MarketBalance, error) + // StateMarketParticipants returns the Escrow and Locked balances of every participant in the Storage Market + StateMarketParticipants(context.Context, types.TipSetKey) (map[string]MarketBalance, error) + // StateMarketDeals returns information about every deal in the Storage Market + StateMarketDeals(context.Context, types.TipSetKey) (map[string]MarketDeal, error) + // StateMarketStorageDeal returns information about the indicated deal + StateMarketStorageDeal(context.Context, abi.DealID, types.TipSetKey) (*MarketDeal, error) + // StateLookupID retrieves the ID address of the given address + StateLookupID(context.Context, address.Address, types.TipSetKey) (address.Address, error) + // StateAccountKey returns the public key address of the given ID address + StateAccountKey(context.Context, address.Address, types.TipSetKey) (address.Address, error) + // StateChangedActors returns all the actors whose states change between the two given state CIDs + // TODO: Should this take tipset keys instead? 
StateChangedActors(context.Context, cid.Cid, cid.Cid) (map[string]types.Actor, error) - StateGetReceipt(context.Context, cid.Cid, *types.TipSet) (*types.MessageReceipt, error) - StateMinerSectorCount(context.Context, address.Address, *types.TipSet) (MinerSectors, error) + // StateGetReceipt returns the message receipt for the given message + StateGetReceipt(context.Context, cid.Cid, types.TipSetKey) (*types.MessageReceipt, error) + // StateMinerSectorCount returns the number of sectors in a miner's sector set and proving set + StateMinerSectorCount(context.Context, address.Address, types.TipSetKey) (MinerSectors, error) + // StateCompute is a flexible command that applies the given messages on the given tipset. + // The messages are run as though the VM were at the provided height. + StateCompute(context.Context, abi.ChainEpoch, []*types.Message, types.TipSetKey) (*ComputeStateOutput, error) + // StateVerifiedClientStatus returns the data cap for the given address. + // Returns nil if there is no entry in the data cap table for the + // address. + StateVerifiedClientStatus(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*abi.StoragePower, error) + // StateDealProviderCollateralBounds returns the min and max collateral a storage provider + // can issue. It takes the deal size and verified status as parameters. 
+ StateDealProviderCollateralBounds(context.Context, abi.PaddedPieceSize, bool, types.TipSetKey) (DealCollateralBounds, error) - MarketEnsureAvailable(context.Context, address.Address, types.BigInt) error + // StateCirculatingSupply returns the circulating supply of Filecoin at the given tipset + StateCirculatingSupply(context.Context, types.TipSetKey) (CirculatingSupply, error) + // StateNetworkVersion returns the network version at the given tipset + StateNetworkVersion(context.Context, types.TipSetKey) (network.Version, error) + + // MethodGroup: Msig + // The Msig methods are used to interact with multisig wallets on the + // filecoin network + + // MsigGetAvailableBalance returns the portion of a multisig's balance that can be withdrawn or spent + MsigGetAvailableBalance(context.Context, address.Address, types.TipSetKey) (types.BigInt, error) + // MsigGetVested returns the amount of FIL that vested in a multisig in a certain period. + // It takes the following params: , , + MsigGetVested(context.Context, address.Address, types.TipSetKey, types.TipSetKey) (types.BigInt, error) + // MsigCreate creates a multisig wallet + // It takes the following params: , , + //, , + MsigCreate(context.Context, uint64, []address.Address, abi.ChainEpoch, types.BigInt, address.Address, types.BigInt) (cid.Cid, error) + // MsigPropose proposes a multisig message + // It takes the following params: , , , + // , , + MsigPropose(context.Context, address.Address, address.Address, types.BigInt, address.Address, uint64, []byte) (cid.Cid, error) + // MsigApprove approves a previously-proposed multisig message + // It takes the following params: , , , , , + // , , + MsigApprove(context.Context, address.Address, uint64, address.Address, address.Address, types.BigInt, address.Address, uint64, []byte) (cid.Cid, error) + // MsigCancel cancels a previously-proposed multisig message + // It takes the following params: , , , , + // , , + MsigCancel(context.Context, address.Address, uint64, 
address.Address, types.BigInt, address.Address, uint64, []byte) (cid.Cid, error) + // MsigAddPropose proposes adding a signer in the multisig + // It takes the following params: , , + // , + MsigAddPropose(context.Context, address.Address, address.Address, address.Address, bool) (cid.Cid, error) + // MsigAddApprove approves a previously proposed AddSigner message + // It takes the following params: , , , + // , , + MsigAddApprove(context.Context, address.Address, address.Address, uint64, address.Address, address.Address, bool) (cid.Cid, error) + // MsigAddCancel cancels a previously proposed AddSigner message + // It takes the following params: , , , + // , + MsigAddCancel(context.Context, address.Address, address.Address, uint64, address.Address, bool) (cid.Cid, error) + // MsigSwapPropose proposes swapping 2 signers in the multisig + // It takes the following params: , , + // , + MsigSwapPropose(context.Context, address.Address, address.Address, address.Address, address.Address) (cid.Cid, error) + // MsigSwapApprove approves a previously proposed SwapSigner + // It takes the following params: , , , + // , , + MsigSwapApprove(context.Context, address.Address, address.Address, uint64, address.Address, address.Address, address.Address) (cid.Cid, error) + // MsigSwapCancel cancels a previously proposed SwapSigner message + // It takes the following params: , , , + // , + MsigSwapCancel(context.Context, address.Address, address.Address, uint64, address.Address, address.Address) (cid.Cid, error) + + MarketEnsureAvailable(context.Context, address.Address, address.Address, types.BigInt) (cid.Cid, error) // MarketFreeBalance - PaychGet(ctx context.Context, from, to address.Address, ensureFunds types.BigInt) (*ChannelInfo, error) + // MethodGroup: Paych + // The Paych methods are for interacting with and managing payment channels + + PaychGet(ctx context.Context, from, to address.Address, amt types.BigInt) (*ChannelInfo, error) + PaychGetWaitReady(context.Context, cid.Cid) 
(address.Address, error) + PaychAvailableFunds(ctx context.Context, ch address.Address) (*ChannelAvailableFunds, error) + PaychAvailableFundsByFromTo(ctx context.Context, from, to address.Address) (*ChannelAvailableFunds, error) PaychList(context.Context) ([]address.Address, error) PaychStatus(context.Context, address.Address) (*PaychStatus, error) - PaychClose(context.Context, address.Address) (cid.Cid, error) + PaychSettle(context.Context, address.Address) (cid.Cid, error) + PaychCollect(context.Context, address.Address) (cid.Cid, error) PaychAllocateLane(ctx context.Context, ch address.Address) (uint64, error) PaychNewPayment(ctx context.Context, from, to address.Address, vouchers []VoucherSpec) (*PaymentInfo, error) - PaychVoucherCheckValid(context.Context, address.Address, *types.SignedVoucher) error - PaychVoucherCheckSpendable(context.Context, address.Address, *types.SignedVoucher, []byte, []byte) (bool, error) - PaychVoucherCreate(context.Context, address.Address, types.BigInt, uint64) (*types.SignedVoucher, error) - PaychVoucherAdd(context.Context, address.Address, *types.SignedVoucher, []byte, types.BigInt) (types.BigInt, error) - PaychVoucherList(context.Context, address.Address) ([]*types.SignedVoucher, error) - PaychVoucherSubmit(context.Context, address.Address, *types.SignedVoucher) (cid.Cid, error) + PaychVoucherCheckValid(context.Context, address.Address, *paych.SignedVoucher) error + PaychVoucherCheckSpendable(context.Context, address.Address, *paych.SignedVoucher, []byte, []byte) (bool, error) + PaychVoucherCreate(context.Context, address.Address, types.BigInt, uint64) (*VoucherCreateResult, error) + PaychVoucherAdd(context.Context, address.Address, *paych.SignedVoucher, []byte, types.BigInt) (types.BigInt, error) + PaychVoucherList(context.Context, address.Address) ([]*paych.SignedVoucher, error) + PaychVoucherSubmit(context.Context, address.Address, *paych.SignedVoucher, []byte, []byte) (cid.Cid, error) +} + +type FileRef struct { + Path string 
+ IsCAR bool } type MinerSectors struct { - Pset uint64 - Sset uint64 + // Live sectors that should be proven. + Live uint64 + // Sectors actively contributing to power. + Active uint64 + // Sectors with failed proofs. + Faulty uint64 +} + +type ImportRes struct { + Root cid.Cid + ImportID multistore.StoreID } type Import struct { - Status filestore.Status - Key cid.Cid + Key multistore.StoreID + Err string + + Root *cid.Cid + Source string FilePath string - Size uint64 } type DealInfo struct { ProposalCid cid.Cid - State DealState + State storagemarket.StorageDealStatus + Message string // more information about deal state, particularly errors Provider address.Address - PieceRef []byte // cid bytes + DataRef *storagemarket.DataRef + PieceCID cid.Cid Size uint64 PricePerEpoch types.BigInt Duration uint64 + + DealID abi.DealID + + CreationTime time.Time } -type MsgWait struct { - Receipt types.MessageReceipt - TipSet *types.TipSet +type MsgLookup struct { + Message cid.Cid // Can be different than requested, in case it was replaced, but only gas values changed + Receipt types.MessageReceipt + ReturnDec interface{} + TipSet types.TipSetKey + Height abi.ChainEpoch +} + +type MsgGasCost struct { + Message cid.Cid // Can be different than requested, in case it was replaced, but only gas values changed + GasUsed abi.TokenAmount + BaseFeeBurn abi.TokenAmount + OverEstimationBurn abi.TokenAmount + MinerPenalty abi.TokenAmount + MinerTip abi.TokenAmount + Refund abi.TokenAmount + TotalCost abi.TokenAmount } type BlockMessages struct { @@ -177,12 +549,6 @@ type Message struct { Message *types.Message } -type ChainSectorInfo struct { - SectorID uint64 - CommD []byte - CommR []byte -} - type ActorState struct { Balance types.BigInt State interface{} @@ -202,70 +568,147 @@ type PaychStatus struct { } type ChannelInfo struct { - Channel address.Address - ChannelMessage cid.Cid + Channel address.Address + WaitSentinel cid.Cid +} + +type ChannelAvailableFunds struct { + // Channel 
is the address of the channel + Channel *address.Address + // From is the from address of the channel (channel creator) + From address.Address + // To is the to address of the channel + To address.Address + // ConfirmedAmt is the amount of funds that have been confirmed on-chain + // for the channel + ConfirmedAmt types.BigInt + // PendingAmt is the amount of funds that are pending confirmation on-chain + PendingAmt types.BigInt + // PendingWaitSentinel can be used with PaychGetWaitReady to wait for + // confirmation of pending funds + PendingWaitSentinel *cid.Cid + // QueuedAmt is the amount that is queued up behind a pending request + QueuedAmt types.BigInt + // VoucherRedeemedAmt is the amount that is redeemed by vouchers on-chain + // and in the local datastore + VoucherReedeemedAmt types.BigInt } type PaymentInfo struct { - Channel address.Address - ChannelMessage *cid.Cid - Vouchers []*types.SignedVoucher + Channel address.Address + WaitSentinel cid.Cid + Vouchers []*paych.SignedVoucher } type VoucherSpec struct { - Amount types.BigInt - TimeLock uint64 - MinClose uint64 + Amount types.BigInt + TimeLockMin abi.ChainEpoch + TimeLockMax abi.ChainEpoch + MinSettle abi.ChainEpoch - Extra *types.ModVerifyParams + Extra *paych.ModVerifyParams +} + +// VoucherCreateResult is the response to calling PaychVoucherCreate +type VoucherCreateResult struct { + // Voucher that was created, or nil if there was an error or if there + // were insufficient funds in the channel + Voucher *paych.SignedVoucher + // Shortfall is the additional amount that would be needed in the channel + // in order to be able to create the voucher + Shortfall types.BigInt } type MinerPower struct { - MinerPower types.BigInt - TotalPower types.BigInt + MinerPower power.Claim + TotalPower power.Claim + HasMinPower bool } type QueryOffer struct { Err string - Root cid.Cid + Root cid.Cid + Piece *cid.Cid - Size uint64 - MinPrice types.BigInt - - Miner address.Address - MinerPeerID peer.ID + Size 
uint64 + MinPrice types.BigInt + UnsealPrice types.BigInt + PaymentInterval uint64 + PaymentIntervalIncrease uint64 + Miner address.Address + MinerPeer retrievalmarket.RetrievalPeer } func (o *QueryOffer) Order(client address.Address) RetrievalOrder { return RetrievalOrder{ - Root: o.Root, - Size: o.Size, - Total: o.MinPrice, + Root: o.Root, + Piece: o.Piece, + Size: o.Size, + Total: o.MinPrice, + UnsealPrice: o.UnsealPrice, + PaymentInterval: o.PaymentInterval, + PaymentIntervalIncrease: o.PaymentIntervalIncrease, + Client: client, - Client: client, - - Miner: o.Miner, - MinerPeerID: o.MinerPeerID, + Miner: o.Miner, + MinerPeer: o.MinerPeer, } } +type MarketBalance struct { + Escrow big.Int + Locked big.Int +} + +type MarketDeal struct { + Proposal market.DealProposal + State market.DealState +} + type RetrievalOrder struct { // TODO: make this less unixfs specific - Root cid.Cid - Size uint64 + Root cid.Cid + Piece *cid.Cid + Size uint64 // TODO: support offset - Total types.BigInt - - Client address.Address - Miner address.Address - MinerPeerID peer.ID + Total types.BigInt + UnsealPrice types.BigInt + PaymentInterval uint64 + PaymentIntervalIncrease uint64 + Client address.Address + Miner address.Address + MinerPeer retrievalmarket.RetrievalPeer } -type ReplayResults struct { - Msg *types.Message - Receipt *types.MessageReceipt - Error string +type InvocResult struct { + Msg *types.Message + MsgRct *types.MessageReceipt + ExecutionTrace types.ExecutionTrace + Error string + Duration time.Duration +} + +type MethodCall struct { + types.MessageReceipt + Error string +} + +type StartDealParams struct { + Data *storagemarket.DataRef + Wallet address.Address + Miner address.Address + EpochPrice types.BigInt + MinBlocksDuration uint64 + ProviderCollateral big.Int + DealStartEpoch abi.ChainEpoch + FastRetrieval bool + VerifiedDeal bool +} + +type IpldObject struct { + Cid cid.Cid + Obj interface{} } type ActiveSync struct { @@ -273,7 +716,7 @@ type ActiveSync struct { 
Target *types.TipSet Stage SyncStateStage - Height uint64 + Height abi.ChainEpoch Start time.Time End time.Time @@ -282,6 +725,8 @@ type ActiveSync struct { type SyncState struct { ActiveSyncs []ActiveSync + + VMApplied uint64 } type SyncStateStage int @@ -293,8 +738,28 @@ const ( StageMessages StageSyncComplete StageSyncErrored + StageFetchingMessages ) +func (v SyncStateStage) String() string { + switch v { + case StageHeaders: + return "header sync" + case StagePersistHeaders: + return "persisting headers" + case StageMessages: + return "message sync" + case StageSyncComplete: + return "complete" + case StageSyncErrored: + return "error" + case StageFetchingMessages: + return "fetching messages" + default: + return fmt.Sprintf("", v) + } +} + type MpoolChange int const ( @@ -306,3 +771,82 @@ type MpoolUpdate struct { Type MpoolChange Message *types.SignedMessage } + +type ComputeStateOutput struct { + Root cid.Cid + Trace []*InvocResult +} + +type DealCollateralBounds struct { + Min abi.TokenAmount + Max abi.TokenAmount +} + +type CirculatingSupply struct { + FilVested abi.TokenAmount + FilMined abi.TokenAmount + FilBurnt abi.TokenAmount + FilLocked abi.TokenAmount + FilCirculating abi.TokenAmount +} + +type MiningBaseInfo struct { + MinerPower types.BigInt + NetworkPower types.BigInt + Sectors []builtin.SectorInfo + WorkerKey address.Address + SectorSize abi.SectorSize + PrevBeaconEntry types.BeaconEntry + BeaconEntries []types.BeaconEntry + HasMinPower bool +} + +type BlockTemplate struct { + Miner address.Address + Parents types.TipSetKey + Ticket *types.Ticket + Eproof *types.ElectionProof + BeaconValues []types.BeaconEntry + Messages []*types.SignedMessage + Epoch abi.ChainEpoch + Timestamp uint64 + WinningPoStProof []builtin.PoStProof +} + +type DataSize struct { + PayloadSize int64 + PieceSize abi.PaddedPieceSize +} + +type CommPRet struct { + Root cid.Cid + Size abi.UnpaddedPieceSize +} +type HeadChange struct { + Type string + Val *types.TipSet +} + 
+type MsigProposeResponse int + +const ( + MsigApprove MsigProposeResponse = iota + MsigCancel +) + +type Deadline struct { + PostSubmissions bitfield.BitField +} + +type Partition struct { + AllSectors bitfield.BitField + FaultySectors bitfield.BitField + RecoveringSectors bitfield.BitField + LiveSectors bitfield.BitField + ActiveSectors bitfield.BitField +} + +type Fault struct { + Miner address.Address + Epoch abi.ChainEpoch +} diff --git a/api/api_storage.go b/api/api_storage.go index 8d0c39038..824772181 100644 --- a/api/api_storage.go +++ b/api/api_storage.go @@ -1,112 +1,182 @@ package api import ( + "bytes" "context" + "time" + + "github.com/ipfs/go-cid" "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-sectorbuilder" + "github.com/filecoin-project/go-fil-markets/piecestore" + "github.com/filecoin-project/go-fil-markets/retrievalmarket" + "github.com/filecoin-project/go-fil-markets/storagemarket" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/extern/sector-storage/fsutil" + "github.com/filecoin-project/lotus/extern/sector-storage/stores" + "github.com/filecoin-project/lotus/extern/sector-storage/storiface" ) -// alias because cbor-gen doesn't like non-alias types -type SectorState = uint64 - -const ( - UndefinedSectorState SectorState = iota - - Empty // TODO: Is this useful - Packing // sector not in sealStore, and not on chain - - Unsealed // sealing / queued - PreCommitting // on chain pre-commit - PreCommitted // waiting for seed - Committing - CommitWait // waiting for message to land on chain - Proving - - SealFailed - PreCommitFailed - SealCommitFailed - CommitFailed - - FailedUnrecoverable - - Faulty // sector is corrupted or gone for some reason - FaultReported // sector has been declared as a fault on chain - FaultedFinal // fault declared on chain -) - -var SectorStates = []string{ - UndefinedSectorState: "UndefinedSectorState", - 
Empty: "Empty", - Packing: "Packing", - Unsealed: "Unsealed", - PreCommitting: "PreCommitting", - PreCommitted: "PreCommitted", - Committing: "Committing", - CommitWait: "CommitWait", - Proving: "Proving", - - SealFailed: "SealFailed", - PreCommitFailed: "PreCommitFailed", - SealCommitFailed: "SealCommitFailed", - CommitFailed: "CommitFailed", - - FailedUnrecoverable: "FailedUnrecoverable", - - Faulty: "Faulty", - FaultReported: "FaultReported", - FaultedFinal: "FaultedFinal", -} - // StorageMiner is a low-level interface to the Filecoin network storage miner node type StorageMiner interface { Common ActorAddress(context.Context) (address.Address, error) - ActorSectorSize(context.Context, address.Address) (uint64, error) + ActorSectorSize(context.Context, address.Address) (abi.SectorSize, error) + + MiningBase(context.Context) (*types.TipSet, error) // Temp api for testing PledgeSector(context.Context) error // Get the status of a given sector by ID - SectorsStatus(context.Context, uint64) (SectorInfo, error) + SectorsStatus(ctx context.Context, sid abi.SectorNumber, showOnChainInfo bool) (SectorInfo, error) // List all staged sectors - SectorsList(context.Context) ([]uint64, error) + SectorsList(context.Context) ([]abi.SectorNumber, error) SectorsRefs(context.Context) (map[string][]SealedRef, error) - SectorsUpdate(context.Context, uint64, SectorState) error + // SectorStartSealing can be called on sectors in Empty or WaitDeals states + // to trigger sealing early + SectorStartSealing(context.Context, abi.SectorNumber) error + // SectorSetSealDelay sets the time that a newly-created sector + // waits for more deals before it starts sealing + SectorSetSealDelay(context.Context, time.Duration) error + // SectorGetSealDelay gets the time that a newly-created sector + // waits for more deals before it starts sealing + SectorGetSealDelay(context.Context) (time.Duration, error) + // SectorSetExpectedSealDuration sets the expected time for a sector to seal + 
SectorSetExpectedSealDuration(context.Context, time.Duration) error + // SectorGetExpectedSealDuration gets the expected time for a sector to seal + SectorGetExpectedSealDuration(context.Context) (time.Duration, error) + SectorsUpdate(context.Context, abi.SectorNumber, SectorState) error + SectorRemove(context.Context, abi.SectorNumber) error + SectorMarkForUpgrade(ctx context.Context, id abi.SectorNumber) error - WorkerStats(context.Context) (sectorbuilder.WorkerStats, error) + StorageList(ctx context.Context) (map[stores.ID][]stores.Decl, error) + StorageLocal(ctx context.Context) (map[stores.ID]string, error) + StorageStat(ctx context.Context, id stores.ID) (fsutil.FsStat, error) - // WorkerQueue registers a remote worker - WorkerQueue(context.Context, sectorbuilder.WorkerCfg) (<-chan sectorbuilder.WorkerTask, error) + // WorkerConnect tells the node to connect to workers RPC + WorkerConnect(context.Context, string) error + WorkerStats(context.Context) (map[uint64]storiface.WorkerStats, error) + WorkerJobs(context.Context) (map[uint64][]storiface.WorkerJob, error) - WorkerDone(ctx context.Context, task uint64, res sectorbuilder.SealRes) error + // SealingSchedDiag dumps internal sealing scheduler state + SealingSchedDiag(context.Context) (interface{}, error) + + stores.SectorIndex + + MarketImportDealData(ctx context.Context, propcid cid.Cid, path string) error + MarketListDeals(ctx context.Context) ([]MarketDeal, error) + MarketListRetrievalDeals(ctx context.Context) ([]retrievalmarket.ProviderDealState, error) + MarketGetDealUpdates(ctx context.Context) (<-chan storagemarket.MinerDeal, error) + MarketListIncompleteDeals(ctx context.Context) ([]storagemarket.MinerDeal, error) + MarketSetAsk(ctx context.Context, price types.BigInt, verifiedPrice types.BigInt, duration abi.ChainEpoch, minPieceSize abi.PaddedPieceSize, maxPieceSize abi.PaddedPieceSize) error + MarketGetAsk(ctx context.Context) (*storagemarket.SignedStorageAsk, error) + MarketSetRetrievalAsk(ctx 
context.Context, rask *retrievalmarket.Ask) error + MarketGetRetrievalAsk(ctx context.Context) (*retrievalmarket.Ask, error) + MarketListDataTransfers(ctx context.Context) ([]DataTransferChannel, error) + MarketDataTransferUpdates(ctx context.Context) (<-chan DataTransferChannel, error) + + DealsImportData(ctx context.Context, dealPropCid cid.Cid, file string) error + DealsList(ctx context.Context) ([]MarketDeal, error) + DealsConsiderOnlineStorageDeals(context.Context) (bool, error) + DealsSetConsiderOnlineStorageDeals(context.Context, bool) error + DealsConsiderOnlineRetrievalDeals(context.Context) (bool, error) + DealsSetConsiderOnlineRetrievalDeals(context.Context, bool) error + DealsPieceCidBlocklist(context.Context) ([]cid.Cid, error) + DealsSetPieceCidBlocklist(context.Context, []cid.Cid) error + DealsConsiderOfflineStorageDeals(context.Context) (bool, error) + DealsSetConsiderOfflineStorageDeals(context.Context, bool) error + DealsConsiderOfflineRetrievalDeals(context.Context) (bool, error) + DealsSetConsiderOfflineRetrievalDeals(context.Context, bool) error + + StorageAddLocal(ctx context.Context, path string) error + + PiecesListPieces(ctx context.Context) ([]cid.Cid, error) + PiecesListCidInfos(ctx context.Context) ([]cid.Cid, error) + PiecesGetPieceInfo(ctx context.Context, pieceCid cid.Cid) (*piecestore.PieceInfo, error) + PiecesGetCIDInfo(ctx context.Context, payloadCid cid.Cid) (*piecestore.CIDInfo, error) +} + +type SealRes struct { + Err string + GoErr error `json:"-"` + + Proof []byte +} + +type SectorLog struct { + Kind string + Timestamp uint64 + + Trace string + + Message string } type SectorInfo struct { - SectorID uint64 - State SectorState - CommD []byte - CommR []byte - Proof []byte - Deals []uint64 - Ticket sectorbuilder.SealTicket - Seed sectorbuilder.SealSeed - Retries uint64 + SectorID abi.SectorNumber + State SectorState + CommD *cid.Cid + CommR *cid.Cid + Proof []byte + Deals []abi.DealID + Ticket SealTicket + Seed SealSeed + 
PreCommitMsg *cid.Cid + CommitMsg *cid.Cid + Retries uint64 + ToUpgrade bool LastErr string + + Log []SectorLog + + // On Chain Info + SealProof abi.RegisteredSealProof // The seal proof type implies the PoSt proof/s + Activation abi.ChainEpoch // Epoch during which the sector proof was accepted + Expiration abi.ChainEpoch // Epoch during which the sector expires + DealWeight abi.DealWeight // Integral of active deals over sector lifetime + VerifiedDealWeight abi.DealWeight // Integral of active verified deals over sector lifetime + InitialPledge abi.TokenAmount // Pledge collected to commit this sector + // Expiration Info + OnTime abi.ChainEpoch + // non-zero if sector is faulty, epoch at which it will be permanently + // removed if it doesn't recover + Early abi.ChainEpoch } type SealedRef struct { - SectorID uint64 - Offset uint64 - Size uint64 + SectorID abi.SectorNumber + Offset abi.PaddedPieceSize + Size abi.UnpaddedPieceSize } type SealedRefs struct { Refs []SealedRef } + +type SealTicket struct { + Value abi.SealRandomness + Epoch abi.ChainEpoch +} + +type SealSeed struct { + Value abi.InteractiveSealRandomness + Epoch abi.ChainEpoch +} + +func (st *SealTicket) Equals(ost *SealTicket) bool { + return bytes.Equal(st.Value, ost.Value) && st.Epoch == ost.Epoch +} + +func (st *SealSeed) Equals(ost *SealSeed) bool { + return bytes.Equal(st.Value, ost.Value) && st.Epoch == ost.Epoch +} + +type SectorState string diff --git a/api/api_test.go b/api/api_test.go new file mode 100644 index 000000000..34c47f432 --- /dev/null +++ b/api/api_test.go @@ -0,0 +1,103 @@ +package api + +import ( + "encoding/json" + "os" + "os/exec" + "path/filepath" + "reflect" + "runtime" + "strings" + "testing" + + "github.com/stretchr/testify/require" +) + +func goCmd() string { + var exeSuffix string + if runtime.GOOS == "windows" { + exeSuffix = ".exe" + } + path := filepath.Join(runtime.GOROOT(), "bin", "go"+exeSuffix) + if _, err := os.Stat(path); err == nil { + return path + } + 
return "go" +} + +func TestDoesntDependOnFFI(t *testing.T) { + deps, err := exec.Command(goCmd(), "list", "-deps", "github.com/filecoin-project/lotus/api").Output() + if err != nil { + t.Fatal(err) + } + for _, pkg := range strings.Fields(string(deps)) { + if pkg == "github.com/filecoin-project/filecoin-ffi" { + t.Fatal("api depends on filecoin-ffi") + } + } +} + +func TestReturnTypes(t *testing.T) { + errType := reflect.TypeOf(new(error)).Elem() + bareIface := reflect.TypeOf(new(interface{})).Elem() + jmarsh := reflect.TypeOf(new(json.Marshaler)).Elem() + + tst := func(api interface{}) func(t *testing.T) { + return func(t *testing.T) { + ra := reflect.TypeOf(api).Elem() + for i := 0; i < ra.NumMethod(); i++ { + m := ra.Method(i) + switch m.Type.NumOut() { + case 1: // if 1 return value, it must be an error + require.Equal(t, errType, m.Type.Out(0), m.Name) + + case 2: // if 2 return values, first cant be an interface/function, second must be an error + seen := map[reflect.Type]struct{}{} + todo := []reflect.Type{m.Type.Out(0)} + for len(todo) > 0 { + typ := todo[len(todo)-1] + todo = todo[:len(todo)-1] + + if _, ok := seen[typ]; ok { + continue + } + seen[typ] = struct{}{} + + if typ.Kind() == reflect.Interface && typ != bareIface && !typ.Implements(jmarsh) { + t.Error("methods can't return interfaces", m.Name) + } + + switch typ.Kind() { + case reflect.Ptr: + fallthrough + case reflect.Array: + fallthrough + case reflect.Slice: + fallthrough + case reflect.Chan: + todo = append(todo, typ.Elem()) + case reflect.Map: + todo = append(todo, typ.Elem()) + todo = append(todo, typ.Key()) + case reflect.Struct: + for i := 0; i < typ.NumField(); i++ { + todo = append(todo, typ.Field(i).Type) + } + } + } + + require.NotEqual(t, reflect.Func.String(), m.Type.Out(0).Kind().String(), m.Name) + require.Equal(t, errType, m.Type.Out(1), m.Name) + + default: + t.Error("methods can only have 1 or 2 return values", m.Name) + } + } + } + } + + t.Run("common", tst(new(Common))) + 
t.Run("full", tst(new(FullNode))) + t.Run("miner", tst(new(StorageMiner))) + t.Run("worker", tst(new(WorkerAPI))) +} diff --git a/api/api_worker.go b/api/api_worker.go new file mode 100644 index 000000000..ac1446fdd --- /dev/null +++ b/api/api_worker.go @@ -0,0 +1,40 @@ +package api + +import ( + "context" + "io" + + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/lotus/extern/sector-storage/sealtasks" + "github.com/filecoin-project/lotus/extern/sector-storage/stores" + "github.com/filecoin-project/lotus/extern/sector-storage/storiface" + "github.com/filecoin-project/specs-storage/storage" + + "github.com/filecoin-project/lotus/build" +) + +type WorkerAPI interface { + Version(context.Context) (build.Version, error) + // TODO: Info() (name, ...) ? + + TaskTypes(context.Context) (map[sealtasks.TaskType]struct{}, error) // TaskType -> Weight + Paths(context.Context) ([]stores.StoragePath, error) + Info(context.Context) (storiface.WorkerInfo, error) + + AddPiece(ctx context.Context, sector abi.SectorID, pieceSizes []abi.UnpaddedPieceSize, newPieceSize abi.UnpaddedPieceSize, pieceData storage.Data) (abi.PieceInfo, error) + + storage.Sealer + + MoveStorage(ctx context.Context, sector abi.SectorID, types stores.SectorFileType) error + + UnsealPiece(context.Context, abi.SectorID, storiface.UnpaddedByteIndex, abi.UnpaddedPieceSize, abi.SealRandomness, cid.Cid) error + ReadPiece(context.Context, io.Writer, abi.SectorID, storiface.UnpaddedByteIndex, abi.UnpaddedPieceSize) (bool, error) + + StorageAddLocal(ctx context.Context, path string) error + + Fetch(context.Context, abi.SectorID, stores.SectorFileType, stores.PathType, stores.AcquireMode) error + + Closing(context.Context) (<-chan struct{}, error) +} diff --git a/api/apibstore/apibstore.go b/api/apibstore/apibstore.go new file mode 100644 index 000000000..cf9f4f24c --- /dev/null +++ b/api/apibstore/apibstore.go @@ -0,0 +1,68 @@ +package apibstore + +import ( + 
"context" + + blocks "github.com/ipfs/go-block-format" + "github.com/ipfs/go-cid" + "golang.org/x/xerrors" + + "github.com/filecoin-project/lotus/lib/blockstore" +) + +type ChainIO interface { + ChainReadObj(context.Context, cid.Cid) ([]byte, error) + ChainHasObj(context.Context, cid.Cid) (bool, error) +} + +type apiBStore struct { + api ChainIO +} + +func NewAPIBlockstore(cio ChainIO) blockstore.Blockstore { + return &apiBStore{ + api: cio, + } +} + +func (a *apiBStore) DeleteBlock(cid.Cid) error { + return xerrors.New("not supported") +} + +func (a *apiBStore) Has(c cid.Cid) (bool, error) { + return a.api.ChainHasObj(context.TODO(), c) +} + +func (a *apiBStore) Get(c cid.Cid) (blocks.Block, error) { + bb, err := a.api.ChainReadObj(context.TODO(), c) + if err != nil { + return nil, err + } + return blocks.NewBlockWithCid(bb, c) +} + +func (a *apiBStore) GetSize(c cid.Cid) (int, error) { + bb, err := a.api.ChainReadObj(context.TODO(), c) + if err != nil { + return 0, err + } + return len(bb), nil +} + +func (a *apiBStore) Put(blocks.Block) error { + return xerrors.New("not supported") +} + +func (a *apiBStore) PutMany([]blocks.Block) error { + return xerrors.New("not supported") +} + +func (a *apiBStore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) { + return nil, xerrors.New("not supported") +} + +func (a *apiBStore) HashOnRead(enabled bool) { + return +} + +var _ blockstore.Blockstore = &apiBStore{} diff --git a/api/apistruct/permissioned.go b/api/apistruct/permissioned.go index 4c29f6688..c93662733 100644 --- a/api/apistruct/permissioned.go +++ b/api/apistruct/permissioned.go @@ -1,105 +1,38 @@ package apistruct import ( - "context" - "reflect" - - "golang.org/x/xerrors" - + "github.com/filecoin-project/go-jsonrpc/auth" "github.com/filecoin-project/lotus/api" ) -type permKey int - -var permCtxKey permKey - const ( // When changing these, update docs/API.md too - PermRead api.Permission = "read" // default - PermWrite api.Permission = "write" - 
PermSign api.Permission = "sign" // Use wallet keys for signing - PermAdmin api.Permission = "admin" // Manage permissions + PermRead auth.Permission = "read" // default + PermWrite auth.Permission = "write" + PermSign auth.Permission = "sign" // Use wallet keys for signing + PermAdmin auth.Permission = "admin" // Manage permissions ) -var AllPermissions = []api.Permission{PermRead, PermWrite, PermSign, PermAdmin} -var defaultPerms = []api.Permission{PermRead} - -func WithPerm(ctx context.Context, perms []api.Permission) context.Context { - return context.WithValue(ctx, permCtxKey, perms) -} +var AllPermissions = []auth.Permission{PermRead, PermWrite, PermSign, PermAdmin} +var DefaultPerms = []auth.Permission{PermRead} func PermissionedStorMinerAPI(a api.StorageMiner) api.StorageMiner { var out StorageMinerStruct - permissionedAny(a, &out.Internal) - permissionedAny(a, &out.CommonStruct.Internal) + auth.PermissionedProxy(AllPermissions, DefaultPerms, a, &out.Internal) + auth.PermissionedProxy(AllPermissions, DefaultPerms, a, &out.CommonStruct.Internal) return &out } func PermissionedFullAPI(a api.FullNode) api.FullNode { var out FullNodeStruct - permissionedAny(a, &out.Internal) - permissionedAny(a, &out.CommonStruct.Internal) + auth.PermissionedProxy(AllPermissions, DefaultPerms, a, &out.Internal) + auth.PermissionedProxy(AllPermissions, DefaultPerms, a, &out.CommonStruct.Internal) return &out } -func HasPerm(ctx context.Context, perm api.Permission) bool { - callerPerms, ok := ctx.Value(permCtxKey).([]api.Permission) - if !ok { - callerPerms = defaultPerms - } - - for _, callerPerm := range callerPerms { - if callerPerm == perm { - return true - } - } - return false -} - -func permissionedAny(in interface{}, out interface{}) { - rint := reflect.ValueOf(out).Elem() - ra := reflect.ValueOf(in) - - for f := 0; f < rint.NumField(); f++ { - field := rint.Type().Field(f) - requiredPerm := api.Permission(field.Tag.Get("perm")) - if requiredPerm == "" { - panic("missing 
'perm' tag on " + field.Name) // ok - } - - // Validate perm tag - ok := false - for _, perm := range AllPermissions { - if requiredPerm == perm { - ok = true - break - } - } - if !ok { - panic("unknown 'perm' tag on " + field.Name) // ok - } - - fn := ra.MethodByName(field.Name) - - rint.Field(f).Set(reflect.MakeFunc(field.Type, func(args []reflect.Value) (results []reflect.Value) { - ctx := args[0].Interface().(context.Context) - if HasPerm(ctx, requiredPerm) { - return fn.Call(args) - } - - err := xerrors.Errorf("missing permission to invoke '%s' (need '%s')", field.Name, requiredPerm) - rerr := reflect.ValueOf(&err).Elem() - - if field.Type.NumOut() == 2 { - return []reflect.Value{ - reflect.Zero(field.Type.Out(0)), - rerr, - } - } else { - return []reflect.Value{rerr} - } - })) - - } +func PermissionedWorkerAPI(a api.WorkerAPI) api.WorkerAPI { + var out WorkerStruct + auth.PermissionedProxy(AllPermissions, DefaultPerms, a, &out.Internal) + return &out } diff --git a/api/apistruct/struct.go b/api/apistruct/struct.go index fbaff4b66..d5b6950ad 100644 --- a/api/apistruct/struct.go +++ b/api/apistruct/struct.go @@ -2,18 +2,41 @@ package apistruct import ( "context" + "io" + "time" - sectorbuilder "github.com/filecoin-project/go-sectorbuilder" + stnetwork "github.com/filecoin-project/go-state-types/network" "github.com/ipfs/go-cid" + metrics "github.com/libp2p/go-libp2p-core/metrics" "github.com/libp2p/go-libp2p-core/network" "github.com/libp2p/go-libp2p-core/peer" + protocol "github.com/libp2p/go-libp2p-core/protocol" "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-bitfield" + "github.com/filecoin-project/go-fil-markets/piecestore" + "github.com/filecoin-project/go-fil-markets/retrievalmarket" + "github.com/filecoin-project/go-fil-markets/storagemarket" + "github.com/filecoin-project/go-jsonrpc/auth" + "github.com/filecoin-project/go-multistore" + "github.com/filecoin-project/go-state-types/abi" + 
"github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/go-state-types/crypto" + "github.com/filecoin-project/go-state-types/dline" + "github.com/filecoin-project/lotus/extern/sector-storage/fsutil" + "github.com/filecoin-project/lotus/extern/sector-storage/sealtasks" + "github.com/filecoin-project/lotus/extern/sector-storage/stores" + "github.com/filecoin-project/lotus/extern/sector-storage/storiface" + marketevents "github.com/filecoin-project/lotus/markets/loggers" + "github.com/filecoin-project/specs-storage/storage" + "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/chain/actors" - "github.com/filecoin-project/lotus/chain/store" + "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/chain/actors/builtin/miner" + "github.com/filecoin-project/lotus/chain/actors/builtin/paych" "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/node/modules/dtypes" ) // All permissions are listed in permissioned.go @@ -21,17 +44,30 @@ var _ = AllPermissions type CommonStruct struct { Internal struct { - AuthVerify func(ctx context.Context, token string) ([]api.Permission, error) `perm:"read"` - AuthNew func(ctx context.Context, perms []api.Permission) ([]byte, error) `perm:"admin"` + AuthVerify func(ctx context.Context, token string) ([]auth.Permission, error) `perm:"read"` + AuthNew func(ctx context.Context, perms []auth.Permission) ([]byte, error) `perm:"admin"` - NetConnectedness func(context.Context, peer.ID) (network.Connectedness, error) `perm:"read"` - NetPeers func(context.Context) ([]peer.AddrInfo, error) `perm:"read"` - NetConnect func(context.Context, peer.AddrInfo) error `perm:"write"` - NetAddrsListen func(context.Context) (peer.AddrInfo, error) `perm:"read"` - NetDisconnect func(context.Context, peer.ID) error `perm:"write"` + NetConnectedness func(context.Context, peer.ID) (network.Connectedness, error) `perm:"read"` + NetPeers func(context.Context) 
([]peer.AddrInfo, error) `perm:"read"` + NetConnect func(context.Context, peer.AddrInfo) error `perm:"write"` + NetAddrsListen func(context.Context) (peer.AddrInfo, error) `perm:"read"` + NetDisconnect func(context.Context, peer.ID) error `perm:"write"` + NetFindPeer func(context.Context, peer.ID) (peer.AddrInfo, error) `perm:"read"` + NetPubsubScores func(context.Context) ([]api.PubsubScore, error) `perm:"read"` + NetAutoNatStatus func(context.Context) (api.NatInfo, error) `perm:"read"` + NetBandwidthStats func(ctx context.Context) (metrics.Stats, error) `perm:"read"` + NetBandwidthStatsByPeer func(ctx context.Context) (map[string]metrics.Stats, error) `perm:"read"` + NetBandwidthStatsByProtocol func(ctx context.Context) (map[protocol.ID]metrics.Stats, error) `perm:"read"` + NetAgentVersion func(ctx context.Context, p peer.ID) (string, error) `perm:"read"` ID func(context.Context) (peer.ID, error) `perm:"read"` Version func(context.Context) (api.Version, error) `perm:"read"` + + LogList func(context.Context) ([]string, error) `perm:"write"` + LogSetLevel func(context.Context, string, string) error `perm:"write"` + + Shutdown func(context.Context) error `perm:"admin"` + Closing func(context.Context) (<-chan struct{}, error) `perm:"read"` } } @@ -40,132 +76,294 @@ type FullNodeStruct struct { CommonStruct Internal struct { - ChainNotify func(context.Context) (<-chan []*store.HeadChange, error) `perm:"read"` - ChainHead func(context.Context) (*types.TipSet, error) `perm:"read"` - ChainGetRandomness func(context.Context, types.TipSetKey, int64) ([]byte, error) `perm:"read"` - ChainGetBlock func(context.Context, cid.Cid) (*types.BlockHeader, error) `perm:"read"` - ChainGetTipSet func(context.Context, types.TipSetKey) (*types.TipSet, error) `perm:"read"` - ChainGetBlockMessages func(context.Context, cid.Cid) (*api.BlockMessages, error) `perm:"read"` - ChainGetParentReceipts func(context.Context, cid.Cid) ([]*types.MessageReceipt, error) `perm:"read"` - 
ChainGetParentMessages func(context.Context, cid.Cid) ([]api.Message, error) `perm:"read"` - ChainGetTipSetByHeight func(context.Context, uint64, *types.TipSet) (*types.TipSet, error) `perm:"read"` - ChainReadObj func(context.Context, cid.Cid) ([]byte, error) `perm:"read"` - ChainSetHead func(context.Context, *types.TipSet) error `perm:"admin"` - ChainGetGenesis func(context.Context) (*types.TipSet, error) `perm:"read"` - ChainTipSetWeight func(context.Context, *types.TipSet) (types.BigInt, error) `perm:"read"` - ChainGetNode func(ctx context.Context, p string) (interface{}, error) `perm:"read"` - ChainGetMessage func(context.Context, cid.Cid) (*types.Message, error) `perm:"read"` + ChainNotify func(context.Context) (<-chan []*api.HeadChange, error) `perm:"read"` + ChainHead func(context.Context) (*types.TipSet, error) `perm:"read"` + ChainGetRandomnessFromTickets func(context.Context, types.TipSetKey, crypto.DomainSeparationTag, abi.ChainEpoch, []byte) (abi.Randomness, error) `perm:"read"` + ChainGetRandomnessFromBeacon func(context.Context, types.TipSetKey, crypto.DomainSeparationTag, abi.ChainEpoch, []byte) (abi.Randomness, error) `perm:"read"` + ChainGetBlock func(context.Context, cid.Cid) (*types.BlockHeader, error) `perm:"read"` + ChainGetTipSet func(context.Context, types.TipSetKey) (*types.TipSet, error) `perm:"read"` + ChainGetBlockMessages func(context.Context, cid.Cid) (*api.BlockMessages, error) `perm:"read"` + ChainGetParentReceipts func(context.Context, cid.Cid) ([]*types.MessageReceipt, error) `perm:"read"` + ChainGetParentMessages func(context.Context, cid.Cid) ([]api.Message, error) `perm:"read"` + ChainGetTipSetByHeight func(context.Context, abi.ChainEpoch, types.TipSetKey) (*types.TipSet, error) `perm:"read"` + ChainReadObj func(context.Context, cid.Cid) ([]byte, error) `perm:"read"` + ChainDeleteObj func(context.Context, cid.Cid) error `perm:"admin"` + ChainHasObj func(context.Context, cid.Cid) (bool, error) `perm:"read"` + ChainStatObj 
func(context.Context, cid.Cid, cid.Cid) (api.ObjStat, error) `perm:"read"` + ChainSetHead func(context.Context, types.TipSetKey) error `perm:"admin"` + ChainGetGenesis func(context.Context) (*types.TipSet, error) `perm:"read"` + ChainTipSetWeight func(context.Context, types.TipSetKey) (types.BigInt, error) `perm:"read"` + ChainGetNode func(ctx context.Context, p string) (*api.IpldObject, error) `perm:"read"` + ChainGetMessage func(context.Context, cid.Cid) (*types.Message, error) `perm:"read"` + ChainGetPath func(context.Context, types.TipSetKey, types.TipSetKey) ([]*api.HeadChange, error) `perm:"read"` + ChainExport func(context.Context, abi.ChainEpoch, bool, types.TipSetKey) (<-chan []byte, error) `perm:"read"` + + BeaconGetEntry func(ctx context.Context, epoch abi.ChainEpoch) (*types.BeaconEntry, error) `perm:"read"` + + GasEstimateGasPremium func(context.Context, uint64, address.Address, int64, types.TipSetKey) (types.BigInt, error) `perm:"read"` + GasEstimateGasLimit func(context.Context, *types.Message, types.TipSetKey) (int64, error) `perm:"read"` + GasEstimateFeeCap func(context.Context, *types.Message, int64, types.TipSetKey) (types.BigInt, error) `perm:"read"` + GasEstimateMessageGas func(context.Context, *types.Message, *api.MessageSendSpec, types.TipSetKey) (*types.Message, error) `perm:"read"` SyncState func(context.Context) (*api.SyncState, error) `perm:"read"` SyncSubmitBlock func(ctx context.Context, blk *types.BlockMsg) error `perm:"write"` SyncIncomingBlocks func(ctx context.Context) (<-chan *types.BlockHeader, error) `perm:"read"` + SyncCheckpoint func(ctx context.Context, key types.TipSetKey) error `perm:"admin"` SyncMarkBad func(ctx context.Context, bcid cid.Cid) error `perm:"admin"` + SyncUnmarkBad func(ctx context.Context, bcid cid.Cid) error `perm:"admin"` + SyncCheckBad func(ctx context.Context, bcid cid.Cid) (string, error) `perm:"read"` - MpoolPending func(context.Context, *types.TipSet) ([]*types.SignedMessage, error) `perm:"read"` - 
MpoolPush func(context.Context, *types.SignedMessage) (cid.Cid, error) `perm:"write"` - MpoolPushMessage func(context.Context, *types.Message) (*types.SignedMessage, error) `perm:"sign"` - MpoolGetNonce func(context.Context, address.Address) (uint64, error) `perm:"read"` - MpoolSub func(context.Context) (<-chan api.MpoolUpdate, error) `perm:"read"` + MpoolGetConfig func(context.Context) (*types.MpoolConfig, error) `perm:"read"` + MpoolSetConfig func(context.Context, *types.MpoolConfig) error `perm:"write"` - MinerCreateBlock func(context.Context, address.Address, *types.TipSet, *types.Ticket, *types.EPostProof, []*types.SignedMessage, uint64, uint64) (*types.BlockMsg, error) `perm:"write"` + MpoolSelect func(context.Context, types.TipSetKey, float64) ([]*types.SignedMessage, error) `perm:"read"` - WalletNew func(context.Context, string) (address.Address, error) `perm:"write"` + MpoolPending func(context.Context, types.TipSetKey) ([]*types.SignedMessage, error) `perm:"read"` + MpoolClear func(context.Context, bool) error `perm:"write"` + + MpoolPush func(context.Context, *types.SignedMessage) (cid.Cid, error) `perm:"write"` + MpoolPushMessage func(context.Context, *types.Message, *api.MessageSendSpec) (*types.SignedMessage, error) `perm:"sign"` + MpoolGetNonce func(context.Context, address.Address) (uint64, error) `perm:"read"` + MpoolSub func(context.Context) (<-chan api.MpoolUpdate, error) `perm:"read"` + + MinerGetBaseInfo func(context.Context, address.Address, abi.ChainEpoch, types.TipSetKey) (*api.MiningBaseInfo, error) `perm:"read"` + MinerCreateBlock func(context.Context, *api.BlockTemplate) (*types.BlockMsg, error) `perm:"write"` + + WalletNew func(context.Context, crypto.SigType) (address.Address, error) `perm:"write"` WalletHas func(context.Context, address.Address) (bool, error) `perm:"write"` WalletList func(context.Context) ([]address.Address, error) `perm:"write"` WalletBalance func(context.Context, address.Address) (types.BigInt, error) `perm:"read"` 
- WalletSign func(context.Context, address.Address, []byte) (*types.Signature, error) `perm:"sign"` + WalletSign func(context.Context, address.Address, []byte) (*crypto.Signature, error) `perm:"sign"` WalletSignMessage func(context.Context, address.Address, *types.Message) (*types.SignedMessage, error) `perm:"sign"` + WalletVerify func(context.Context, address.Address, []byte, *crypto.Signature) (bool, error) `perm:"read"` WalletDefaultAddress func(context.Context) (address.Address, error) `perm:"write"` WalletSetDefault func(context.Context, address.Address) error `perm:"admin"` WalletExport func(context.Context, address.Address) (*types.KeyInfo, error) `perm:"admin"` WalletImport func(context.Context, *types.KeyInfo) (address.Address, error) `perm:"admin"` + WalletDelete func(context.Context, address.Address) error `perm:"write"` - ClientImport func(ctx context.Context, path string) (cid.Cid, error) `perm:"admin"` - ClientListImports func(ctx context.Context) ([]api.Import, error) `perm:"write"` - ClientHasLocal func(ctx context.Context, root cid.Cid) (bool, error) `perm:"write"` - ClientFindData func(ctx context.Context, root cid.Cid) ([]api.QueryOffer, error) `perm:"read"` - ClientStartDeal func(ctx context.Context, data cid.Cid, addr address.Address, miner address.Address, price types.BigInt, blocksDuration uint64) (*cid.Cid, error) `perm:"admin"` - ClientGetDealInfo func(context.Context, cid.Cid) (*api.DealInfo, error) `perm:"read"` - ClientListDeals func(ctx context.Context) ([]api.DealInfo, error) `perm:"write"` - ClientRetrieve func(ctx context.Context, order api.RetrievalOrder, path string) error `perm:"admin"` - ClientQueryAsk func(ctx context.Context, p peer.ID, miner address.Address) (*types.SignedStorageAsk, error) `perm:"read"` + ClientImport func(ctx context.Context, ref api.FileRef) (*api.ImportRes, error) `perm:"admin"` + ClientListImports func(ctx context.Context) ([]api.Import, error) `perm:"write"` + ClientRemoveImport func(ctx context.Context, 
importID multistore.StoreID) error `perm:"admin"` + ClientHasLocal func(ctx context.Context, root cid.Cid) (bool, error) `perm:"write"` + ClientFindData func(ctx context.Context, root cid.Cid, piece *cid.Cid) ([]api.QueryOffer, error) `perm:"read"` + ClientMinerQueryOffer func(ctx context.Context, miner address.Address, root cid.Cid, piece *cid.Cid) (api.QueryOffer, error) `perm:"read"` + ClientStartDeal func(ctx context.Context, params *api.StartDealParams) (*cid.Cid, error) `perm:"admin"` + ClientGetDealInfo func(context.Context, cid.Cid) (*api.DealInfo, error) `perm:"read"` + ClientListDeals func(ctx context.Context) ([]api.DealInfo, error) `perm:"write"` + ClientGetDealUpdates func(ctx context.Context) (<-chan api.DealInfo, error) `perm:"read"` + ClientRetrieve func(ctx context.Context, order api.RetrievalOrder, ref *api.FileRef) error `perm:"admin"` + ClientRetrieveWithEvents func(ctx context.Context, order api.RetrievalOrder, ref *api.FileRef) (<-chan marketevents.RetrievalEvent, error) `perm:"admin"` + ClientQueryAsk func(ctx context.Context, p peer.ID, miner address.Address) (*storagemarket.SignedStorageAsk, error) `perm:"read"` + ClientCalcCommP func(ctx context.Context, inpath string) (*api.CommPRet, error) `perm:"read"` + ClientGenCar func(ctx context.Context, ref api.FileRef, outpath string) error `perm:"write"` + ClientDealSize func(ctx context.Context, root cid.Cid) (api.DataSize, error) `perm:"read"` + ClientListDataTransfers func(ctx context.Context) ([]api.DataTransferChannel, error) `perm:"write"` + ClientDataTransferUpdates func(ctx context.Context) (<-chan api.DataTransferChannel, error) `perm:"write"` + ClientRetrieveTryRestartInsufficientFunds func(ctx context.Context, paymentChannel address.Address) error `perm:"write"` - StateMinerSectors func(context.Context, address.Address, *types.TipSet) ([]*api.ChainSectorInfo, error) `perm:"read"` - StateMinerProvingSet func(context.Context, address.Address, *types.TipSet) ([]*api.ChainSectorInfo, 
error) `perm:"read"` - StateMinerPower func(context.Context, address.Address, *types.TipSet) (api.MinerPower, error) `perm:"read"` - StateMinerWorker func(context.Context, address.Address, *types.TipSet) (address.Address, error) `perm:"read"` - StateMinerPeerID func(ctx context.Context, m address.Address, ts *types.TipSet) (peer.ID, error) `perm:"read"` - StateMinerElectionPeriodStart func(ctx context.Context, actor address.Address, ts *types.TipSet) (uint64, error) `perm:"read"` - StateMinerSectorSize func(context.Context, address.Address, *types.TipSet) (uint64, error) `perm:"read"` - StateCall func(context.Context, *types.Message, *types.TipSet) (*types.MessageReceipt, error) `perm:"read"` - StateReplay func(context.Context, *types.TipSet, cid.Cid) (*api.ReplayResults, error) `perm:"read"` - StateGetActor func(context.Context, address.Address, *types.TipSet) (*types.Actor, error) `perm:"read"` - StateReadState func(context.Context, *types.Actor, *types.TipSet) (*api.ActorState, error) `perm:"read"` - StatePledgeCollateral func(context.Context, *types.TipSet) (types.BigInt, error) `perm:"read"` - StateWaitMsg func(context.Context, cid.Cid) (*api.MsgWait, error) `perm:"read"` - StateListMiners func(context.Context, *types.TipSet) ([]address.Address, error) `perm:"read"` - StateListActors func(context.Context, *types.TipSet) ([]address.Address, error) `perm:"read"` - StateMarketBalance func(context.Context, address.Address, *types.TipSet) (actors.StorageParticipantBalance, error) `perm:"read"` - StateMarketParticipants func(context.Context, *types.TipSet) (map[string]actors.StorageParticipantBalance, error) `perm:"read"` - StateMarketDeals func(context.Context, *types.TipSet) (map[string]actors.OnChainDeal, error) `perm:"read"` - StateMarketStorageDeal func(context.Context, uint64, *types.TipSet) (*actors.OnChainDeal, error) `perm:"read"` - StateLookupID func(ctx context.Context, addr address.Address, ts *types.TipSet) (address.Address, error) `perm:"read"` - 
StateChangedActors func(context.Context, cid.Cid, cid.Cid) (map[string]types.Actor, error) `perm:"read"` - StateGetReceipt func(context.Context, cid.Cid, *types.TipSet) (*types.MessageReceipt, error) `perm:"read"` - StateMinerSectorCount func(context.Context, address.Address, *types.TipSet) (api.MinerSectors, error) `perm:"read"` - StateListMessages func(ctx context.Context, match *types.Message, ts *types.TipSet, toht uint64) ([]cid.Cid, error) `perm:"read"` + StateNetworkName func(context.Context) (dtypes.NetworkName, error) `perm:"read"` + StateMinerSectors func(context.Context, address.Address, *bitfield.BitField, types.TipSetKey) ([]*miner.SectorOnChainInfo, error) `perm:"read"` + StateMinerActiveSectors func(context.Context, address.Address, types.TipSetKey) ([]*miner.SectorOnChainInfo, error) `perm:"read"` + StateMinerProvingDeadline func(context.Context, address.Address, types.TipSetKey) (*dline.Info, error) `perm:"read"` + StateMinerPower func(context.Context, address.Address, types.TipSetKey) (*api.MinerPower, error) `perm:"read"` + StateMinerInfo func(context.Context, address.Address, types.TipSetKey) (miner.MinerInfo, error) `perm:"read"` + StateMinerDeadlines func(context.Context, address.Address, types.TipSetKey) ([]api.Deadline, error) `perm:"read"` + StateMinerPartitions func(ctx context.Context, m address.Address, dlIdx uint64, tsk types.TipSetKey) ([]api.Partition, error) `perm:"read"` + StateMinerFaults func(context.Context, address.Address, types.TipSetKey) (bitfield.BitField, error) `perm:"read"` + StateAllMinerFaults func(context.Context, abi.ChainEpoch, types.TipSetKey) ([]*api.Fault, error) `perm:"read"` + StateMinerRecoveries func(context.Context, address.Address, types.TipSetKey) (bitfield.BitField, error) `perm:"read"` + StateMinerPreCommitDepositForPower func(context.Context, address.Address, miner.SectorPreCommitInfo, types.TipSetKey) (types.BigInt, error) `perm:"read"` + StateMinerInitialPledgeCollateral func(context.Context, 
address.Address, miner.SectorPreCommitInfo, types.TipSetKey) (types.BigInt, error) `perm:"read"` + StateMinerAvailableBalance func(context.Context, address.Address, types.TipSetKey) (types.BigInt, error) `perm:"read"` + StateSectorPreCommitInfo func(context.Context, address.Address, abi.SectorNumber, types.TipSetKey) (miner.SectorPreCommitOnChainInfo, error) `perm:"read"` + StateSectorGetInfo func(context.Context, address.Address, abi.SectorNumber, types.TipSetKey) (*miner.SectorOnChainInfo, error) `perm:"read"` + StateSectorExpiration func(context.Context, address.Address, abi.SectorNumber, types.TipSetKey) (*miner.SectorExpiration, error) `perm:"read"` + StateSectorPartition func(context.Context, address.Address, abi.SectorNumber, types.TipSetKey) (*miner.SectorLocation, error) `perm:"read"` + StateCall func(context.Context, *types.Message, types.TipSetKey) (*api.InvocResult, error) `perm:"read"` + StateReplay func(context.Context, types.TipSetKey, cid.Cid) (*api.InvocResult, error) `perm:"read"` + StateGetActor func(context.Context, address.Address, types.TipSetKey) (*types.Actor, error) `perm:"read"` + StateReadState func(context.Context, address.Address, types.TipSetKey) (*api.ActorState, error) `perm:"read"` + StateMsgGasCost func(context.Context, cid.Cid, types.TipSetKey) (*api.MsgGasCost, error) `perm:"read"` + StateWaitMsg func(ctx context.Context, cid cid.Cid, confidence uint64) (*api.MsgLookup, error) `perm:"read"` + StateSearchMsg func(context.Context, cid.Cid) (*api.MsgLookup, error) `perm:"read"` + StateListMiners func(context.Context, types.TipSetKey) ([]address.Address, error) `perm:"read"` + StateListActors func(context.Context, types.TipSetKey) ([]address.Address, error) `perm:"read"` + StateMarketBalance func(context.Context, address.Address, types.TipSetKey) (api.MarketBalance, error) `perm:"read"` + StateMarketParticipants func(context.Context, types.TipSetKey) (map[string]api.MarketBalance, error) `perm:"read"` + StateMarketDeals 
func(context.Context, types.TipSetKey) (map[string]api.MarketDeal, error) `perm:"read"` + StateMarketStorageDeal func(context.Context, abi.DealID, types.TipSetKey) (*api.MarketDeal, error) `perm:"read"` + StateLookupID func(ctx context.Context, addr address.Address, tsk types.TipSetKey) (address.Address, error) `perm:"read"` + StateAccountKey func(context.Context, address.Address, types.TipSetKey) (address.Address, error) `perm:"read"` + StateChangedActors func(context.Context, cid.Cid, cid.Cid) (map[string]types.Actor, error) `perm:"read"` + StateGetReceipt func(context.Context, cid.Cid, types.TipSetKey) (*types.MessageReceipt, error) `perm:"read"` + StateMinerSectorCount func(context.Context, address.Address, types.TipSetKey) (api.MinerSectors, error) `perm:"read"` + StateListMessages func(ctx context.Context, match *types.Message, tsk types.TipSetKey, toht abi.ChainEpoch) ([]cid.Cid, error) `perm:"read"` + StateCompute func(context.Context, abi.ChainEpoch, []*types.Message, types.TipSetKey) (*api.ComputeStateOutput, error) `perm:"read"` + StateVerifiedClientStatus func(context.Context, address.Address, types.TipSetKey) (*abi.StoragePower, error) `perm:"read"` + StateDealProviderCollateralBounds func(context.Context, abi.PaddedPieceSize, bool, types.TipSetKey) (api.DealCollateralBounds, error) `perm:"read"` + StateCirculatingSupply func(context.Context, types.TipSetKey) (api.CirculatingSupply, error) `perm:"read"` + StateNetworkVersion func(context.Context, types.TipSetKey) (stnetwork.Version, error) `perm:"read"` - MarketEnsureAvailable func(context.Context, address.Address, types.BigInt) error `perm:"sign"` + MsigGetAvailableBalance func(context.Context, address.Address, types.TipSetKey) (types.BigInt, error) `perm:"read"` + MsigGetVested func(context.Context, address.Address, types.TipSetKey, types.TipSetKey) (types.BigInt, error) `perm:"read"` + MsigCreate func(context.Context, uint64, []address.Address, abi.ChainEpoch, types.BigInt, address.Address, 
types.BigInt) (cid.Cid, error) `perm:"sign"` + MsigPropose func(context.Context, address.Address, address.Address, types.BigInt, address.Address, uint64, []byte) (cid.Cid, error) `perm:"sign"` + MsigApprove func(context.Context, address.Address, uint64, address.Address, address.Address, types.BigInt, address.Address, uint64, []byte) (cid.Cid, error) `perm:"sign"` + MsigCancel func(context.Context, address.Address, uint64, address.Address, types.BigInt, address.Address, uint64, []byte) (cid.Cid, error) `perm:"sign"` + MsigAddPropose func(context.Context, address.Address, address.Address, address.Address, bool) (cid.Cid, error) `perm:"sign"` + MsigAddApprove func(context.Context, address.Address, address.Address, uint64, address.Address, address.Address, bool) (cid.Cid, error) `perm:"sign"` + MsigAddCancel func(context.Context, address.Address, address.Address, uint64, address.Address, bool) (cid.Cid, error) `perm:"sign"` + MsigSwapPropose func(context.Context, address.Address, address.Address, address.Address, address.Address) (cid.Cid, error) `perm:"sign"` + MsigSwapApprove func(context.Context, address.Address, address.Address, uint64, address.Address, address.Address, address.Address) (cid.Cid, error) `perm:"sign"` + MsigSwapCancel func(context.Context, address.Address, address.Address, uint64, address.Address, address.Address) (cid.Cid, error) `perm:"sign"` - PaychGet func(ctx context.Context, from, to address.Address, ensureFunds types.BigInt) (*api.ChannelInfo, error) `perm:"sign"` - PaychList func(context.Context) ([]address.Address, error) `perm:"read"` - PaychStatus func(context.Context, address.Address) (*api.PaychStatus, error) `perm:"read"` - PaychClose func(context.Context, address.Address) (cid.Cid, error) `perm:"sign"` - PaychAllocateLane func(context.Context, address.Address) (uint64, error) `perm:"sign"` - PaychNewPayment func(ctx context.Context, from, to address.Address, vouchers []api.VoucherSpec) (*api.PaymentInfo, error) `perm:"sign"` - 
PaychVoucherCheck func(context.Context, *types.SignedVoucher) error `perm:"read"` - PaychVoucherCheckValid func(context.Context, address.Address, *types.SignedVoucher) error `perm:"read"` - PaychVoucherCheckSpendable func(context.Context, address.Address, *types.SignedVoucher, []byte, []byte) (bool, error) `perm:"read"` - PaychVoucherAdd func(context.Context, address.Address, *types.SignedVoucher, []byte, types.BigInt) (types.BigInt, error) `perm:"write"` - PaychVoucherCreate func(context.Context, address.Address, types.BigInt, uint64) (*types.SignedVoucher, error) `perm:"sign"` - PaychVoucherList func(context.Context, address.Address) ([]*types.SignedVoucher, error) `perm:"write"` - PaychVoucherSubmit func(context.Context, address.Address, *types.SignedVoucher) (cid.Cid, error) `perm:"sign"` + MarketEnsureAvailable func(context.Context, address.Address, address.Address, types.BigInt) (cid.Cid, error) `perm:"sign"` + + PaychGet func(ctx context.Context, from, to address.Address, amt types.BigInt) (*api.ChannelInfo, error) `perm:"sign"` + PaychGetWaitReady func(context.Context, cid.Cid) (address.Address, error) `perm:"sign"` + PaychAvailableFunds func(context.Context, address.Address) (*api.ChannelAvailableFunds, error) `perm:"sign"` + PaychAvailableFundsByFromTo func(context.Context, address.Address, address.Address) (*api.ChannelAvailableFunds, error) `perm:"sign"` + PaychList func(context.Context) ([]address.Address, error) `perm:"read"` + PaychStatus func(context.Context, address.Address) (*api.PaychStatus, error) `perm:"read"` + PaychSettle func(context.Context, address.Address) (cid.Cid, error) `perm:"sign"` + PaychCollect func(context.Context, address.Address) (cid.Cid, error) `perm:"sign"` + PaychAllocateLane func(context.Context, address.Address) (uint64, error) `perm:"sign"` + PaychNewPayment func(ctx context.Context, from, to address.Address, vouchers []api.VoucherSpec) (*api.PaymentInfo, error) `perm:"sign"` + PaychVoucherCheck func(context.Context, 
*paych.SignedVoucher) error `perm:"read"` + PaychVoucherCheckValid func(context.Context, address.Address, *paych.SignedVoucher) error `perm:"read"` + PaychVoucherCheckSpendable func(context.Context, address.Address, *paych.SignedVoucher, []byte, []byte) (bool, error) `perm:"read"` + PaychVoucherAdd func(context.Context, address.Address, *paych.SignedVoucher, []byte, types.BigInt) (types.BigInt, error) `perm:"write"` + PaychVoucherCreate func(context.Context, address.Address, big.Int, uint64) (*api.VoucherCreateResult, error) `perm:"sign"` + PaychVoucherList func(context.Context, address.Address) ([]*paych.SignedVoucher, error) `perm:"write"` + PaychVoucherSubmit func(context.Context, address.Address, *paych.SignedVoucher, []byte, []byte) (cid.Cid, error) `perm:"sign"` } } -func (c *FullNodeStruct) StateMinerSectorCount(ctx context.Context, addr address.Address, ts *types.TipSet) (api.MinerSectors, error) { - return c.Internal.StateMinerSectorCount(ctx, addr, ts) +func (c *FullNodeStruct) StateMinerSectorCount(ctx context.Context, addr address.Address, tsk types.TipSetKey) (api.MinerSectors, error) { + return c.Internal.StateMinerSectorCount(ctx, addr, tsk) } type StorageMinerStruct struct { CommonStruct Internal struct { - ActorAddress func(context.Context) (address.Address, error) `perm:"read"` - ActorSectorSize func(context.Context, address.Address) (uint64, error) `perm:"read"` + ActorAddress func(context.Context) (address.Address, error) `perm:"read"` + ActorSectorSize func(context.Context, address.Address) (abi.SectorSize, error) `perm:"read"` + + MiningBase func(context.Context) (*types.TipSet, error) `perm:"read"` + + MarketImportDealData func(context.Context, cid.Cid, string) error `perm:"write"` + MarketListDeals func(ctx context.Context) ([]api.MarketDeal, error) `perm:"read"` + MarketListRetrievalDeals func(ctx context.Context) ([]retrievalmarket.ProviderDealState, error) `perm:"read"` + MarketGetDealUpdates func(ctx context.Context) (<-chan 
storagemarket.MinerDeal, error) `perm:"read"` + MarketListIncompleteDeals func(ctx context.Context) ([]storagemarket.MinerDeal, error) `perm:"read"` + MarketSetAsk func(ctx context.Context, price types.BigInt, verifiedPrice types.BigInt, duration abi.ChainEpoch, minPieceSize abi.PaddedPieceSize, maxPieceSize abi.PaddedPieceSize) error `perm:"admin"` + MarketGetAsk func(ctx context.Context) (*storagemarket.SignedStorageAsk, error) `perm:"read"` + MarketSetRetrievalAsk func(ctx context.Context, rask *retrievalmarket.Ask) error `perm:"admin"` + MarketGetRetrievalAsk func(ctx context.Context) (*retrievalmarket.Ask, error) `perm:"read"` + MarketListDataTransfers func(ctx context.Context) ([]api.DataTransferChannel, error) `perm:"write"` + MarketDataTransferUpdates func(ctx context.Context) (<-chan api.DataTransferChannel, error) `perm:"write"` PledgeSector func(context.Context) error `perm:"write"` - SectorsStatus func(context.Context, uint64) (api.SectorInfo, error) `perm:"read"` - SectorsList func(context.Context) ([]uint64, error) `perm:"read"` - SectorsRefs func(context.Context) (map[string][]api.SealedRef, error) `perm:"read"` - SectorsUpdate func(context.Context, uint64, api.SectorState) error `perm:"write"` + SectorsStatus func(ctx context.Context, sid abi.SectorNumber, showOnChainInfo bool) (api.SectorInfo, error) `perm:"read"` + SectorsList func(context.Context) ([]abi.SectorNumber, error) `perm:"read"` + SectorsRefs func(context.Context) (map[string][]api.SealedRef, error) `perm:"read"` + SectorStartSealing func(context.Context, abi.SectorNumber) error `perm:"write"` + SectorSetSealDelay func(context.Context, time.Duration) error `perm:"write"` + SectorGetSealDelay func(context.Context) (time.Duration, error) `perm:"read"` + SectorSetExpectedSealDuration func(context.Context, time.Duration) error `perm:"write"` + SectorGetExpectedSealDuration func(context.Context) (time.Duration, error) `perm:"read"` + SectorsUpdate func(context.Context, abi.SectorNumber, 
api.SectorState) error `perm:"admin"` + SectorRemove func(context.Context, abi.SectorNumber) error `perm:"admin"` + SectorMarkForUpgrade func(ctx context.Context, id abi.SectorNumber) error `perm:"admin"` - WorkerStats func(context.Context) (sectorbuilder.WorkerStats, error) `perm:"read"` + WorkerConnect func(context.Context, string) error `perm:"admin"` // TODO: worker perm + WorkerStats func(context.Context) (map[uint64]storiface.WorkerStats, error) `perm:"admin"` + WorkerJobs func(context.Context) (map[uint64][]storiface.WorkerJob, error) `perm:"admin"` - WorkerQueue func(ctx context.Context, cfg sectorbuilder.WorkerCfg) (<-chan sectorbuilder.WorkerTask, error) `perm:"admin"` // TODO: worker perm - WorkerDone func(ctx context.Context, task uint64, res sectorbuilder.SealRes) error `perm:"admin"` + SealingSchedDiag func(context.Context) (interface{}, error) `perm:"admin"` + + StorageList func(context.Context) (map[stores.ID][]stores.Decl, error) `perm:"admin"` + StorageLocal func(context.Context) (map[stores.ID]string, error) `perm:"admin"` + StorageStat func(context.Context, stores.ID) (fsutil.FsStat, error) `perm:"admin"` + StorageAttach func(context.Context, stores.StorageInfo, fsutil.FsStat) error `perm:"admin"` + StorageDeclareSector func(context.Context, stores.ID, abi.SectorID, stores.SectorFileType, bool) error `perm:"admin"` + StorageDropSector func(context.Context, stores.ID, abi.SectorID, stores.SectorFileType) error `perm:"admin"` + StorageFindSector func(context.Context, abi.SectorID, stores.SectorFileType, abi.RegisteredSealProof, bool) ([]stores.SectorStorageInfo, error) `perm:"admin"` + StorageInfo func(context.Context, stores.ID) (stores.StorageInfo, error) `perm:"admin"` + StorageBestAlloc func(ctx context.Context, allocate stores.SectorFileType, spt abi.RegisteredSealProof, sealing stores.PathType) ([]stores.StorageInfo, error) `perm:"admin"` + StorageReportHealth func(ctx context.Context, id stores.ID, report stores.HealthReport) error 
`perm:"admin"` + StorageLock func(ctx context.Context, sector abi.SectorID, read stores.SectorFileType, write stores.SectorFileType) error `perm:"admin"` + StorageTryLock func(ctx context.Context, sector abi.SectorID, read stores.SectorFileType, write stores.SectorFileType) (bool, error) `perm:"admin"` + + DealsImportData func(ctx context.Context, dealPropCid cid.Cid, file string) error `perm:"write"` + DealsList func(ctx context.Context) ([]api.MarketDeal, error) `perm:"read"` + DealsConsiderOnlineStorageDeals func(context.Context) (bool, error) `perm:"read"` + DealsSetConsiderOnlineStorageDeals func(context.Context, bool) error `perm:"admin"` + DealsConsiderOnlineRetrievalDeals func(context.Context) (bool, error) `perm:"read"` + DealsSetConsiderOnlineRetrievalDeals func(context.Context, bool) error `perm:"admin"` + DealsConsiderOfflineStorageDeals func(context.Context) (bool, error) `perm:"read"` + DealsSetConsiderOfflineStorageDeals func(context.Context, bool) error `perm:"admin"` + DealsConsiderOfflineRetrievalDeals func(context.Context) (bool, error) `perm:"read"` + DealsSetConsiderOfflineRetrievalDeals func(context.Context, bool) error `perm:"admin"` + DealsPieceCidBlocklist func(context.Context) ([]cid.Cid, error) `perm:"read"` + DealsSetPieceCidBlocklist func(context.Context, []cid.Cid) error `perm:"admin"` + + StorageAddLocal func(ctx context.Context, path string) error `perm:"admin"` + + PiecesListPieces func(ctx context.Context) ([]cid.Cid, error) `perm:"read"` + PiecesListCidInfos func(ctx context.Context) ([]cid.Cid, error) `perm:"read"` + PiecesGetPieceInfo func(ctx context.Context, pieceCid cid.Cid) (*piecestore.PieceInfo, error) `perm:"read"` + PiecesGetCIDInfo func(ctx context.Context, payloadCid cid.Cid) (*piecestore.CIDInfo, error) `perm:"read"` } } -func (c *CommonStruct) AuthVerify(ctx context.Context, token string) ([]api.Permission, error) { +type WorkerStruct struct { + Internal struct { + // TODO: lower perms + + Version 
func(context.Context) (build.Version, error) `perm:"admin"` + + TaskTypes func(context.Context) (map[sealtasks.TaskType]struct{}, error) `perm:"admin"` + Paths func(context.Context) ([]stores.StoragePath, error) `perm:"admin"` + Info func(context.Context) (storiface.WorkerInfo, error) `perm:"admin"` + + AddPiece func(ctx context.Context, sector abi.SectorID, pieceSizes []abi.UnpaddedPieceSize, newPieceSize abi.UnpaddedPieceSize, pieceData storage.Data) (abi.PieceInfo, error) `perm:"admin"` + SealPreCommit1 func(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, pieces []abi.PieceInfo) (storage.PreCommit1Out, error) `perm:"admin"` + SealPreCommit2 func(context.Context, abi.SectorID, storage.PreCommit1Out) (cids storage.SectorCids, err error) `perm:"admin"` + SealCommit1 func(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (storage.Commit1Out, error) `perm:"admin"` + SealCommit2 func(context.Context, abi.SectorID, storage.Commit1Out) (storage.Proof, error) `perm:"admin"` + FinalizeSector func(context.Context, abi.SectorID, []storage.Range) error `perm:"admin"` + ReleaseUnsealed func(ctx context.Context, sector abi.SectorID, safeToFree []storage.Range) error `perm:"admin"` + Remove func(ctx context.Context, sector abi.SectorID) error `perm:"admin"` + MoveStorage func(ctx context.Context, sector abi.SectorID, types stores.SectorFileType) error `perm:"admin"` + StorageAddLocal func(ctx context.Context, path string) error `perm:"admin"` + + UnsealPiece func(context.Context, abi.SectorID, storiface.UnpaddedByteIndex, abi.UnpaddedPieceSize, abi.SealRandomness, cid.Cid) error `perm:"admin"` + ReadPiece func(context.Context, io.Writer, abi.SectorID, storiface.UnpaddedByteIndex, abi.UnpaddedPieceSize) (bool, error) `perm:"admin"` + + Fetch func(context.Context, abi.SectorID, stores.SectorFileType, stores.PathType, stores.AcquireMode) error `perm:"admin"` 
+ + Closing func(context.Context) (<-chan struct{}, error) `perm:"admin"` + } +} + +// CommonStruct + +func (c *CommonStruct) AuthVerify(ctx context.Context, token string) ([]auth.Permission, error) { return c.Internal.AuthVerify(ctx, token) } -func (c *CommonStruct) AuthNew(ctx context.Context, perms []api.Permission) ([]byte, error) { +func (c *CommonStruct) AuthNew(ctx context.Context, perms []auth.Permission) ([]byte, error) { return c.Internal.AuthNew(ctx, perms) } +func (c *CommonStruct) NetPubsubScores(ctx context.Context) ([]api.PubsubScore, error) { + return c.Internal.NetPubsubScores(ctx) +} + func (c *CommonStruct) NetConnectedness(ctx context.Context, pid peer.ID) (network.Connectedness, error) { return c.Internal.NetConnectedness(ctx, pid) } @@ -186,6 +384,30 @@ func (c *CommonStruct) NetDisconnect(ctx context.Context, p peer.ID) error { return c.Internal.NetDisconnect(ctx, p) } +func (c *CommonStruct) NetFindPeer(ctx context.Context, p peer.ID) (peer.AddrInfo, error) { + return c.Internal.NetFindPeer(ctx, p) +} + +func (c *CommonStruct) NetAutoNatStatus(ctx context.Context) (api.NatInfo, error) { + return c.Internal.NetAutoNatStatus(ctx) +} + +func (c *CommonStruct) NetBandwidthStats(ctx context.Context) (metrics.Stats, error) { + return c.Internal.NetBandwidthStats(ctx) +} + +func (c *CommonStruct) NetBandwidthStatsByPeer(ctx context.Context) (map[string]metrics.Stats, error) { + return c.Internal.NetBandwidthStatsByPeer(ctx) +} + +func (c *CommonStruct) NetBandwidthStatsByProtocol(ctx context.Context) (map[protocol.ID]metrics.Stats, error) { + return c.Internal.NetBandwidthStatsByProtocol(ctx) +} + +func (c *CommonStruct) NetAgentVersion(ctx context.Context, p peer.ID) (string, error) { + return c.Internal.NetAgentVersion(ctx, p) +} + // ID implements API.ID func (c *CommonStruct) ID(ctx context.Context) (peer.ID, error) { return c.Internal.ID(ctx) @@ -196,25 +418,52 @@ func (c *CommonStruct) Version(ctx context.Context) (api.Version, error) { 
return c.Internal.Version(ctx) } +func (c *CommonStruct) LogList(ctx context.Context) ([]string, error) { + return c.Internal.LogList(ctx) +} + +func (c *CommonStruct) LogSetLevel(ctx context.Context, group, level string) error { + return c.Internal.LogSetLevel(ctx, group, level) +} + +func (c *CommonStruct) Shutdown(ctx context.Context) error { + return c.Internal.Shutdown(ctx) +} + +func (c *CommonStruct) Closing(ctx context.Context) (<-chan struct{}, error) { + return c.Internal.Closing(ctx) +} + +// FullNodeStruct + func (c *FullNodeStruct) ClientListImports(ctx context.Context) ([]api.Import, error) { return c.Internal.ClientListImports(ctx) } -func (c *FullNodeStruct) ClientImport(ctx context.Context, path string) (cid.Cid, error) { - return c.Internal.ClientImport(ctx, path) +func (c *FullNodeStruct) ClientRemoveImport(ctx context.Context, importID multistore.StoreID) error { + return c.Internal.ClientRemoveImport(ctx, importID) +} + +func (c *FullNodeStruct) ClientImport(ctx context.Context, ref api.FileRef) (*api.ImportRes, error) { + return c.Internal.ClientImport(ctx, ref) } func (c *FullNodeStruct) ClientHasLocal(ctx context.Context, root cid.Cid) (bool, error) { return c.Internal.ClientHasLocal(ctx, root) } -func (c *FullNodeStruct) ClientFindData(ctx context.Context, root cid.Cid) ([]api.QueryOffer, error) { - return c.Internal.ClientFindData(ctx, root) +func (c *FullNodeStruct) ClientFindData(ctx context.Context, root cid.Cid, piece *cid.Cid) ([]api.QueryOffer, error) { + return c.Internal.ClientFindData(ctx, root, piece) } -func (c *FullNodeStruct) ClientStartDeal(ctx context.Context, data cid.Cid, addr address.Address, miner address.Address, price types.BigInt, blocksDuration uint64) (*cid.Cid, error) { - return c.Internal.ClientStartDeal(ctx, data, addr, miner, price, blocksDuration) +func (c *FullNodeStruct) ClientMinerQueryOffer(ctx context.Context, miner address.Address, root cid.Cid, piece *cid.Cid) (api.QueryOffer, error) { + return 
c.Internal.ClientMinerQueryOffer(ctx, miner, root, piece) } + +func (c *FullNodeStruct) ClientStartDeal(ctx context.Context, params *api.StartDealParams) (*cid.Cid, error) { + return c.Internal.ClientStartDeal(ctx, params) +} + func (c *FullNodeStruct) ClientGetDealInfo(ctx context.Context, deal cid.Cid) (*api.DealInfo, error) { return c.Internal.ClientGetDealInfo(ctx, deal) } @@ -223,47 +472,118 @@ func (c *FullNodeStruct) ClientListDeals(ctx context.Context) ([]api.DealInfo, e return c.Internal.ClientListDeals(ctx) } -func (c *FullNodeStruct) ClientRetrieve(ctx context.Context, order api.RetrievalOrder, path string) error { - return c.Internal.ClientRetrieve(ctx, order, path) +func (c *FullNodeStruct) ClientGetDealUpdates(ctx context.Context) (<-chan api.DealInfo, error) { + return c.Internal.ClientGetDealUpdates(ctx) } -func (c *FullNodeStruct) ClientQueryAsk(ctx context.Context, p peer.ID, miner address.Address) (*types.SignedStorageAsk, error) { +func (c *FullNodeStruct) ClientRetrieve(ctx context.Context, order api.RetrievalOrder, ref *api.FileRef) error { + return c.Internal.ClientRetrieve(ctx, order, ref) +} + +func (c *FullNodeStruct) ClientRetrieveWithEvents(ctx context.Context, order api.RetrievalOrder, ref *api.FileRef) (<-chan marketevents.RetrievalEvent, error) { + return c.Internal.ClientRetrieveWithEvents(ctx, order, ref) +} + +func (c *FullNodeStruct) ClientQueryAsk(ctx context.Context, p peer.ID, miner address.Address) (*storagemarket.SignedStorageAsk, error) { return c.Internal.ClientQueryAsk(ctx, p, miner) } +func (c *FullNodeStruct) ClientCalcCommP(ctx context.Context, inpath string) (*api.CommPRet, error) { + return c.Internal.ClientCalcCommP(ctx, inpath) +} -func (c *FullNodeStruct) MpoolPending(ctx context.Context, ts *types.TipSet) ([]*types.SignedMessage, error) { - return c.Internal.MpoolPending(ctx, ts) +func (c *FullNodeStruct) ClientGenCar(ctx context.Context, ref api.FileRef, outpath string) error { + return 
c.Internal.ClientGenCar(ctx, ref, outpath) +} + +func (c *FullNodeStruct) ClientDealSize(ctx context.Context, root cid.Cid) (api.DataSize, error) { + return c.Internal.ClientDealSize(ctx, root) +} + +func (c *FullNodeStruct) ClientListDataTransfers(ctx context.Context) ([]api.DataTransferChannel, error) { + return c.Internal.ClientListDataTransfers(ctx) +} + +func (c *FullNodeStruct) ClientDataTransferUpdates(ctx context.Context) (<-chan api.DataTransferChannel, error) { + return c.Internal.ClientDataTransferUpdates(ctx) +} + +func (c *FullNodeStruct) ClientRetrieveTryRestartInsufficientFunds(ctx context.Context, paymentChannel address.Address) error { + return c.Internal.ClientRetrieveTryRestartInsufficientFunds(ctx, paymentChannel) +} + +func (c *FullNodeStruct) GasEstimateGasPremium(ctx context.Context, nblocksincl uint64, sender address.Address, gaslimit int64, tsk types.TipSetKey) (types.BigInt, error) { + return c.Internal.GasEstimateGasPremium(ctx, nblocksincl, sender, gaslimit, tsk) +} + +func (c *FullNodeStruct) GasEstimateFeeCap(ctx context.Context, msg *types.Message, maxqueueblks int64, tsk types.TipSetKey) (types.BigInt, error) { + return c.Internal.GasEstimateFeeCap(ctx, msg, maxqueueblks, tsk) +} + +func (c *FullNodeStruct) GasEstimateMessageGas(ctx context.Context, msg *types.Message, spec *api.MessageSendSpec, tsk types.TipSetKey) (*types.Message, error) { + return c.Internal.GasEstimateMessageGas(ctx, msg, spec, tsk) +} + +func (c *FullNodeStruct) GasEstimateGasLimit(ctx context.Context, msg *types.Message, tsk types.TipSetKey) (int64, error) { + return c.Internal.GasEstimateGasLimit(ctx, msg, tsk) +} + +func (c *FullNodeStruct) MpoolGetConfig(ctx context.Context) (*types.MpoolConfig, error) { + return c.Internal.MpoolGetConfig(ctx) +} + +func (c *FullNodeStruct) MpoolSetConfig(ctx context.Context, cfg *types.MpoolConfig) error { + return c.Internal.MpoolSetConfig(ctx, cfg) +} + +func (c *FullNodeStruct) MpoolSelect(ctx context.Context, tsk 
types.TipSetKey, tq float64) ([]*types.SignedMessage, error) { + return c.Internal.MpoolSelect(ctx, tsk, tq) +} + +func (c *FullNodeStruct) MpoolPending(ctx context.Context, tsk types.TipSetKey) ([]*types.SignedMessage, error) { + return c.Internal.MpoolPending(ctx, tsk) +} + +func (c *FullNodeStruct) MpoolClear(ctx context.Context, local bool) error { + return c.Internal.MpoolClear(ctx, local) } func (c *FullNodeStruct) MpoolPush(ctx context.Context, smsg *types.SignedMessage) (cid.Cid, error) { return c.Internal.MpoolPush(ctx, smsg) } -func (c *FullNodeStruct) MpoolPushMessage(ctx context.Context, msg *types.Message) (*types.SignedMessage, error) { - return c.Internal.MpoolPushMessage(ctx, msg) +func (c *FullNodeStruct) MpoolPushMessage(ctx context.Context, msg *types.Message, spec *api.MessageSendSpec) (*types.SignedMessage, error) { + return c.Internal.MpoolPushMessage(ctx, msg, spec) } func (c *FullNodeStruct) MpoolSub(ctx context.Context) (<-chan api.MpoolUpdate, error) { return c.Internal.MpoolSub(ctx) } -func (c *FullNodeStruct) MinerCreateBlock(ctx context.Context, addr address.Address, base *types.TipSet, ticket *types.Ticket, eproof *types.EPostProof, msgs []*types.SignedMessage, height, ts uint64) (*types.BlockMsg, error) { - return c.Internal.MinerCreateBlock(ctx, addr, base, ticket, eproof, msgs, height, ts) +func (c *FullNodeStruct) MinerGetBaseInfo(ctx context.Context, maddr address.Address, epoch abi.ChainEpoch, tsk types.TipSetKey) (*api.MiningBaseInfo, error) { + return c.Internal.MinerGetBaseInfo(ctx, maddr, epoch, tsk) +} + +func (c *FullNodeStruct) MinerCreateBlock(ctx context.Context, bt *api.BlockTemplate) (*types.BlockMsg, error) { + return c.Internal.MinerCreateBlock(ctx, bt) } func (c *FullNodeStruct) ChainHead(ctx context.Context) (*types.TipSet, error) { return c.Internal.ChainHead(ctx) } -func (c *FullNodeStruct) ChainGetRandomness(ctx context.Context, pts types.TipSetKey, round int64) ([]byte, error) { - return 
c.Internal.ChainGetRandomness(ctx, pts, round) +func (c *FullNodeStruct) ChainGetRandomnessFromTickets(ctx context.Context, tsk types.TipSetKey, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) (abi.Randomness, error) { + return c.Internal.ChainGetRandomnessFromTickets(ctx, tsk, personalization, randEpoch, entropy) } -func (c *FullNodeStruct) ChainGetTipSetByHeight(ctx context.Context, h uint64, ts *types.TipSet) (*types.TipSet, error) { - return c.Internal.ChainGetTipSetByHeight(ctx, h, ts) +func (c *FullNodeStruct) ChainGetRandomnessFromBeacon(ctx context.Context, tsk types.TipSetKey, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) (abi.Randomness, error) { + return c.Internal.ChainGetRandomnessFromBeacon(ctx, tsk, personalization, randEpoch, entropy) } -func (c *FullNodeStruct) WalletNew(ctx context.Context, typ string) (address.Address, error) { +func (c *FullNodeStruct) ChainGetTipSetByHeight(ctx context.Context, h abi.ChainEpoch, tsk types.TipSetKey) (*types.TipSet, error) { + return c.Internal.ChainGetTipSetByHeight(ctx, h, tsk) +} + +func (c *FullNodeStruct) WalletNew(ctx context.Context, typ crypto.SigType) (address.Address, error) { return c.Internal.WalletNew(ctx, typ) } @@ -279,7 +599,7 @@ func (c *FullNodeStruct) WalletBalance(ctx context.Context, a address.Address) ( return c.Internal.WalletBalance(ctx, a) } -func (c *FullNodeStruct) WalletSign(ctx context.Context, k address.Address, msg []byte) (*types.Signature, error) { +func (c *FullNodeStruct) WalletSign(ctx context.Context, k address.Address, msg []byte) (*crypto.Signature, error) { return c.Internal.WalletSign(ctx, k, msg) } @@ -287,6 +607,10 @@ func (c *FullNodeStruct) WalletSignMessage(ctx context.Context, k address.Addres return c.Internal.WalletSignMessage(ctx, k, msg) } +func (c *FullNodeStruct) WalletVerify(ctx context.Context, k address.Address, msg []byte, sig *crypto.Signature) (bool, error) { + return 
c.Internal.WalletVerify(ctx, k, msg, sig) +} + func (c *FullNodeStruct) WalletDefaultAddress(ctx context.Context) (address.Address, error) { return c.Internal.WalletDefaultAddress(ctx) } @@ -303,6 +627,10 @@ func (c *FullNodeStruct) WalletImport(ctx context.Context, ki *types.KeyInfo) (a return c.Internal.WalletImport(ctx, ki) } +func (c *FullNodeStruct) WalletDelete(ctx context.Context, addr address.Address) error { + return c.Internal.WalletDelete(ctx, addr) +} + func (c *FullNodeStruct) MpoolGetNonce(ctx context.Context, addr address.Address) (uint64, error) { return c.Internal.MpoolGetNonce(ctx, addr) } @@ -327,7 +655,7 @@ func (c *FullNodeStruct) ChainGetParentMessages(ctx context.Context, b cid.Cid) return c.Internal.ChainGetParentMessages(ctx, b) } -func (c *FullNodeStruct) ChainNotify(ctx context.Context) (<-chan []*store.HeadChange, error) { +func (c *FullNodeStruct) ChainNotify(ctx context.Context) (<-chan []*api.HeadChange, error) { return c.Internal.ChainNotify(ctx) } @@ -335,19 +663,31 @@ func (c *FullNodeStruct) ChainReadObj(ctx context.Context, obj cid.Cid) ([]byte, return c.Internal.ChainReadObj(ctx, obj) } -func (c *FullNodeStruct) ChainSetHead(ctx context.Context, ts *types.TipSet) error { - return c.Internal.ChainSetHead(ctx, ts) +func (c *FullNodeStruct) ChainDeleteObj(ctx context.Context, obj cid.Cid) error { + return c.Internal.ChainDeleteObj(ctx, obj) +} + +func (c *FullNodeStruct) ChainHasObj(ctx context.Context, o cid.Cid) (bool, error) { + return c.Internal.ChainHasObj(ctx, o) +} + +func (c *FullNodeStruct) ChainStatObj(ctx context.Context, obj, base cid.Cid) (api.ObjStat, error) { + return c.Internal.ChainStatObj(ctx, obj, base) +} + +func (c *FullNodeStruct) ChainSetHead(ctx context.Context, tsk types.TipSetKey) error { + return c.Internal.ChainSetHead(ctx, tsk) } func (c *FullNodeStruct) ChainGetGenesis(ctx context.Context) (*types.TipSet, error) { return c.Internal.ChainGetGenesis(ctx) } -func (c *FullNodeStruct) ChainTipSetWeight(ctx 
context.Context, ts *types.TipSet) (types.BigInt, error) { - return c.Internal.ChainTipSetWeight(ctx, ts) +func (c *FullNodeStruct) ChainTipSetWeight(ctx context.Context, tsk types.TipSetKey) (types.BigInt, error) { + return c.Internal.ChainTipSetWeight(ctx, tsk) } -func (c *FullNodeStruct) ChainGetNode(ctx context.Context, p string) (interface{}, error) { +func (c *FullNodeStruct) ChainGetNode(ctx context.Context, p string) (*api.IpldObject, error) { return c.Internal.ChainGetNode(ctx, p) } @@ -355,6 +695,18 @@ func (c *FullNodeStruct) ChainGetMessage(ctx context.Context, mc cid.Cid) (*type return c.Internal.ChainGetMessage(ctx, mc) } +func (c *FullNodeStruct) ChainGetPath(ctx context.Context, from types.TipSetKey, to types.TipSetKey) ([]*api.HeadChange, error) { + return c.Internal.ChainGetPath(ctx, from, to) +} + +func (c *FullNodeStruct) ChainExport(ctx context.Context, nroots abi.ChainEpoch, iom bool, tsk types.TipSetKey) (<-chan []byte, error) { + return c.Internal.ChainExport(ctx, nroots, iom, tsk) +} + +func (c *FullNodeStruct) BeaconGetEntry(ctx context.Context, epoch abi.ChainEpoch) (*types.BeaconEntry, error) { + return c.Internal.BeaconGetEntry(ctx, epoch) +} + func (c *FullNodeStruct) SyncState(ctx context.Context) (*api.SyncState, error) { return c.Internal.SyncState(ctx) } @@ -367,107 +719,252 @@ func (c *FullNodeStruct) SyncIncomingBlocks(ctx context.Context) (<-chan *types. 
return c.Internal.SyncIncomingBlocks(ctx) } +func (c *FullNodeStruct) SyncCheckpoint(ctx context.Context, tsk types.TipSetKey) error { + return c.Internal.SyncCheckpoint(ctx, tsk) +} + func (c *FullNodeStruct) SyncMarkBad(ctx context.Context, bcid cid.Cid) error { return c.Internal.SyncMarkBad(ctx, bcid) } -func (c *FullNodeStruct) StateMinerSectors(ctx context.Context, addr address.Address, ts *types.TipSet) ([]*api.ChainSectorInfo, error) { - return c.Internal.StateMinerSectors(ctx, addr, ts) +func (c *FullNodeStruct) SyncUnmarkBad(ctx context.Context, bcid cid.Cid) error { + return c.Internal.SyncUnmarkBad(ctx, bcid) } -func (c *FullNodeStruct) StateMinerProvingSet(ctx context.Context, addr address.Address, ts *types.TipSet) ([]*api.ChainSectorInfo, error) { - return c.Internal.StateMinerProvingSet(ctx, addr, ts) +func (c *FullNodeStruct) SyncCheckBad(ctx context.Context, bcid cid.Cid) (string, error) { + return c.Internal.SyncCheckBad(ctx, bcid) } -func (c *FullNodeStruct) StateMinerPower(ctx context.Context, a address.Address, ts *types.TipSet) (api.MinerPower, error) { - return c.Internal.StateMinerPower(ctx, a, ts) +func (c *FullNodeStruct) StateNetworkName(ctx context.Context) (dtypes.NetworkName, error) { + return c.Internal.StateNetworkName(ctx) } -func (c *FullNodeStruct) StateMinerWorker(ctx context.Context, m address.Address, ts *types.TipSet) (address.Address, error) { - return c.Internal.StateMinerWorker(ctx, m, ts) +func (c *FullNodeStruct) StateMinerSectors(ctx context.Context, addr address.Address, sectorNos *bitfield.BitField, tsk types.TipSetKey) ([]*miner.SectorOnChainInfo, error) { + return c.Internal.StateMinerSectors(ctx, addr, sectorNos, tsk) } -func (c *FullNodeStruct) StateMinerPeerID(ctx context.Context, m address.Address, ts *types.TipSet) (peer.ID, error) { - return c.Internal.StateMinerPeerID(ctx, m, ts) +func (c *FullNodeStruct) StateMinerActiveSectors(ctx context.Context, addr address.Address, tsk types.TipSetKey) 
([]*miner.SectorOnChainInfo, error) { + return c.Internal.StateMinerActiveSectors(ctx, addr, tsk) } -func (c *FullNodeStruct) StateMinerElectionPeriodStart(ctx context.Context, actor address.Address, ts *types.TipSet) (uint64, error) { - return c.Internal.StateMinerElectionPeriodStart(ctx, actor, ts) +func (c *FullNodeStruct) StateMinerProvingDeadline(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*dline.Info, error) { + return c.Internal.StateMinerProvingDeadline(ctx, addr, tsk) } -func (c *FullNodeStruct) StateMinerSectorSize(ctx context.Context, actor address.Address, ts *types.TipSet) (uint64, error) { - return c.Internal.StateMinerSectorSize(ctx, actor, ts) +func (c *FullNodeStruct) StateMinerPower(ctx context.Context, a address.Address, tsk types.TipSetKey) (*api.MinerPower, error) { + return c.Internal.StateMinerPower(ctx, a, tsk) } -func (c *FullNodeStruct) StateCall(ctx context.Context, msg *types.Message, ts *types.TipSet) (*types.MessageReceipt, error) { - return c.Internal.StateCall(ctx, msg, ts) +func (c *FullNodeStruct) StateMinerInfo(ctx context.Context, actor address.Address, tsk types.TipSetKey) (miner.MinerInfo, error) { + return c.Internal.StateMinerInfo(ctx, actor, tsk) } -func (c *FullNodeStruct) StateReplay(ctx context.Context, ts *types.TipSet, mc cid.Cid) (*api.ReplayResults, error) { - return c.Internal.StateReplay(ctx, ts, mc) +func (c *FullNodeStruct) StateMinerDeadlines(ctx context.Context, actor address.Address, tsk types.TipSetKey) ([]api.Deadline, error) { + return c.Internal.StateMinerDeadlines(ctx, actor, tsk) } -func (c *FullNodeStruct) StateGetActor(ctx context.Context, actor address.Address, ts *types.TipSet) (*types.Actor, error) { - return c.Internal.StateGetActor(ctx, actor, ts) +func (c *FullNodeStruct) StateMinerPartitions(ctx context.Context, m address.Address, dlIdx uint64, tsk types.TipSetKey) ([]api.Partition, error) { + return c.Internal.StateMinerPartitions(ctx, m, dlIdx, tsk) } -func (c 
*FullNodeStruct) StateReadState(ctx context.Context, act *types.Actor, ts *types.TipSet) (*api.ActorState, error) { - return c.Internal.StateReadState(ctx, act, ts) +func (c *FullNodeStruct) StateMinerFaults(ctx context.Context, actor address.Address, tsk types.TipSetKey) (bitfield.BitField, error) { + return c.Internal.StateMinerFaults(ctx, actor, tsk) } -func (c *FullNodeStruct) StatePledgeCollateral(ctx context.Context, ts *types.TipSet) (types.BigInt, error) { - return c.Internal.StatePledgeCollateral(ctx, ts) +func (c *FullNodeStruct) StateAllMinerFaults(ctx context.Context, cutoff abi.ChainEpoch, endTsk types.TipSetKey) ([]*api.Fault, error) { + return c.Internal.StateAllMinerFaults(ctx, cutoff, endTsk) } -func (c *FullNodeStruct) StateWaitMsg(ctx context.Context, msgc cid.Cid) (*api.MsgWait, error) { - return c.Internal.StateWaitMsg(ctx, msgc) -} -func (c *FullNodeStruct) StateListMiners(ctx context.Context, ts *types.TipSet) ([]address.Address, error) { - return c.Internal.StateListMiners(ctx, ts) +func (c *FullNodeStruct) StateMinerRecoveries(ctx context.Context, actor address.Address, tsk types.TipSetKey) (bitfield.BitField, error) { + return c.Internal.StateMinerRecoveries(ctx, actor, tsk) } -func (c *FullNodeStruct) StateListActors(ctx context.Context, ts *types.TipSet) ([]address.Address, error) { - return c.Internal.StateListActors(ctx, ts) +func (c *FullNodeStruct) StateMinerPreCommitDepositForPower(ctx context.Context, maddr address.Address, pci miner.SectorPreCommitInfo, tsk types.TipSetKey) (types.BigInt, error) { + return c.Internal.StateMinerPreCommitDepositForPower(ctx, maddr, pci, tsk) } -func (c *FullNodeStruct) StateMarketBalance(ctx context.Context, addr address.Address, ts *types.TipSet) (actors.StorageParticipantBalance, error) { - return c.Internal.StateMarketBalance(ctx, addr, ts) +func (c *FullNodeStruct) StateMinerInitialPledgeCollateral(ctx context.Context, maddr address.Address, pci miner.SectorPreCommitInfo, tsk types.TipSetKey) 
(types.BigInt, error) { + return c.Internal.StateMinerInitialPledgeCollateral(ctx, maddr, pci, tsk) } -func (c *FullNodeStruct) StateMarketParticipants(ctx context.Context, ts *types.TipSet) (map[string]actors.StorageParticipantBalance, error) { - return c.Internal.StateMarketParticipants(ctx, ts) +func (c *FullNodeStruct) StateMinerAvailableBalance(ctx context.Context, maddr address.Address, tsk types.TipSetKey) (types.BigInt, error) { + return c.Internal.StateMinerAvailableBalance(ctx, maddr, tsk) } -func (c *FullNodeStruct) StateMarketDeals(ctx context.Context, ts *types.TipSet) (map[string]actors.OnChainDeal, error) { - return c.Internal.StateMarketDeals(ctx, ts) +func (c *FullNodeStruct) StateSectorPreCommitInfo(ctx context.Context, maddr address.Address, n abi.SectorNumber, tsk types.TipSetKey) (miner.SectorPreCommitOnChainInfo, error) { + return c.Internal.StateSectorPreCommitInfo(ctx, maddr, n, tsk) } -func (c *FullNodeStruct) StateMarketStorageDeal(ctx context.Context, dealid uint64, ts *types.TipSet) (*actors.OnChainDeal, error) { - return c.Internal.StateMarketStorageDeal(ctx, dealid, ts) +func (c *FullNodeStruct) StateSectorGetInfo(ctx context.Context, maddr address.Address, n abi.SectorNumber, tsk types.TipSetKey) (*miner.SectorOnChainInfo, error) { + return c.Internal.StateSectorGetInfo(ctx, maddr, n, tsk) } -func (c *FullNodeStruct) StateLookupID(ctx context.Context, addr address.Address, ts *types.TipSet) (address.Address, error) { - return c.Internal.StateLookupID(ctx, addr, ts) +func (c *FullNodeStruct) StateSectorExpiration(ctx context.Context, maddr address.Address, n abi.SectorNumber, tsk types.TipSetKey) (*miner.SectorExpiration, error) { + return c.Internal.StateSectorExpiration(ctx, maddr, n, tsk) +} + +func (c *FullNodeStruct) StateSectorPartition(ctx context.Context, maddr address.Address, sectorNumber abi.SectorNumber, tok types.TipSetKey) (*miner.SectorLocation, error) { + return c.Internal.StateSectorPartition(ctx, maddr, sectorNumber, 
tok) +} + +func (c *FullNodeStruct) StateCall(ctx context.Context, msg *types.Message, tsk types.TipSetKey) (*api.InvocResult, error) { + return c.Internal.StateCall(ctx, msg, tsk) +} + +func (c *FullNodeStruct) StateReplay(ctx context.Context, tsk types.TipSetKey, mc cid.Cid) (*api.InvocResult, error) { + return c.Internal.StateReplay(ctx, tsk, mc) +} + +func (c *FullNodeStruct) StateGetActor(ctx context.Context, actor address.Address, tsk types.TipSetKey) (*types.Actor, error) { + return c.Internal.StateGetActor(ctx, actor, tsk) +} + +func (c *FullNodeStruct) StateReadState(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*api.ActorState, error) { + return c.Internal.StateReadState(ctx, addr, tsk) +} + +func (c *FullNodeStruct) StateMsgGasCost(ctx context.Context, msgc cid.Cid, tsk types.TipSetKey) (*api.MsgGasCost, error) { + return c.Internal.StateMsgGasCost(ctx, msgc, tsk) +} + +func (c *FullNodeStruct) StateWaitMsg(ctx context.Context, msgc cid.Cid, confidence uint64) (*api.MsgLookup, error) { + return c.Internal.StateWaitMsg(ctx, msgc, confidence) +} + +func (c *FullNodeStruct) StateSearchMsg(ctx context.Context, msgc cid.Cid) (*api.MsgLookup, error) { + return c.Internal.StateSearchMsg(ctx, msgc) +} + +func (c *FullNodeStruct) StateListMiners(ctx context.Context, tsk types.TipSetKey) ([]address.Address, error) { + return c.Internal.StateListMiners(ctx, tsk) +} + +func (c *FullNodeStruct) StateListActors(ctx context.Context, tsk types.TipSetKey) ([]address.Address, error) { + return c.Internal.StateListActors(ctx, tsk) +} + +func (c *FullNodeStruct) StateMarketBalance(ctx context.Context, addr address.Address, tsk types.TipSetKey) (api.MarketBalance, error) { + return c.Internal.StateMarketBalance(ctx, addr, tsk) +} + +func (c *FullNodeStruct) StateMarketParticipants(ctx context.Context, tsk types.TipSetKey) (map[string]api.MarketBalance, error) { + return c.Internal.StateMarketParticipants(ctx, tsk) +} + +func (c *FullNodeStruct) 
StateMarketDeals(ctx context.Context, tsk types.TipSetKey) (map[string]api.MarketDeal, error) { + return c.Internal.StateMarketDeals(ctx, tsk) +} + +func (c *FullNodeStruct) StateMarketStorageDeal(ctx context.Context, dealid abi.DealID, tsk types.TipSetKey) (*api.MarketDeal, error) { + return c.Internal.StateMarketStorageDeal(ctx, dealid, tsk) +} + +func (c *FullNodeStruct) StateLookupID(ctx context.Context, addr address.Address, tsk types.TipSetKey) (address.Address, error) { + return c.Internal.StateLookupID(ctx, addr, tsk) +} + +func (c *FullNodeStruct) StateAccountKey(ctx context.Context, addr address.Address, tsk types.TipSetKey) (address.Address, error) { + return c.Internal.StateAccountKey(ctx, addr, tsk) } func (c *FullNodeStruct) StateChangedActors(ctx context.Context, olnstate cid.Cid, newstate cid.Cid) (map[string]types.Actor, error) { return c.Internal.StateChangedActors(ctx, olnstate, newstate) } -func (c *FullNodeStruct) StateGetReceipt(ctx context.Context, msg cid.Cid, ts *types.TipSet) (*types.MessageReceipt, error) { - return c.Internal.StateGetReceipt(ctx, msg, ts) +func (c *FullNodeStruct) StateGetReceipt(ctx context.Context, msg cid.Cid, tsk types.TipSetKey) (*types.MessageReceipt, error) { + return c.Internal.StateGetReceipt(ctx, msg, tsk) } -func (c *FullNodeStruct) StateListMessages(ctx context.Context, match *types.Message, ts *types.TipSet, toht uint64) ([]cid.Cid, error) { - return c.Internal.StateListMessages(ctx, match, ts, toht) +func (c *FullNodeStruct) StateListMessages(ctx context.Context, match *types.Message, tsk types.TipSetKey, toht abi.ChainEpoch) ([]cid.Cid, error) { + return c.Internal.StateListMessages(ctx, match, tsk, toht) } -func (c *FullNodeStruct) MarketEnsureAvailable(ctx context.Context, addr address.Address, amt types.BigInt) error { - return c.Internal.MarketEnsureAvailable(ctx, addr, amt) +func (c *FullNodeStruct) StateCompute(ctx context.Context, height abi.ChainEpoch, msgs []*types.Message, tsk types.TipSetKey) 
(*api.ComputeStateOutput, error) { + return c.Internal.StateCompute(ctx, height, msgs, tsk) } -func (c *FullNodeStruct) PaychGet(ctx context.Context, from, to address.Address, ensureFunds types.BigInt) (*api.ChannelInfo, error) { - return c.Internal.PaychGet(ctx, from, to, ensureFunds) +func (c *FullNodeStruct) StateVerifiedClientStatus(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*abi.StoragePower, error) { + return c.Internal.StateVerifiedClientStatus(ctx, addr, tsk) +} + +func (c *FullNodeStruct) StateDealProviderCollateralBounds(ctx context.Context, size abi.PaddedPieceSize, verified bool, tsk types.TipSetKey) (api.DealCollateralBounds, error) { + return c.Internal.StateDealProviderCollateralBounds(ctx, size, verified, tsk) +} + +func (c *FullNodeStruct) StateCirculatingSupply(ctx context.Context, tsk types.TipSetKey) (api.CirculatingSupply, error) { + return c.Internal.StateCirculatingSupply(ctx, tsk) +} + +func (c *FullNodeStruct) StateNetworkVersion(ctx context.Context, tsk types.TipSetKey) (stnetwork.Version, error) { + return c.Internal.StateNetworkVersion(ctx, tsk) +} + +func (c *FullNodeStruct) MsigGetAvailableBalance(ctx context.Context, a address.Address, tsk types.TipSetKey) (types.BigInt, error) { + return c.Internal.MsigGetAvailableBalance(ctx, a, tsk) +} + +func (c *FullNodeStruct) MsigGetVested(ctx context.Context, a address.Address, sTsk types.TipSetKey, eTsk types.TipSetKey) (types.BigInt, error) { + return c.Internal.MsigGetVested(ctx, a, sTsk, eTsk) +} + +func (c *FullNodeStruct) MsigCreate(ctx context.Context, req uint64, addrs []address.Address, duration abi.ChainEpoch, val types.BigInt, src address.Address, gp types.BigInt) (cid.Cid, error) { + return c.Internal.MsigCreate(ctx, req, addrs, duration, val, src, gp) +} + +func (c *FullNodeStruct) MsigPropose(ctx context.Context, msig address.Address, to address.Address, amt types.BigInt, src address.Address, method uint64, params []byte) (cid.Cid, error) { + return 
c.Internal.MsigPropose(ctx, msig, to, amt, src, method, params) +} + +func (c *FullNodeStruct) MsigApprove(ctx context.Context, msig address.Address, txID uint64, proposer address.Address, to address.Address, amt types.BigInt, src address.Address, method uint64, params []byte) (cid.Cid, error) { + return c.Internal.MsigApprove(ctx, msig, txID, proposer, to, amt, src, method, params) +} + +func (c *FullNodeStruct) MsigCancel(ctx context.Context, msig address.Address, txID uint64, to address.Address, amt types.BigInt, src address.Address, method uint64, params []byte) (cid.Cid, error) { + return c.Internal.MsigCancel(ctx, msig, txID, to, amt, src, method, params) +} + +func (c *FullNodeStruct) MsigAddPropose(ctx context.Context, msig address.Address, src address.Address, newAdd address.Address, inc bool) (cid.Cid, error) { + return c.Internal.MsigAddPropose(ctx, msig, src, newAdd, inc) +} + +func (c *FullNodeStruct) MsigAddApprove(ctx context.Context, msig address.Address, src address.Address, txID uint64, proposer address.Address, newAdd address.Address, inc bool) (cid.Cid, error) { + return c.Internal.MsigAddApprove(ctx, msig, src, txID, proposer, newAdd, inc) +} + +func (c *FullNodeStruct) MsigAddCancel(ctx context.Context, msig address.Address, src address.Address, txID uint64, newAdd address.Address, inc bool) (cid.Cid, error) { + return c.Internal.MsigAddCancel(ctx, msig, src, txID, newAdd, inc) +} + +func (c *FullNodeStruct) MsigSwapPropose(ctx context.Context, msig address.Address, src address.Address, oldAdd address.Address, newAdd address.Address) (cid.Cid, error) { + return c.Internal.MsigSwapPropose(ctx, msig, src, oldAdd, newAdd) +} + +func (c *FullNodeStruct) MsigSwapApprove(ctx context.Context, msig address.Address, src address.Address, txID uint64, proposer address.Address, oldAdd address.Address, newAdd address.Address) (cid.Cid, error) { + return c.Internal.MsigSwapApprove(ctx, msig, src, txID, proposer, oldAdd, newAdd) +} + +func (c 
*FullNodeStruct) MsigSwapCancel(ctx context.Context, msig address.Address, src address.Address, txID uint64, oldAdd address.Address, newAdd address.Address) (cid.Cid, error) { + return c.Internal.MsigSwapCancel(ctx, msig, src, txID, oldAdd, newAdd) +} + +func (c *FullNodeStruct) MarketEnsureAvailable(ctx context.Context, addr, wallet address.Address, amt types.BigInt) (cid.Cid, error) { + return c.Internal.MarketEnsureAvailable(ctx, addr, wallet, amt) +} + +func (c *FullNodeStruct) PaychGet(ctx context.Context, from, to address.Address, amt types.BigInt) (*api.ChannelInfo, error) { + return c.Internal.PaychGet(ctx, from, to, amt) +} + +func (c *FullNodeStruct) PaychGetWaitReady(ctx context.Context, sentinel cid.Cid) (address.Address, error) { + return c.Internal.PaychGetWaitReady(ctx, sentinel) +} + +func (c *FullNodeStruct) PaychAvailableFunds(ctx context.Context, ch address.Address) (*api.ChannelAvailableFunds, error) { + return c.Internal.PaychAvailableFunds(ctx, ch) +} + +func (c *FullNodeStruct) PaychAvailableFundsByFromTo(ctx context.Context, from, to address.Address) (*api.ChannelAvailableFunds, error) { + return c.Internal.PaychAvailableFundsByFromTo(ctx, from, to) } func (c *FullNodeStruct) PaychList(ctx context.Context) ([]address.Address, error) { @@ -478,28 +975,32 @@ func (c *FullNodeStruct) PaychStatus(ctx context.Context, pch address.Address) ( return c.Internal.PaychStatus(ctx, pch) } -func (c *FullNodeStruct) PaychVoucherCheckValid(ctx context.Context, addr address.Address, sv *types.SignedVoucher) error { +func (c *FullNodeStruct) PaychVoucherCheckValid(ctx context.Context, addr address.Address, sv *paych.SignedVoucher) error { return c.Internal.PaychVoucherCheckValid(ctx, addr, sv) } -func (c *FullNodeStruct) PaychVoucherCheckSpendable(ctx context.Context, addr address.Address, sv *types.SignedVoucher, secret []byte, proof []byte) (bool, error) { +func (c *FullNodeStruct) PaychVoucherCheckSpendable(ctx context.Context, addr address.Address, sv 
*paych.SignedVoucher, secret []byte, proof []byte) (bool, error) { return c.Internal.PaychVoucherCheckSpendable(ctx, addr, sv, secret, proof) } -func (c *FullNodeStruct) PaychVoucherAdd(ctx context.Context, addr address.Address, sv *types.SignedVoucher, proof []byte, minDelta types.BigInt) (types.BigInt, error) { +func (c *FullNodeStruct) PaychVoucherAdd(ctx context.Context, addr address.Address, sv *paych.SignedVoucher, proof []byte, minDelta types.BigInt) (types.BigInt, error) { return c.Internal.PaychVoucherAdd(ctx, addr, sv, proof, minDelta) } -func (c *FullNodeStruct) PaychVoucherCreate(ctx context.Context, pch address.Address, amt types.BigInt, lane uint64) (*types.SignedVoucher, error) { +func (c *FullNodeStruct) PaychVoucherCreate(ctx context.Context, pch address.Address, amt types.BigInt, lane uint64) (*api.VoucherCreateResult, error) { return c.Internal.PaychVoucherCreate(ctx, pch, amt, lane) } -func (c *FullNodeStruct) PaychVoucherList(ctx context.Context, pch address.Address) ([]*types.SignedVoucher, error) { +func (c *FullNodeStruct) PaychVoucherList(ctx context.Context, pch address.Address) ([]*paych.SignedVoucher, error) { return c.Internal.PaychVoucherList(ctx, pch) } -func (c *FullNodeStruct) PaychClose(ctx context.Context, a address.Address) (cid.Cid, error) { - return c.Internal.PaychClose(ctx, a) +func (c *FullNodeStruct) PaychSettle(ctx context.Context, a address.Address) (cid.Cid, error) { + return c.Internal.PaychSettle(ctx, a) +} + +func (c *FullNodeStruct) PaychCollect(ctx context.Context, a address.Address) (cid.Cid, error) { + return c.Internal.PaychCollect(ctx, a) } func (c *FullNodeStruct) PaychAllocateLane(ctx context.Context, ch address.Address) (uint64, error) { @@ -510,15 +1011,21 @@ func (c *FullNodeStruct) PaychNewPayment(ctx context.Context, from, to address.A return c.Internal.PaychNewPayment(ctx, from, to, vouchers) } -func (c *FullNodeStruct) PaychVoucherSubmit(ctx context.Context, ch address.Address, sv *types.SignedVoucher) 
(cid.Cid, error) { - return c.Internal.PaychVoucherSubmit(ctx, ch, sv) +func (c *FullNodeStruct) PaychVoucherSubmit(ctx context.Context, ch address.Address, sv *paych.SignedVoucher, secret []byte, proof []byte) (cid.Cid, error) { + return c.Internal.PaychVoucherSubmit(ctx, ch, sv, secret, proof) } +// StorageMinerStruct + func (c *StorageMinerStruct) ActorAddress(ctx context.Context) (address.Address, error) { return c.Internal.ActorAddress(ctx) } -func (c *StorageMinerStruct) ActorSectorSize(ctx context.Context, addr address.Address) (uint64, error) { +func (c *StorageMinerStruct) MiningBase(ctx context.Context) (*types.TipSet, error) { + return c.Internal.MiningBase(ctx) +} + +func (c *StorageMinerStruct) ActorSectorSize(ctx context.Context, addr address.Address) (abi.SectorSize, error) { return c.Internal.ActorSectorSize(ctx, addr) } @@ -527,12 +1034,12 @@ func (c *StorageMinerStruct) PledgeSector(ctx context.Context) error { } // Get the status of a given sector by ID -func (c *StorageMinerStruct) SectorsStatus(ctx context.Context, sid uint64) (api.SectorInfo, error) { - return c.Internal.SectorsStatus(ctx, sid) +func (c *StorageMinerStruct) SectorsStatus(ctx context.Context, sid abi.SectorNumber, showOnChainInfo bool) (api.SectorInfo, error) { + return c.Internal.SectorsStatus(ctx, sid, showOnChainInfo) } // List all staged sectors -func (c *StorageMinerStruct) SectorsList(ctx context.Context) ([]uint64, error) { +func (c *StorageMinerStruct) SectorsList(ctx context.Context) ([]abi.SectorNumber, error) { return c.Internal.SectorsList(ctx) } @@ -540,22 +1047,289 @@ func (c *StorageMinerStruct) SectorsRefs(ctx context.Context) (map[string][]api. 
return c.Internal.SectorsRefs(ctx) } -func (c *StorageMinerStruct) SectorsUpdate(ctx context.Context, id uint64, state api.SectorState) error { +func (c *StorageMinerStruct) SectorStartSealing(ctx context.Context, number abi.SectorNumber) error { + return c.Internal.SectorStartSealing(ctx, number) +} + +func (c *StorageMinerStruct) SectorSetSealDelay(ctx context.Context, delay time.Duration) error { + return c.Internal.SectorSetSealDelay(ctx, delay) +} + +func (c *StorageMinerStruct) SectorGetSealDelay(ctx context.Context) (time.Duration, error) { + return c.Internal.SectorGetSealDelay(ctx) +} + +func (c *StorageMinerStruct) SectorSetExpectedSealDuration(ctx context.Context, delay time.Duration) error { + return c.Internal.SectorSetExpectedSealDuration(ctx, delay) +} + +func (c *StorageMinerStruct) SectorGetExpectedSealDuration(ctx context.Context) (time.Duration, error) { + return c.Internal.SectorGetExpectedSealDuration(ctx) +} + +func (c *StorageMinerStruct) SectorsUpdate(ctx context.Context, id abi.SectorNumber, state api.SectorState) error { return c.Internal.SectorsUpdate(ctx, id, state) } -func (c *StorageMinerStruct) WorkerStats(ctx context.Context) (sectorbuilder.WorkerStats, error) { +func (c *StorageMinerStruct) SectorRemove(ctx context.Context, number abi.SectorNumber) error { + return c.Internal.SectorRemove(ctx, number) +} + +func (c *StorageMinerStruct) SectorMarkForUpgrade(ctx context.Context, number abi.SectorNumber) error { + return c.Internal.SectorMarkForUpgrade(ctx, number) +} + +func (c *StorageMinerStruct) WorkerConnect(ctx context.Context, url string) error { + return c.Internal.WorkerConnect(ctx, url) +} + +func (c *StorageMinerStruct) WorkerStats(ctx context.Context) (map[uint64]storiface.WorkerStats, error) { return c.Internal.WorkerStats(ctx) } -func (c *StorageMinerStruct) WorkerQueue(ctx context.Context, cfg sectorbuilder.WorkerCfg) (<-chan sectorbuilder.WorkerTask, error) { - return c.Internal.WorkerQueue(ctx, cfg) +func (c 
*StorageMinerStruct) WorkerJobs(ctx context.Context) (map[uint64][]storiface.WorkerJob, error) { + return c.Internal.WorkerJobs(ctx) } -func (c *StorageMinerStruct) WorkerDone(ctx context.Context, task uint64, res sectorbuilder.SealRes) error { - return c.Internal.WorkerDone(ctx, task, res) +func (c *StorageMinerStruct) SealingSchedDiag(ctx context.Context) (interface{}, error) { + return c.Internal.SealingSchedDiag(ctx) +} + +func (c *StorageMinerStruct) StorageAttach(ctx context.Context, si stores.StorageInfo, st fsutil.FsStat) error { + return c.Internal.StorageAttach(ctx, si, st) +} + +func (c *StorageMinerStruct) StorageDeclareSector(ctx context.Context, storageId stores.ID, s abi.SectorID, ft stores.SectorFileType, primary bool) error { + return c.Internal.StorageDeclareSector(ctx, storageId, s, ft, primary) +} + +func (c *StorageMinerStruct) StorageDropSector(ctx context.Context, storageId stores.ID, s abi.SectorID, ft stores.SectorFileType) error { + return c.Internal.StorageDropSector(ctx, storageId, s, ft) +} + +func (c *StorageMinerStruct) StorageFindSector(ctx context.Context, si abi.SectorID, types stores.SectorFileType, spt abi.RegisteredSealProof, allowFetch bool) ([]stores.SectorStorageInfo, error) { + return c.Internal.StorageFindSector(ctx, si, types, spt, allowFetch) +} + +func (c *StorageMinerStruct) StorageList(ctx context.Context) (map[stores.ID][]stores.Decl, error) { + return c.Internal.StorageList(ctx) +} + +func (c *StorageMinerStruct) StorageLocal(ctx context.Context) (map[stores.ID]string, error) { + return c.Internal.StorageLocal(ctx) +} + +func (c *StorageMinerStruct) StorageStat(ctx context.Context, id stores.ID) (fsutil.FsStat, error) { + return c.Internal.StorageStat(ctx, id) +} + +func (c *StorageMinerStruct) StorageInfo(ctx context.Context, id stores.ID) (stores.StorageInfo, error) { + return c.Internal.StorageInfo(ctx, id) +} + +func (c *StorageMinerStruct) StorageBestAlloc(ctx context.Context, allocate stores.SectorFileType, spt 
abi.RegisteredSealProof, pt stores.PathType) ([]stores.StorageInfo, error) { + return c.Internal.StorageBestAlloc(ctx, allocate, spt, pt) +} + +func (c *StorageMinerStruct) StorageReportHealth(ctx context.Context, id stores.ID, report stores.HealthReport) error { + return c.Internal.StorageReportHealth(ctx, id, report) +} + +func (c *StorageMinerStruct) StorageLock(ctx context.Context, sector abi.SectorID, read stores.SectorFileType, write stores.SectorFileType) error { + return c.Internal.StorageLock(ctx, sector, read, write) +} + +func (c *StorageMinerStruct) StorageTryLock(ctx context.Context, sector abi.SectorID, read stores.SectorFileType, write stores.SectorFileType) (bool, error) { + return c.Internal.StorageTryLock(ctx, sector, read, write) +} + +func (c *StorageMinerStruct) MarketImportDealData(ctx context.Context, propcid cid.Cid, path string) error { + return c.Internal.MarketImportDealData(ctx, propcid, path) +} + +func (c *StorageMinerStruct) MarketListDeals(ctx context.Context) ([]api.MarketDeal, error) { + return c.Internal.MarketListDeals(ctx) +} + +func (c *StorageMinerStruct) MarketListRetrievalDeals(ctx context.Context) ([]retrievalmarket.ProviderDealState, error) { + return c.Internal.MarketListRetrievalDeals(ctx) +} + +func (c *StorageMinerStruct) MarketGetDealUpdates(ctx context.Context) (<-chan storagemarket.MinerDeal, error) { + return c.Internal.MarketGetDealUpdates(ctx) +} + +func (c *StorageMinerStruct) MarketListIncompleteDeals(ctx context.Context) ([]storagemarket.MinerDeal, error) { + return c.Internal.MarketListIncompleteDeals(ctx) +} + +func (c *StorageMinerStruct) MarketSetAsk(ctx context.Context, price types.BigInt, verifiedPrice types.BigInt, duration abi.ChainEpoch, minPieceSize abi.PaddedPieceSize, maxPieceSize abi.PaddedPieceSize) error { + return c.Internal.MarketSetAsk(ctx, price, verifiedPrice, duration, minPieceSize, maxPieceSize) +} + +func (c *StorageMinerStruct) MarketGetAsk(ctx context.Context) 
(*storagemarket.SignedStorageAsk, error) { + return c.Internal.MarketGetAsk(ctx) +} + +func (c *StorageMinerStruct) MarketSetRetrievalAsk(ctx context.Context, rask *retrievalmarket.Ask) error { + return c.Internal.MarketSetRetrievalAsk(ctx, rask) +} + +func (c *StorageMinerStruct) MarketGetRetrievalAsk(ctx context.Context) (*retrievalmarket.Ask, error) { + return c.Internal.MarketGetRetrievalAsk(ctx) +} + +func (c *StorageMinerStruct) MarketListDataTransfers(ctx context.Context) ([]api.DataTransferChannel, error) { + return c.Internal.MarketListDataTransfers(ctx) +} + +func (c *StorageMinerStruct) MarketDataTransferUpdates(ctx context.Context) (<-chan api.DataTransferChannel, error) { + return c.Internal.MarketDataTransferUpdates(ctx) +} + +func (c *StorageMinerStruct) DealsImportData(ctx context.Context, dealPropCid cid.Cid, file string) error { + return c.Internal.DealsImportData(ctx, dealPropCid, file) +} + +func (c *StorageMinerStruct) DealsList(ctx context.Context) ([]api.MarketDeal, error) { + return c.Internal.DealsList(ctx) +} + +func (c *StorageMinerStruct) DealsConsiderOnlineStorageDeals(ctx context.Context) (bool, error) { + return c.Internal.DealsConsiderOnlineStorageDeals(ctx) +} + +func (c *StorageMinerStruct) DealsSetConsiderOnlineStorageDeals(ctx context.Context, b bool) error { + return c.Internal.DealsSetConsiderOnlineStorageDeals(ctx, b) +} + +func (c *StorageMinerStruct) DealsConsiderOnlineRetrievalDeals(ctx context.Context) (bool, error) { + return c.Internal.DealsConsiderOnlineRetrievalDeals(ctx) +} + +func (c *StorageMinerStruct) DealsSetConsiderOnlineRetrievalDeals(ctx context.Context, b bool) error { + return c.Internal.DealsSetConsiderOnlineRetrievalDeals(ctx, b) +} + +func (c *StorageMinerStruct) DealsPieceCidBlocklist(ctx context.Context) ([]cid.Cid, error) { + return c.Internal.DealsPieceCidBlocklist(ctx) +} + +func (c *StorageMinerStruct) DealsSetPieceCidBlocklist(ctx context.Context, cids []cid.Cid) error { + return 
c.Internal.DealsSetPieceCidBlocklist(ctx, cids) +} + +func (c *StorageMinerStruct) DealsConsiderOfflineStorageDeals(ctx context.Context) (bool, error) { + return c.Internal.DealsConsiderOfflineStorageDeals(ctx) +} + +func (c *StorageMinerStruct) DealsSetConsiderOfflineStorageDeals(ctx context.Context, b bool) error { + return c.Internal.DealsSetConsiderOfflineStorageDeals(ctx, b) +} + +func (c *StorageMinerStruct) DealsConsiderOfflineRetrievalDeals(ctx context.Context) (bool, error) { + return c.Internal.DealsConsiderOfflineRetrievalDeals(ctx) +} + +func (c *StorageMinerStruct) DealsSetConsiderOfflineRetrievalDeals(ctx context.Context, b bool) error { + return c.Internal.DealsSetConsiderOfflineRetrievalDeals(ctx, b) +} + +func (c *StorageMinerStruct) StorageAddLocal(ctx context.Context, path string) error { + return c.Internal.StorageAddLocal(ctx, path) +} + +func (c *StorageMinerStruct) PiecesListPieces(ctx context.Context) ([]cid.Cid, error) { + return c.Internal.PiecesListPieces(ctx) +} + +func (c *StorageMinerStruct) PiecesListCidInfos(ctx context.Context) ([]cid.Cid, error) { + return c.Internal.PiecesListCidInfos(ctx) +} + +func (c *StorageMinerStruct) PiecesGetPieceInfo(ctx context.Context, pieceCid cid.Cid) (*piecestore.PieceInfo, error) { + return c.Internal.PiecesGetPieceInfo(ctx, pieceCid) +} + +func (c *StorageMinerStruct) PiecesGetCIDInfo(ctx context.Context, payloadCid cid.Cid) (*piecestore.CIDInfo, error) { + return c.Internal.PiecesGetCIDInfo(ctx, payloadCid) +} + +// WorkerStruct + +func (w *WorkerStruct) Version(ctx context.Context) (build.Version, error) { + return w.Internal.Version(ctx) +} + +func (w *WorkerStruct) TaskTypes(ctx context.Context) (map[sealtasks.TaskType]struct{}, error) { + return w.Internal.TaskTypes(ctx) +} + +func (w *WorkerStruct) Paths(ctx context.Context) ([]stores.StoragePath, error) { + return w.Internal.Paths(ctx) +} + +func (w *WorkerStruct) Info(ctx context.Context) (storiface.WorkerInfo, error) { + return 
w.Internal.Info(ctx) +} + +func (w *WorkerStruct) AddPiece(ctx context.Context, sector abi.SectorID, pieceSizes []abi.UnpaddedPieceSize, newPieceSize abi.UnpaddedPieceSize, pieceData storage.Data) (abi.PieceInfo, error) { + return w.Internal.AddPiece(ctx, sector, pieceSizes, newPieceSize, pieceData) +} + +func (w *WorkerStruct) SealPreCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, pieces []abi.PieceInfo) (storage.PreCommit1Out, error) { + return w.Internal.SealPreCommit1(ctx, sector, ticket, pieces) +} + +func (w *WorkerStruct) SealPreCommit2(ctx context.Context, sector abi.SectorID, p1o storage.PreCommit1Out) (storage.SectorCids, error) { + return w.Internal.SealPreCommit2(ctx, sector, p1o) +} + +func (w *WorkerStruct) SealCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (storage.Commit1Out, error) { + return w.Internal.SealCommit1(ctx, sector, ticket, seed, pieces, cids) +} + +func (w *WorkerStruct) SealCommit2(ctx context.Context, sector abi.SectorID, c1o storage.Commit1Out) (storage.Proof, error) { + return w.Internal.SealCommit2(ctx, sector, c1o) +} + +func (w *WorkerStruct) FinalizeSector(ctx context.Context, sector abi.SectorID, keepUnsealed []storage.Range) error { + return w.Internal.FinalizeSector(ctx, sector, keepUnsealed) +} + +func (w *WorkerStruct) ReleaseUnsealed(ctx context.Context, sector abi.SectorID, safeToFree []storage.Range) error { + return w.Internal.ReleaseUnsealed(ctx, sector, safeToFree) +} + +func (w *WorkerStruct) Remove(ctx context.Context, sector abi.SectorID) error { + return w.Internal.Remove(ctx, sector) +} + +func (w *WorkerStruct) MoveStorage(ctx context.Context, sector abi.SectorID, types stores.SectorFileType) error { + return w.Internal.MoveStorage(ctx, sector, types) +} + +func (w *WorkerStruct) StorageAddLocal(ctx context.Context, path string) error { + return 
w.Internal.StorageAddLocal(ctx, path) +} + +func (w *WorkerStruct) UnsealPiece(ctx context.Context, id abi.SectorID, index storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, randomness abi.SealRandomness, c cid.Cid) error { + return w.Internal.UnsealPiece(ctx, id, index, size, randomness, c) +} + +func (w *WorkerStruct) ReadPiece(ctx context.Context, writer io.Writer, id abi.SectorID, index storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (bool, error) { + return w.Internal.ReadPiece(ctx, writer, id, index, size) +} + +func (w *WorkerStruct) Fetch(ctx context.Context, id abi.SectorID, fileType stores.SectorFileType, ptype stores.PathType, am stores.AcquireMode) error { + return w.Internal.Fetch(ctx, id, fileType, ptype, am) +} + +func (w *WorkerStruct) Closing(ctx context.Context) (<-chan struct{}, error) { + return w.Internal.Closing(ctx) } var _ api.Common = &CommonStruct{} var _ api.FullNode = &FullNodeStruct{} var _ api.StorageMiner = &StorageMinerStruct{} +var _ api.WorkerAPI = &WorkerStruct{} diff --git a/api/apistruct/struct_test.go b/api/apistruct/struct_test.go new file mode 100644 index 000000000..9f5f58360 --- /dev/null +++ b/api/apistruct/struct_test.go @@ -0,0 +1,9 @@ +package apistruct + +import "testing" + +func TestPermTags(t *testing.T) { + _ = PermissionedFullAPI(&FullNodeStruct{}) + _ = PermissionedStorMinerAPI(&StorageMinerStruct{}) + _ = PermissionedWorkerAPI(&WorkerStruct{}) +} diff --git a/api/cbor_gen.go b/api/cbor_gen.go index 8d309a6cc..7ab575b28 100644 --- a/api/cbor_gen.go +++ b/api/cbor_gen.go @@ -1,16 +1,17 @@ +// Code generated by github.com/whyrusleeping/cbor-gen. DO NOT EDIT. + package api import ( "fmt" "io" - "github.com/filecoin-project/lotus/chain/types" + abi "github.com/filecoin-project/go-state-types/abi" + paych "github.com/filecoin-project/specs-actors/actors/builtin/paych" cbg "github.com/whyrusleeping/cbor-gen" xerrors "golang.org/x/xerrors" ) -// Code generated by github.com/whyrusleeping/cbor-gen. 
DO NOT EDIT. - var _ = xerrors.Errorf func (t *PaymentInfo) MarshalCBOR(w io.Writer) error { @@ -22,11 +23,17 @@ func (t *PaymentInfo) MarshalCBOR(w io.Writer) error { return err } + scratch := make([]byte, 9) + // t.Channel (address.Address) (struct) - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajTextString, uint64(len("Channel")))); err != nil { + if len("Channel") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Channel\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("Channel"))); err != nil { return err } - if _, err := w.Write([]byte("Channel")); err != nil { + if _, err := io.WriteString(w, string("Channel")); err != nil { return err } @@ -34,33 +41,39 @@ func (t *PaymentInfo) MarshalCBOR(w io.Writer) error { return err } - // t.ChannelMessage (cid.Cid) (struct) - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajTextString, uint64(len("ChannelMessage")))); err != nil { + // t.WaitSentinel (cid.Cid) (struct) + if len("WaitSentinel") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"WaitSentinel\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("WaitSentinel"))); err != nil { return err } - if _, err := w.Write([]byte("ChannelMessage")); err != nil { + if _, err := io.WriteString(w, string("WaitSentinel")); err != nil { return err } - if t.ChannelMessage == nil { - if _, err := w.Write(cbg.CborNull); err != nil { - return err - } - } else { - if err := cbg.WriteCid(w, *t.ChannelMessage); err != nil { - return xerrors.Errorf("failed to write cid field t.ChannelMessage: %w", err) - } + if err := cbg.WriteCidBuf(scratch, w, t.WaitSentinel); err != nil { + return xerrors.Errorf("failed to write cid field t.WaitSentinel: %w", err) } - // t.Vouchers ([]*types.SignedVoucher) (slice) - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajTextString, uint64(len("Vouchers")))); err != nil { + // t.Vouchers ([]*paych.SignedVoucher) 
(slice) + if len("Vouchers") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Vouchers\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("Vouchers"))); err != nil { return err } - if _, err := w.Write([]byte("Vouchers")); err != nil { + if _, err := io.WriteString(w, string("Vouchers")); err != nil { return err } - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajArray, uint64(len(t.Vouchers)))); err != nil { + if len(t.Vouchers) > cbg.MaxLength { + return xerrors.Errorf("Slice value in field t.Vouchers was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.Vouchers))); err != nil { return err } for _, v := range t.Vouchers { @@ -72,9 +85,12 @@ func (t *PaymentInfo) MarshalCBOR(w io.Writer) error { } func (t *PaymentInfo) UnmarshalCBOR(r io.Reader) error { - br := cbg.GetPeeker(r) + *t = PaymentInfo{} - maj, extra, err := cbg.CborReadHeader(br) + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) if err != nil { return err } @@ -82,114 +98,85 @@ func (t *PaymentInfo) UnmarshalCBOR(r io.Reader) error { return fmt.Errorf("cbor input should be of type map") } - if extra != 3 { - return fmt.Errorf("cbor input had wrong number of fields") + if extra > cbg.MaxLength { + return fmt.Errorf("PaymentInfo: map struct too large (%d)", extra) } var name string + n := extra - // t.Channel (address.Address) (struct) + for i := uint64(0); i < n; i++ { - { - sval, err := cbg.ReadString(br) - if err != nil { - return err - } - - name = string(sval) - } - - if name != "Channel" { - return fmt.Errorf("expected struct map entry %s to be Channel", name) - } - - { - - if err := t.Channel.UnmarshalCBOR(br); err != nil { - return err - } - - } - // t.ChannelMessage (cid.Cid) (struct) - - { - sval, err := cbg.ReadString(br) - if err != nil { - return err - } - - name = string(sval) - } - - if name != "ChannelMessage" { - 
return fmt.Errorf("expected struct map entry %s to be ChannelMessage", name) - } - - { - - pb, err := br.PeekByte() - if err != nil { - return err - } - if pb == cbg.CborNull[0] { - var nbuf [1]byte - if _, err := br.Read(nbuf[:]); err != nil { + { + sval, err := cbg.ReadStringBuf(br, scratch) + if err != nil { return err } - } else { - c, err := cbg.ReadCid(br) + name = string(sval) + } + + switch name { + // t.Channel (address.Address) (struct) + case "Channel": + + { + + if err := t.Channel.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.Channel: %w", err) + } + + } + // t.WaitSentinel (cid.Cid) (struct) + case "WaitSentinel": + + { + + c, err := cbg.ReadCid(br) + if err != nil { + return xerrors.Errorf("failed to read cid field t.WaitSentinel: %w", err) + } + + t.WaitSentinel = c + + } + // t.Vouchers ([]*paych.SignedVoucher) (slice) + case "Vouchers": + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) if err != nil { - return xerrors.Errorf("failed to read cid field t.ChannelMessage: %w", err) + return err } - t.ChannelMessage = &c + if extra > cbg.MaxLength { + return fmt.Errorf("t.Vouchers: array too large (%d)", extra) + } + + if maj != cbg.MajArray { + return fmt.Errorf("expected cbor array") + } + + if extra > 0 { + t.Vouchers = make([]*paych.SignedVoucher, extra) + } + + for i := 0; i < int(extra); i++ { + + var v paych.SignedVoucher + if err := v.UnmarshalCBOR(br); err != nil { + return err + } + + t.Vouchers[i] = &v + } + + default: + return fmt.Errorf("unknown struct field %d: '%s'", i, name) } - - } - // t.Vouchers ([]*types.SignedVoucher) (slice) - - { - sval, err := cbg.ReadString(br) - if err != nil { - return err - } - - name = string(sval) - } - - if name != "Vouchers" { - return fmt.Errorf("expected struct map entry %s to be Vouchers", name) - } - - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err - } - - if extra > cbg.MaxLength { - return fmt.Errorf("t.Vouchers: array too large (%d)", 
extra) - } - - if maj != cbg.MajArray { - return fmt.Errorf("expected cbor array") - } - if extra > 0 { - t.Vouchers = make([]*types.SignedVoucher, extra) - } - for i := 0; i < int(extra); i++ { - - var v types.SignedVoucher - if err := v.UnmarshalCBOR(br); err != nil { - return err - } - - t.Vouchers[i] = &v } return nil } - func (t *SealedRef) MarshalCBOR(w io.Writer) error { if t == nil { _, err := w.Write(cbg.CborNull) @@ -199,48 +186,66 @@ func (t *SealedRef) MarshalCBOR(w io.Writer) error { return err } - // t.SectorID (uint64) (uint64) - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajTextString, uint64(len("SectorID")))); err != nil { + scratch := make([]byte, 9) + + // t.SectorID (abi.SectorNumber) (uint64) + if len("SectorID") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"SectorID\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("SectorID"))); err != nil { return err } - if _, err := w.Write([]byte("SectorID")); err != nil { + if _, err := io.WriteString(w, string("SectorID")); err != nil { return err } - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajUnsignedInt, uint64(t.SectorID))); err != nil { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.SectorID)); err != nil { return err } - // t.Offset (uint64) (uint64) - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajTextString, uint64(len("Offset")))); err != nil { + // t.Offset (abi.PaddedPieceSize) (uint64) + if len("Offset") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Offset\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("Offset"))); err != nil { return err } - if _, err := w.Write([]byte("Offset")); err != nil { + if _, err := io.WriteString(w, string("Offset")); err != nil { return err } - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajUnsignedInt, uint64(t.Offset))); err != nil { + if err := 
cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Offset)); err != nil { return err } - // t.Size (uint64) (uint64) - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajTextString, uint64(len("Size")))); err != nil { + // t.Size (abi.UnpaddedPieceSize) (uint64) + if len("Size") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Size\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("Size"))); err != nil { return err } - if _, err := w.Write([]byte("Size")); err != nil { + if _, err := io.WriteString(w, string("Size")); err != nil { return err } - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajUnsignedInt, uint64(t.Size))); err != nil { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Size)); err != nil { return err } + return nil } func (t *SealedRef) UnmarshalCBOR(r io.Reader) error { - br := cbg.GetPeeker(r) + *t = SealedRef{} - maj, extra, err := cbg.CborReadHeader(br) + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) if err != nil { return err } @@ -248,84 +253,78 @@ func (t *SealedRef) UnmarshalCBOR(r io.Reader) error { return fmt.Errorf("cbor input should be of type map") } - if extra != 3 { - return fmt.Errorf("cbor input had wrong number of fields") + if extra > cbg.MaxLength { + return fmt.Errorf("SealedRef: map struct too large (%d)", extra) } var name string + n := extra - // t.SectorID (uint64) (uint64) + for i := uint64(0); i < n; i++ { - { - sval, err := cbg.ReadString(br) - if err != nil { - return err + { + sval, err := cbg.ReadStringBuf(br, scratch) + if err != nil { + return err + } + + name = string(sval) } - name = string(sval) - } + switch name { + // t.SectorID (abi.SectorNumber) (uint64) + case "SectorID": - if name != "SectorID" { - return fmt.Errorf("expected struct map entry %s to be SectorID", name) - } + { - maj, extra, err = cbg.CborReadHeader(br) - if err 
!= nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.SectorID = uint64(extra) - // t.Offset (uint64) (uint64) + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.SectorID = abi.SectorNumber(extra) - { - sval, err := cbg.ReadString(br) - if err != nil { - return err + } + // t.Offset (abi.PaddedPieceSize) (uint64) + case "Offset": + + { + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.Offset = abi.PaddedPieceSize(extra) + + } + // t.Size (abi.UnpaddedPieceSize) (uint64) + case "Size": + + { + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.Size = abi.UnpaddedPieceSize(extra) + + } + + default: + return fmt.Errorf("unknown struct field %d: '%s'", i, name) } - - name = string(sval) } - if name != "Offset" { - return fmt.Errorf("expected struct map entry %s to be Offset", name) - } - - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.Offset = uint64(extra) - // t.Size (uint64) (uint64) - - { - sval, err := cbg.ReadString(br) - if err != nil { - return err - } - - name = string(sval) - } - - if name != "Size" { - return fmt.Errorf("expected struct map entry %s to be Size", name) - } - - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.Size = uint64(extra) return nil } - func (t *SealedRefs) MarshalCBOR(w io.Writer) error { if t == nil { _, err := w.Write(cbg.CborNull) @@ -335,15 
+334,25 @@ func (t *SealedRefs) MarshalCBOR(w io.Writer) error { return err } + scratch := make([]byte, 9) + // t.Refs ([]api.SealedRef) (slice) - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajTextString, uint64(len("Refs")))); err != nil { + if len("Refs") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Refs\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("Refs"))); err != nil { return err } - if _, err := w.Write([]byte("Refs")); err != nil { + if _, err := io.WriteString(w, string("Refs")); err != nil { return err } - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajArray, uint64(len(t.Refs)))); err != nil { + if len(t.Refs) > cbg.MaxLength { + return xerrors.Errorf("Slice value in field t.Refs was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.Refs))); err != nil { return err } for _, v := range t.Refs { @@ -355,9 +364,12 @@ func (t *SealedRefs) MarshalCBOR(w io.Writer) error { } func (t *SealedRefs) UnmarshalCBOR(r io.Reader) error { - br := cbg.GetPeeker(r) + *t = SealedRefs{} - maj, extra, err := cbg.CborReadHeader(br) + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) if err != nil { return err } @@ -365,50 +377,354 @@ func (t *SealedRefs) UnmarshalCBOR(r io.Reader) error { return fmt.Errorf("cbor input should be of type map") } - if extra != 1 { - return fmt.Errorf("cbor input had wrong number of fields") + if extra > cbg.MaxLength { + return fmt.Errorf("SealedRefs: map struct too large (%d)", extra) } var name string + n := extra - // t.Refs ([]api.SealedRef) (slice) + for i := uint64(0); i < n; i++ { - { - sval, err := cbg.ReadString(br) - if err != nil { - return err + { + sval, err := cbg.ReadStringBuf(br, scratch) + if err != nil { + return err + } + + name = string(sval) } - name = string(sval) - } + switch name { + // t.Refs ([]api.SealedRef) (slice) + case "Refs": 
- if name != "Refs" { - return fmt.Errorf("expected struct map entry %s to be Refs", name) - } + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err - } + if extra > cbg.MaxLength { + return fmt.Errorf("t.Refs: array too large (%d)", extra) + } - if extra > cbg.MaxLength { - return fmt.Errorf("t.Refs: array too large (%d)", extra) - } + if maj != cbg.MajArray { + return fmt.Errorf("expected cbor array") + } - if maj != cbg.MajArray { - return fmt.Errorf("expected cbor array") - } - if extra > 0 { - t.Refs = make([]SealedRef, extra) - } - for i := 0; i < int(extra); i++ { + if extra > 0 { + t.Refs = make([]SealedRef, extra) + } - var v SealedRef - if err := v.UnmarshalCBOR(br); err != nil { - return err + for i := 0; i < int(extra); i++ { + + var v SealedRef + if err := v.UnmarshalCBOR(br); err != nil { + return err + } + + t.Refs[i] = v + } + + default: + return fmt.Errorf("unknown struct field %d: '%s'", i, name) + } + } + + return nil +} +func (t *SealTicket) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write([]byte{162}); err != nil { + return err + } + + scratch := make([]byte, 9) + + // t.Value (abi.SealRandomness) (slice) + if len("Value") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Value\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("Value"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("Value")); err != nil { + return err + } + + if len(t.Value) > cbg.ByteArrayMaxLen { + return xerrors.Errorf("Byte array in field t.Value was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajByteString, uint64(len(t.Value))); err != nil { + return err + } + + if _, err := w.Write(t.Value[:]); err != nil { + return err + } + + // t.Epoch (abi.ChainEpoch) (int64) + if 
len("Epoch") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Epoch\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("Epoch"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("Epoch")); err != nil { + return err + } + + if t.Epoch >= 0 { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Epoch)); err != nil { + return err + } + } else { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.Epoch-1)); err != nil { + return err + } + } + return nil +} + +func (t *SealTicket) UnmarshalCBOR(r io.Reader) error { + *t = SealTicket{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajMap { + return fmt.Errorf("cbor input should be of type map") + } + + if extra > cbg.MaxLength { + return fmt.Errorf("SealTicket: map struct too large (%d)", extra) + } + + var name string + n := extra + + for i := uint64(0); i < n; i++ { + + { + sval, err := cbg.ReadStringBuf(br, scratch) + if err != nil { + return err + } + + name = string(sval) + } + + switch name { + // t.Value (abi.SealRandomness) (slice) + case "Value": + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + + if extra > cbg.ByteArrayMaxLen { + return fmt.Errorf("t.Value: byte array too large (%d)", extra) + } + if maj != cbg.MajByteString { + return fmt.Errorf("expected byte array") + } + + if extra > 0 { + t.Value = make([]uint8, extra) + } + + if _, err := io.ReadFull(br, t.Value[:]); err != nil { + return err + } + // t.Epoch (abi.ChainEpoch) (int64) + case "Epoch": + { + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + var extraI int64 + if err != nil { + return err + } + switch maj { + case cbg.MajUnsignedInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 positive overflow") + } + case 
cbg.MajNegativeInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 negative oveflow") + } + extraI = -1 - extraI + default: + return fmt.Errorf("wrong type for int64 field: %d", maj) + } + + t.Epoch = abi.ChainEpoch(extraI) + } + + default: + return fmt.Errorf("unknown struct field %d: '%s'", i, name) + } + } + + return nil +} +func (t *SealSeed) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write([]byte{162}); err != nil { + return err + } + + scratch := make([]byte, 9) + + // t.Value (abi.InteractiveSealRandomness) (slice) + if len("Value") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Value\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("Value"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("Value")); err != nil { + return err + } + + if len(t.Value) > cbg.ByteArrayMaxLen { + return xerrors.Errorf("Byte array in field t.Value was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajByteString, uint64(len(t.Value))); err != nil { + return err + } + + if _, err := w.Write(t.Value[:]); err != nil { + return err + } + + // t.Epoch (abi.ChainEpoch) (int64) + if len("Epoch") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Epoch\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("Epoch"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("Epoch")); err != nil { + return err + } + + if t.Epoch >= 0 { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Epoch)); err != nil { + return err + } + } else { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.Epoch-1)); err != nil { + return err + } + } + return nil +} + +func (t *SealSeed) UnmarshalCBOR(r io.Reader) error { + *t = SealSeed{} + + br := cbg.GetPeeker(r) + 
scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajMap { + return fmt.Errorf("cbor input should be of type map") + } + + if extra > cbg.MaxLength { + return fmt.Errorf("SealSeed: map struct too large (%d)", extra) + } + + var name string + n := extra + + for i := uint64(0); i < n; i++ { + + { + sval, err := cbg.ReadStringBuf(br, scratch) + if err != nil { + return err + } + + name = string(sval) + } + + switch name { + // t.Value (abi.InteractiveSealRandomness) (slice) + case "Value": + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + + if extra > cbg.ByteArrayMaxLen { + return fmt.Errorf("t.Value: byte array too large (%d)", extra) + } + if maj != cbg.MajByteString { + return fmt.Errorf("expected byte array") + } + + if extra > 0 { + t.Value = make([]uint8, extra) + } + + if _, err := io.ReadFull(br, t.Value[:]); err != nil { + return err + } + // t.Epoch (abi.ChainEpoch) (int64) + case "Epoch": + { + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + var extraI int64 + if err != nil { + return err + } + switch maj { + case cbg.MajUnsignedInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 positive overflow") + } + case cbg.MajNegativeInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 negative oveflow") + } + extraI = -1 - extraI + default: + return fmt.Errorf("wrong type for int64 field: %d", maj) + } + + t.Epoch = abi.ChainEpoch(extraI) + } + + default: + return fmt.Errorf("unknown struct field %d: '%s'", i, name) } - - t.Refs[i] = v } return nil diff --git a/api/client/client.go b/api/client/client.go index 0e19f65c2..cd915acf0 100644 --- a/api/client/client.go +++ b/api/client/client.go @@ -1,28 +1,36 @@ package client import ( - "github.com/filecoin-project/lotus/api/apistruct" + "context" "net/http" + "net/url" + "path" + "time" + + "github.com/filecoin-project/go-jsonrpc" 
"github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/lib/jsonrpc" + "github.com/filecoin-project/lotus/api/apistruct" + "github.com/filecoin-project/lotus/lib/rpcenc" ) // NewCommonRPC creates a new http jsonrpc client. -func NewCommonRPC(addr string, requestHeader http.Header) (api.Common, jsonrpc.ClientCloser, error) { +func NewCommonRPC(ctx context.Context, addr string, requestHeader http.Header) (api.Common, jsonrpc.ClientCloser, error) { var res apistruct.CommonStruct - closer, err := jsonrpc.NewMergeClient(addr, "Filecoin", + closer, err := jsonrpc.NewMergeClient(ctx, addr, "Filecoin", []interface{}{ &res.Internal, - }, requestHeader) + }, + requestHeader, + ) return &res, closer, err } // NewFullNodeRPC creates a new http jsonrpc client. -func NewFullNodeRPC(addr string, requestHeader http.Header) (api.FullNode, jsonrpc.ClientCloser, error) { +func NewFullNodeRPC(ctx context.Context, addr string, requestHeader http.Header) (api.FullNode, jsonrpc.ClientCloser, error) { var res apistruct.FullNodeStruct - closer, err := jsonrpc.NewMergeClient(addr, "Filecoin", + closer, err := jsonrpc.NewMergeClient(ctx, addr, "Filecoin", []interface{}{ &res.CommonStruct.Internal, &res.Internal, @@ -31,14 +39,46 @@ func NewFullNodeRPC(addr string, requestHeader http.Header) (api.FullNode, jsonr return &res, closer, err } -// NewStorageMinerRPC creates a new http jsonrpc client for storage miner -func NewStorageMinerRPC(addr string, requestHeader http.Header) (api.StorageMiner, jsonrpc.ClientCloser, error) { +// NewStorageMinerRPC creates a new http jsonrpc client for miner +func NewStorageMinerRPC(ctx context.Context, addr string, requestHeader http.Header, opts ...jsonrpc.Option) (api.StorageMiner, jsonrpc.ClientCloser, error) { var res apistruct.StorageMinerStruct - closer, err := jsonrpc.NewMergeClient(addr, "Filecoin", + closer, err := jsonrpc.NewMergeClient(ctx, addr, "Filecoin", []interface{}{ &res.CommonStruct.Internal, &res.Internal, - }, 
requestHeader) + }, + requestHeader, + opts..., + ) + + return &res, closer, err +} + +func NewWorkerRPC(ctx context.Context, addr string, requestHeader http.Header) (api.WorkerAPI, jsonrpc.ClientCloser, error) { + u, err := url.Parse(addr) + if err != nil { + return nil, nil, err + } + switch u.Scheme { + case "ws": + u.Scheme = "http" + case "wss": + u.Scheme = "https" + } + ///rpc/v0 -> /rpc/streams/v0/push + + u.Path = path.Join(u.Path, "../streams/v0/push") + + var res apistruct.WorkerStruct + closer, err := jsonrpc.NewMergeClient(ctx, addr, "Filecoin", + []interface{}{ + &res.Internal, + }, + requestHeader, + rpcenc.ReaderParamEncoder(u.String()), + jsonrpc.WithNoReconnect(), + jsonrpc.WithTimeout(30*time.Second), + ) return &res, closer, err } diff --git a/api/docgen/docgen.go b/api/docgen/docgen.go new file mode 100644 index 000000000..ced536cc3 --- /dev/null +++ b/api/docgen/docgen.go @@ -0,0 +1,420 @@ +package main + +import ( + "encoding/json" + "fmt" + "go/ast" + "go/parser" + "go/token" + "reflect" + "sort" + "strings" + "time" + "unicode" + + "github.com/ipfs/go-cid" + "github.com/ipfs/go-filestore" + metrics "github.com/libp2p/go-libp2p-core/metrics" + "github.com/libp2p/go-libp2p-core/network" + "github.com/libp2p/go-libp2p-core/peer" + protocol "github.com/libp2p/go-libp2p-core/protocol" + pubsub "github.com/libp2p/go-libp2p-pubsub" + "github.com/multiformats/go-multiaddr" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-bitfield" + datatransfer "github.com/filecoin-project/go-data-transfer" + "github.com/filecoin-project/go-fil-markets/retrievalmarket" + "github.com/filecoin-project/go-jsonrpc/auth" + "github.com/filecoin-project/go-multistore" + + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/crypto" + "github.com/filecoin-project/go-state-types/exitcode" + + "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/api/apistruct" + 
"github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/node/modules/dtypes" +) + +var ExampleValues = map[reflect.Type]interface{}{ + reflect.TypeOf(auth.Permission("")): auth.Permission("write"), + reflect.TypeOf(""): "string value", + reflect.TypeOf(uint64(42)): uint64(42), + reflect.TypeOf(byte(7)): byte(7), + reflect.TypeOf([]byte{}): []byte("byte array"), +} + +func addExample(v interface{}) { + ExampleValues[reflect.TypeOf(v)] = v +} + +func init() { + c, err := cid.Decode("bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4") + if err != nil { + panic(err) + } + + ExampleValues[reflect.TypeOf(c)] = c + + c2, err := cid.Decode("bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve") + if err != nil { + panic(err) + } + + tsk := types.NewTipSetKey(c, c2) + + ExampleValues[reflect.TypeOf(tsk)] = tsk + + addr, err := address.NewIDAddress(1234) + if err != nil { + panic(err) + } + + ExampleValues[reflect.TypeOf(addr)] = addr + + pid, err := peer.Decode("12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf") + if err != nil { + panic(err) + } + addExample(pid) + addExample(&pid) + + addExample(bitfield.NewFromSet([]uint64{5})) + addExample(abi.RegisteredSealProof_StackedDrg32GiBV1) + addExample(abi.RegisteredPoStProof_StackedDrgWindow32GiBV1) + addExample(abi.ChainEpoch(10101)) + addExample(crypto.SigTypeBLS) + addExample(int64(9)) + addExample(12.3) + addExample(123) + addExample(uintptr(0)) + addExample(abi.MethodNum(1)) + addExample(exitcode.ExitCode(0)) + addExample(crypto.DomainSeparationTag_ElectionProofProduction) + addExample(true) + addExample(abi.UnpaddedPieceSize(1024)) + addExample(abi.UnpaddedPieceSize(1024).Padded()) + addExample(abi.DealID(5432)) + addExample(filestore.StatusFileChanged) + addExample(abi.SectorNumber(9)) + addExample(abi.SectorSize(32 * 1024 * 1024 * 1024)) + addExample(api.MpoolChange(0)) + addExample(network.Connected) + 
addExample(dtypes.NetworkName("lotus")) + addExample(api.SyncStateStage(1)) + addExample(build.FullAPIVersion) + addExample(api.PCHInbound) + addExample(time.Minute) + addExample(datatransfer.TransferID(3)) + addExample(datatransfer.Ongoing) + addExample(multistore.StoreID(50)) + addExample(retrievalmarket.ClientEventDealAccepted) + addExample(retrievalmarket.DealStatusNew) + addExample(network.ReachabilityPublic) + addExample(build.NewestNetworkVersion) + addExample(&types.ExecutionTrace{ + Msg: exampleValue(reflect.TypeOf(&types.Message{}), nil).(*types.Message), + MsgRct: exampleValue(reflect.TypeOf(&types.MessageReceipt{}), nil).(*types.MessageReceipt), + }) + addExample(map[string]types.Actor{ + "t01236": exampleValue(reflect.TypeOf(types.Actor{}), nil).(types.Actor), + }) + addExample(map[string]api.MarketDeal{ + "t026363": exampleValue(reflect.TypeOf(api.MarketDeal{}), nil).(api.MarketDeal), + }) + addExample(map[string]api.MarketBalance{ + "t026363": exampleValue(reflect.TypeOf(api.MarketBalance{}), nil).(api.MarketBalance), + }) + addExample(map[string]*pubsub.TopicScoreSnapshot{ + "/blocks": { + TimeInMesh: time.Minute, + FirstMessageDeliveries: 122, + MeshMessageDeliveries: 1234, + InvalidMessageDeliveries: 3, + }, + }) + addExample(map[string]metrics.Stats{ + "12D3KooWSXmXLJmBR1M7i9RW9GQPNUhZSzXKzxDHWtAgNuJAbyEJ": { + RateIn: 100, + RateOut: 50, + TotalIn: 174000, + TotalOut: 12500, + }, + }) + addExample(map[protocol.ID]metrics.Stats{ + "/fil/hello/1.0.0": { + RateIn: 100, + RateOut: 50, + TotalIn: 174000, + TotalOut: 12500, + }, + }) + + maddr, err := multiaddr.NewMultiaddr("/ip4/52.36.61.156/tcp/1347/p2p/12D3KooWFETiESTf1v4PGUvtnxMAcEFMzLZbJGg4tjWfGEimYior") + if err != nil { + panic(err) + } + + // because reflect.TypeOf(maddr) returns the concrete type... 
+ ExampleValues[reflect.TypeOf(struct{ A multiaddr.Multiaddr }{}).Field(0).Type] = maddr + +} + +func exampleValue(t, parent reflect.Type) interface{} { + v, ok := ExampleValues[t] + if ok { + return v + } + + switch t.Kind() { + case reflect.Slice: + out := reflect.New(t).Elem() + reflect.Append(out, reflect.ValueOf(exampleValue(t.Elem(), t))) + return out.Interface() + case reflect.Chan: + return exampleValue(t.Elem(), nil) + case reflect.Struct: + es := exampleStruct(t, parent) + v := reflect.ValueOf(es).Elem().Interface() + ExampleValues[t] = v + return v + case reflect.Array: + out := reflect.New(t).Elem() + for i := 0; i < t.Len(); i++ { + out.Index(i).Set(reflect.ValueOf(exampleValue(t.Elem(), t))) + } + return out.Interface() + + case reflect.Ptr: + if t.Elem().Kind() == reflect.Struct { + es := exampleStruct(t.Elem(), t) + //ExampleValues[t] = es + return es + } + case reflect.Interface: + return struct{}{} + } + + panic(fmt.Sprintf("No example value for type: %s", t)) +} + +func exampleStruct(t, parent reflect.Type) interface{} { + ns := reflect.New(t) + for i := 0; i < t.NumField(); i++ { + f := t.Field(i) + if f.Type == parent { + continue + } + if strings.Title(f.Name) == f.Name { + ns.Elem().Field(i).Set(reflect.ValueOf(exampleValue(f.Type, t))) + } + } + + return ns.Interface() +} + +type Visitor struct { + Methods map[string]ast.Node +} + +func (v *Visitor) Visit(node ast.Node) ast.Visitor { + st, ok := node.(*ast.TypeSpec) + if !ok { + return v + } + + if st.Name.Name != "FullNode" { + return nil + } + + iface := st.Type.(*ast.InterfaceType) + for _, m := range iface.Methods.List { + if len(m.Names) > 0 { + v.Methods[m.Names[0].Name] = m + } + } + + return v +} + +const noComment = "There are not yet any comments for this method." 
+ +func parseApiASTInfo() (map[string]string, map[string]string) { //nolint:golint + fset := token.NewFileSet() + pkgs, err := parser.ParseDir(fset, "./api", nil, parser.AllErrors|parser.ParseComments) + if err != nil { + fmt.Println("parse error: ", err) + } + + ap := pkgs["api"] + + f := ap.Files["api/api_full.go"] + + cmap := ast.NewCommentMap(fset, f, f.Comments) + + v := &Visitor{make(map[string]ast.Node)} + ast.Walk(v, pkgs["api"]) + + groupDocs := make(map[string]string) + out := make(map[string]string) + for mn, node := range v.Methods { + cs := cmap.Filter(node).Comments() + if len(cs) == 0 { + out[mn] = noComment + } else { + for _, c := range cs { + if strings.HasPrefix(c.Text(), "MethodGroup:") { + parts := strings.Split(c.Text(), "\n") + groupName := strings.TrimSpace(parts[0][12:]) + comment := strings.Join(parts[1:], "\n") + groupDocs[groupName] = comment + + break + } + } + + last := cs[len(cs)-1].Text() + if !strings.HasPrefix(last, "MethodGroup:") { + out[mn] = last + } else { + out[mn] = noComment + } + } + } + return out, groupDocs +} + +type MethodGroup struct { + GroupName string + Header string + Methods []*Method +} + +type Method struct { + Comment string + Name string + InputExample string + ResponseExample string +} + +func methodGroupFromName(mn string) string { + i := strings.IndexFunc(mn[1:], func(r rune) bool { + return unicode.IsUpper(r) + }) + if i < 0 { + return "" + } + return mn[:i+1] +} + +func main() { + + comments, groupComments := parseApiASTInfo() + + groups := make(map[string]*MethodGroup) + + var api struct{ api.FullNode } + t := reflect.TypeOf(api) + for i := 0; i < t.NumMethod(); i++ { + m := t.Method(i) + + groupName := methodGroupFromName(m.Name) + + g, ok := groups[groupName] + if !ok { + g = new(MethodGroup) + g.Header = groupComments[groupName] + g.GroupName = groupName + groups[groupName] = g + } + + var args []interface{} + ft := m.Func.Type() + for j := 2; j < ft.NumIn(); j++ { + inp := ft.In(j) + args = 
append(args, exampleValue(inp, nil)) + } + + v, err := json.MarshalIndent(args, "", " ") + if err != nil { + panic(err) + } + + outv := exampleValue(ft.Out(0), nil) + + ov, err := json.MarshalIndent(outv, "", " ") + if err != nil { + panic(err) + } + + g.Methods = append(g.Methods, &Method{ + Name: m.Name, + Comment: comments[m.Name], + InputExample: string(v), + ResponseExample: string(ov), + }) + } + + var groupslice []*MethodGroup + for _, g := range groups { + groupslice = append(groupslice, g) + } + + sort.Slice(groupslice, func(i, j int) bool { + return groupslice[i].GroupName < groupslice[j].GroupName + }) + + fmt.Printf("# Groups\n") + + for _, g := range groupslice { + fmt.Printf("* [%s](#%s)\n", g.GroupName, g.GroupName) + for _, method := range g.Methods { + fmt.Printf(" * [%s](#%s)\n", method.Name, method.Name) + } + } + + permStruct := reflect.TypeOf(apistruct.FullNodeStruct{}.Internal) + commonPermStruct := reflect.TypeOf(apistruct.CommonStruct{}.Internal) + + for _, g := range groupslice { + g := g + fmt.Printf("## %s\n", g.GroupName) + fmt.Printf("%s\n\n", g.Header) + + sort.Slice(g.Methods, func(i, j int) bool { + return g.Methods[i].Name < g.Methods[j].Name + }) + + for _, m := range g.Methods { + fmt.Printf("### %s\n", m.Name) + fmt.Printf("%s\n\n", m.Comment) + + meth, ok := permStruct.FieldByName(m.Name) + if !ok { + meth, ok = commonPermStruct.FieldByName(m.Name) + if !ok { + panic("no perms for method: " + m.Name) + } + } + + perms := meth.Tag.Get("perm") + + fmt.Printf("Perms: %s\n\n", perms) + + if strings.Count(m.InputExample, "\n") > 0 { + fmt.Printf("Inputs:\n```json\n%s\n```\n\n", m.InputExample) + } else { + fmt.Printf("Inputs: `%s`\n\n", m.InputExample) + } + + if strings.Count(m.ResponseExample, "\n") > 0 { + fmt.Printf("Response:\n```json\n%s\n```\n\n", m.ResponseExample) + } else { + fmt.Printf("Response: `%s`\n\n", m.ResponseExample) + } + } + } +} diff --git a/api/test/blockminer.go b/api/test/blockminer.go new file mode 100644 
index 000000000..6b28a5794 --- /dev/null +++ b/api/test/blockminer.go @@ -0,0 +1,56 @@ +package test + +import ( + "context" + "fmt" + "sync/atomic" + "testing" + "time" + + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/lotus/miner" +) + +type BlockMiner struct { + ctx context.Context + t *testing.T + miner TestStorageNode + blocktime time.Duration + mine int64 + nulls int64 + done chan struct{} +} + +func NewBlockMiner(ctx context.Context, t *testing.T, miner TestStorageNode, blocktime time.Duration) *BlockMiner { + return &BlockMiner{ + ctx: ctx, + t: t, + miner: miner, + blocktime: blocktime, + mine: int64(1), + done: make(chan struct{}), + } +} + +func (bm *BlockMiner) MineBlocks() { + time.Sleep(time.Second) + go func() { + defer close(bm.done) + for atomic.LoadInt64(&bm.mine) == 1 { + time.Sleep(bm.blocktime) + nulls := atomic.SwapInt64(&bm.nulls, 0) + if err := bm.miner.MineOne(bm.ctx, miner.MineReq{ + InjectNulls: abi.ChainEpoch(nulls), + Done: func(bool, abi.ChainEpoch, error) {}, + }); err != nil { + bm.t.Error(err) + } + } + }() +} + +func (bm *BlockMiner) Stop() { + atomic.AddInt64(&bm.mine, -1) + fmt.Println("shutting down mining") + <-bm.done +} diff --git a/api/test/ccupgrade.go b/api/test/ccupgrade.go new file mode 100644 index 000000000..f58f1ff6e --- /dev/null +++ b/api/test/ccupgrade.go @@ -0,0 +1,116 @@ +package test + +import ( + "context" + "fmt" + "os" + "sync/atomic" + "testing" + "time" + + "github.com/stretchr/testify/require" + + "github.com/filecoin-project/go-state-types/abi" + + "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/node/impl" +) + +func TestCCUpgrade(t *testing.T, b APIBuilder, blocktime time.Duration) { + _ = os.Setenv("BELLMAN_NO_GPU", "1") + + ctx := context.Background() + n, sn := b(t, 1, OneMiner) + client := n[0].FullNode.(*impl.FullNodeAPI) + miner := sn[0] + + addrinfo, err := client.NetAddrsListen(ctx) + if err != nil { + t.Fatal(err) + } + + if 
err := miner.NetConnect(ctx, addrinfo); err != nil { + t.Fatal(err) + } + time.Sleep(time.Second) + + mine := int64(1) + done := make(chan struct{}) + go func() { + defer close(done) + for atomic.LoadInt64(&mine) == 1 { + time.Sleep(blocktime) + if err := sn[0].MineOne(ctx, MineNext); err != nil { + t.Error(err) + } + } + }() + + maddr, err := miner.ActorAddress(ctx) + if err != nil { + t.Fatal(err) + } + + CC := abi.SectorNumber(GenesisPreseals + 1) + Upgraded := CC + 1 + + pledgeSectors(t, ctx, miner, 1, 0, nil) + + sl, err := miner.SectorsList(ctx) + if err != nil { + t.Fatal(err) + } + if len(sl) != 1 { + t.Fatal("expected 1 sector") + } + + if sl[0] != CC { + t.Fatal("bad") + } + + { + si, err := client.StateSectorGetInfo(ctx, maddr, CC, types.EmptyTSK) + require.NoError(t, err) + require.Less(t, 50000, int(si.Expiration)) + } + + if err := miner.SectorMarkForUpgrade(ctx, sl[0]); err != nil { + t.Fatal(err) + } + + makeDeal(t, ctx, 6, client, miner, false, false) + + // Validate upgrade + + { + exp, err := client.StateSectorExpiration(ctx, maddr, CC, types.EmptyTSK) + require.NoError(t, err) + require.NotNil(t, exp) + require.Greater(t, 50000, int(exp.OnTime)) + } + { + exp, err := client.StateSectorExpiration(ctx, maddr, Upgraded, types.EmptyTSK) + require.NoError(t, err) + require.Less(t, 50000, int(exp.OnTime)) + } + + dlInfo, err := client.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK) + require.NoError(t, err) + + // Sector should expire. + for { + // Wait for the sector to expire. + status, err := miner.SectorsStatus(ctx, CC, true) + require.NoError(t, err) + if status.OnTime == 0 && status.Early == 0 { + break + } + t.Log("waiting for sector to expire") + // wait one deadline per loop. 
+ time.Sleep(time.Duration(dlInfo.WPoStChallengeWindow) * blocktime) + } + + fmt.Println("shutting down mining") + atomic.AddInt64(&mine, -1) + <-done +} diff --git a/api/test/deals.go b/api/test/deals.go index b9ff2611f..12cd0607a 100644 --- a/api/test/deals.go +++ b/api/test/deals.go @@ -8,27 +8,47 @@ import ( "math/rand" "os" "path/filepath" + "sync/atomic" "testing" "time" - logging "github.com/ipfs/go-log" + "github.com/stretchr/testify/require" + "github.com/ipfs/go-cid" + files "github.com/ipfs/go-ipfs-files" + logging "github.com/ipfs/go-log/v2" + "github.com/ipld/go-car" + + "github.com/filecoin-project/go-fil-markets/storagemarket" + "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/build" + sealing "github.com/filecoin-project/lotus/extern/storage-sealing" + "github.com/filecoin-project/lotus/miner" + dag "github.com/ipfs/go-merkledag" + dstest "github.com/ipfs/go-merkledag/test" + unixfile "github.com/ipfs/go-unixfs/file" + "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/node/impl" + ipld "github.com/ipfs/go-ipld-format" ) +var MineNext = miner.MineReq{ + InjectNulls: 0, + Done: func(bool, abi.ChainEpoch, error) {}, +} + func init() { logging.SetAllLoggers(logging.LevelInfo) build.InsecurePoStValidation = true } -func TestDealFlow(t *testing.T, b APIBuilder) { - os.Setenv("BELLMAN_NO_GPU", "1") +func TestDealFlow(t *testing.T, b APIBuilder, blocktime time.Duration, carExport, fastRet bool) { + _ = os.Setenv("BELLMAN_NO_GPU", "1") ctx := context.Background() - n, sn := b(t, 1, []int{0}) + n, sn := b(t, 1, OneMiner) client := n[0].FullNode.(*impl.FullNodeAPI) miner := sn[0] @@ -42,8 +62,67 @@ func TestDealFlow(t *testing.T, b APIBuilder) { } time.Sleep(time.Second) - data := make([]byte, 1000) - rand.New(rand.NewSource(5)).Read(data) + mine := int64(1) + done := make(chan struct{}) + go func() { + defer close(done) + for atomic.LoadInt64(&mine) == 
1 { + time.Sleep(blocktime) + if err := sn[0].MineOne(ctx, MineNext); err != nil { + t.Error(err) + } + } + }() + + makeDeal(t, ctx, 6, client, miner, carExport, fastRet) + + atomic.AddInt64(&mine, -1) + fmt.Println("shutting down mining") + <-done +} + +func TestDoubleDealFlow(t *testing.T, b APIBuilder, blocktime time.Duration) { + _ = os.Setenv("BELLMAN_NO_GPU", "1") + + ctx := context.Background() + n, sn := b(t, 1, OneMiner) + client := n[0].FullNode.(*impl.FullNodeAPI) + miner := sn[0] + + addrinfo, err := client.NetAddrsListen(ctx) + if err != nil { + t.Fatal(err) + } + + if err := miner.NetConnect(ctx, addrinfo); err != nil { + t.Fatal(err) + } + time.Sleep(time.Second) + + mine := int64(1) + done := make(chan struct{}) + + go func() { + defer close(done) + for atomic.LoadInt64(&mine) == 1 { + time.Sleep(blocktime) + if err := sn[0].MineOne(ctx, MineNext); err != nil { + t.Error(err) + } + } + }() + + makeDeal(t, ctx, 6, client, miner, false, false) + makeDeal(t, ctx, 7, client, miner, false, false) + + atomic.AddInt64(&mine, -1) + fmt.Println("shutting down mining") + <-done +} + +func makeDeal(t *testing.T, ctx context.Context, rseed int, client *impl.FullNodeAPI, miner TestStorageNode, carExport, fastRet bool) { + data := make([]byte, 1600) + rand.New(rand.NewSource(int64(rseed))).Read(data) r := bytes.NewReader(data) fcid, err := client.ClientImportLocal(ctx, r) @@ -51,37 +130,180 @@ func TestDealFlow(t *testing.T, b APIBuilder) { t.Fatal(err) } - maddr, err := miner.ActorAddress(ctx) + fmt.Println("FILE CID: ", fcid) + + deal := startDeal(t, ctx, miner, client, fcid, fastRet) + + // TODO: this sleep is only necessary because deals don't immediately get logged in the dealstore, we should fix this + time.Sleep(time.Second) + waitDealSealed(t, ctx, miner, client, deal, false) + + // Retrieval + info, err := client.ClientGetDealInfo(ctx, *deal) + require.NoError(t, err) + + testRetrieval(t, ctx, client, fcid, &info.PieceCID, carExport, data) +} + +func 
TestFastRetrievalDealFlow(t *testing.T, b APIBuilder, blocktime time.Duration) { + _ = os.Setenv("BELLMAN_NO_GPU", "1") + + ctx := context.Background() + n, sn := b(t, 1, OneMiner) + client := n[0].FullNode.(*impl.FullNodeAPI) + miner := sn[0] + + addrinfo, err := client.NetAddrsListen(ctx) + if err != nil { + t.Fatal(err) + } + + if err := miner.NetConnect(ctx, addrinfo); err != nil { + t.Fatal(err) + } + time.Sleep(time.Second) + + mine := int64(1) + done := make(chan struct{}) + go func() { + defer close(done) + for atomic.LoadInt64(&mine) == 1 { + time.Sleep(blocktime) + if err := sn[0].MineOne(ctx, MineNext); err != nil { + t.Error(err) + } + } + }() + + data := make([]byte, 1600) + rand.New(rand.NewSource(int64(8))).Read(data) + + r := bytes.NewReader(data) + fcid, err := client.ClientImportLocal(ctx, r) if err != nil { t.Fatal(err) } fmt.Println("FILE CID: ", fcid) - mine := true + deal := startDeal(t, ctx, miner, client, fcid, true) + + waitDealPublished(t, ctx, miner, deal) + fmt.Println("deal published, retrieving") + // Retrieval + info, err := client.ClientGetDealInfo(ctx, *deal) + require.NoError(t, err) + + testRetrieval(t, ctx, client, fcid, &info.PieceCID, false, data) + atomic.AddInt64(&mine, -1) + fmt.Println("shutting down mining") + <-done +} + +func TestSenondDealRetrieval(t *testing.T, b APIBuilder, blocktime time.Duration) { + _ = os.Setenv("BELLMAN_NO_GPU", "1") + + ctx := context.Background() + n, sn := b(t, 1, OneMiner) + client := n[0].FullNode.(*impl.FullNodeAPI) + miner := sn[0] + + addrinfo, err := client.NetAddrsListen(ctx) + if err != nil { + t.Fatal(err) + } + + if err := miner.NetConnect(ctx, addrinfo); err != nil { + t.Fatal(err) + } + time.Sleep(time.Second) + + mine := int64(1) done := make(chan struct{}) go func() { defer close(done) - for mine { - time.Sleep(time.Second) - fmt.Println("mining a block now") - if err := sn[0].MineOne(ctx); err != nil { + for atomic.LoadInt64(&mine) == 1 { + time.Sleep(blocktime) + if err := 
sn[0].MineOne(ctx, MineNext); err != nil { t.Error(err) } } }() - addr, err := client.WalletDefaultAddress(ctx) - if err != nil { - t.Fatal(err) + + { + data1 := make([]byte, 800) + rand.New(rand.NewSource(int64(3))).Read(data1) + r := bytes.NewReader(data1) + + fcid1, err := client.ClientImportLocal(ctx, r) + if err != nil { + t.Fatal(err) + } + + data2 := make([]byte, 800) + rand.New(rand.NewSource(int64(9))).Read(data2) + r2 := bytes.NewReader(data2) + + fcid2, err := client.ClientImportLocal(ctx, r2) + if err != nil { + t.Fatal(err) + } + + deal1 := startDeal(t, ctx, miner, client, fcid1, true) + + // TODO: this sleep is only necessary because deals don't immediately get logged in the dealstore, we should fix this + time.Sleep(time.Second) + waitDealSealed(t, ctx, miner, client, deal1, true) + + deal2 := startDeal(t, ctx, miner, client, fcid2, true) + + time.Sleep(time.Second) + waitDealSealed(t, ctx, miner, client, deal2, false) + + // Retrieval + info, err := client.ClientGetDealInfo(ctx, *deal2) + require.NoError(t, err) + + rf, _ := miner.SectorsRefs(ctx) + fmt.Printf("refs: %+v\n", rf) + + testRetrieval(t, ctx, client, fcid2, &info.PieceCID, false, data2) } - deal, err := client.ClientStartDeal(ctx, fcid, addr, maddr, types.NewInt(40000000), 100) + + atomic.AddInt64(&mine, -1) + fmt.Println("shutting down mining") + <-done +} + +func startDeal(t *testing.T, ctx context.Context, miner TestStorageNode, client *impl.FullNodeAPI, fcid cid.Cid, fastRet bool) *cid.Cid { + maddr, err := miner.ActorAddress(ctx) if err != nil { t.Fatal(err) } - // TODO: this sleep is only necessary because deals don't immediately get logged in the dealstore, we should fix this - time.Sleep(time.Second) + addr, err := client.WalletDefaultAddress(ctx) + if err != nil { + t.Fatal(err) + } + deal, err := client.ClientStartDeal(ctx, &api.StartDealParams{ + Data: &storagemarket.DataRef{ + TransferType: storagemarket.TTGraphsync, + Root: fcid, + }, + Wallet: addr, + Miner: maddr, + 
EpochPrice: types.NewInt(1000000), + MinBlocksDuration: uint64(build.MinDealDuration), + FastRetrieval: fastRet, + }) + if err != nil { + t.Fatalf("%+v", err) + } + return deal +} + +func waitDealSealed(t *testing.T, ctx context.Context, miner TestStorageNode, client *impl.FullNodeAPI, deal *cid.Cid, noseal bool) { loop: for { di, err := client.ClientGetDealInfo(ctx, *deal) @@ -89,23 +311,73 @@ loop: t.Fatal(err) } switch di.State { - case api.DealRejected: + case storagemarket.StorageDealSealing: + if noseal { + return + } + startSealingWaiting(t, ctx, miner) + case storagemarket.StorageDealProposalRejected: t.Fatal("deal rejected") - case api.DealFailed: + case storagemarket.StorageDealFailing: t.Fatal("deal failed") - case api.DealError: - t.Fatal("deal errored") - case api.DealComplete: + case storagemarket.StorageDealError: + t.Fatal("deal errored", di.Message) + case storagemarket.StorageDealActive: fmt.Println("COMPLETE", di) break loop } - fmt.Println("Deal state: ", api.DealStates[di.State]) + fmt.Println("Deal state: ", storagemarket.DealStates[di.State]) time.Sleep(time.Second / 2) } +} - // Retrieval +func waitDealPublished(t *testing.T, ctx context.Context, miner TestStorageNode, deal *cid.Cid) { + subCtx, cancel := context.WithCancel(ctx) + defer cancel() + updates, err := miner.MarketGetDealUpdates(subCtx) + if err != nil { + t.Fatal(err) + } + for { + select { + case <-ctx.Done(): + t.Fatal("context timeout") + case di := <-updates: + if deal.Equals(di.ProposalCid) { + switch di.State { + case storagemarket.StorageDealProposalRejected: + t.Fatal("deal rejected") + case storagemarket.StorageDealFailing: + t.Fatal("deal failed") + case storagemarket.StorageDealError: + t.Fatal("deal errored", di.Message) + case storagemarket.StorageDealFinalizing, storagemarket.StorageDealSealing, storagemarket.StorageDealActive: + fmt.Println("COMPLETE", di) + return + } + fmt.Println("Deal state: ", storagemarket.DealStates[di.State]) + } + } + } +} - offers, err := 
client.ClientFindData(ctx, fcid) +func startSealingWaiting(t *testing.T, ctx context.Context, miner TestStorageNode) { + snums, err := miner.SectorsList(ctx) + require.NoError(t, err) + + for _, snum := range snums { + si, err := miner.SectorsStatus(ctx, snum, false) + require.NoError(t, err) + + t.Logf("Sector state: %s", si.State) + if si.State == api.SectorState(sealing.WaitDeals) { + require.NoError(t, miner.SectorStartSealing(ctx, snum)) + } + } +} + +func testRetrieval(t *testing.T, ctx context.Context, client *impl.FullNodeAPI, fcid cid.Cid, piece *cid.Cid, carExport bool, data []byte) { + offers, err := client.ClientFindData(ctx, fcid, piece) if err != nil { t.Fatal(err) } @@ -118,16 +390,22 @@ loop: if err != nil { t.Fatal(err) } - defer os.RemoveAll(rpath) + defer os.RemoveAll(rpath) //nolint:errcheck caddr, err := client.WalletDefaultAddress(ctx) if err != nil { t.Fatal(err) } - err = client.ClientRetrieve(ctx, offers[0].Order(caddr), filepath.Join(rpath, "ret")) - if err != nil { - t.Fatalf("%+v", err) + ref := &api.FileRef{ + Path: filepath.Join(rpath, "ret"), + IsCAR: carExport, + } + updates, err := client.ClientRetrieveWithEvents(ctx, offers[0].Order(caddr), ref) + for update := range updates { + if update.Err != "" { + t.Fatalf("%v", err) + } } rdata, err := ioutil.ReadFile(filepath.Join(rpath, "ret")) @@ -135,11 +413,41 @@ loop: t.Fatal(err) } + if carExport { + rdata = extractCarData(t, ctx, rdata, rpath) + } + if !bytes.Equal(rdata, data) { t.Fatal("wrong data retrieved") } - - mine = false - fmt.Println("shutting down mining") - <-done +} + +func extractCarData(t *testing.T, ctx context.Context, rdata []byte, rpath string) []byte { + bserv := dstest.Bserv() + ch, err := car.LoadCar(bserv.Blockstore(), bytes.NewReader(rdata)) + if err != nil { + t.Fatal(err) + } + b, err := bserv.GetBlock(ctx, ch.Roots[0]) + if err != nil { + t.Fatal(err) + } + nd, err := ipld.Decode(b) + if err != nil { + t.Fatal(err) + } + dserv := dag.NewDAGService(bserv) + 
fil, err := unixfile.NewUnixfsFile(ctx, dserv, nd) + if err != nil { + t.Fatal(err) + } + outPath := filepath.Join(rpath, "retLoadedCAR") + if err := files.WriteTo(fil, outPath); err != nil { + t.Fatal(err) + } + rdata, err = ioutil.ReadFile(outPath) + if err != nil { + t.Fatal(err) + } + return rdata } diff --git a/api/test/mining.go b/api/test/mining.go index 77f4ec6c3..e19774a76 100644 --- a/api/test/mining.go +++ b/api/test/mining.go @@ -1,31 +1,207 @@ package test import ( + "bytes" "context" + "fmt" + "math/rand" + "os" + "sync/atomic" "testing" + "time" + logging "github.com/ipfs/go-log/v2" + + "github.com/filecoin-project/go-state-types/abi" "github.com/stretchr/testify/require" + + "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/miner" + "github.com/filecoin-project/lotus/node/impl" ) +//nolint:deadcode,varcheck +var log = logging.Logger("apitest") + func (ts *testSuite) testMining(t *testing.T) { ctx := context.Background() - apis, sn := ts.makeNodes(t, 1, []int{0}) + apis, sn := ts.makeNodes(t, 1, OneMiner) api := apis[0] - h1, err := api.ChainHead(ctx) - require.NoError(t, err) - require.Equal(t, uint64(0), h1.Height()) - newHeads, err := api.ChainNotify(ctx) require.NoError(t, err) - <-newHeads + initHead := (<-newHeads)[0] + baseHeight := initHead.Val.Height() - err = sn[0].MineOne(ctx) + h1, err := api.ChainHead(ctx) + require.NoError(t, err) + require.Equal(t, int64(h1.Height()), int64(baseHeight)) + + MineUntilBlock(ctx, t, apis[0], sn[0], nil) require.NoError(t, err) <-newHeads h2, err := api.ChainHead(ctx) require.NoError(t, err) - require.Equal(t, uint64(1), h2.Height()) + require.Greater(t, int64(h2.Height()), int64(h1.Height())) +} + +func (ts *testSuite) testMiningReal(t *testing.T) { + build.InsecurePoStValidation = false + defer func() { + build.InsecurePoStValidation = true + }() + + ctx := context.Background() + apis, sn := ts.makeNodes(t, 1, OneMiner) + api := apis[0] + + newHeads, err := 
api.ChainNotify(ctx) + require.NoError(t, err) + initHead := (<-newHeads)[0] + if initHead.Val.Height() != 2 { + <-newHeads + } + + h1, err := api.ChainHead(ctx) + require.NoError(t, err) + require.Equal(t, abi.ChainEpoch(2), h1.Height()) + + MineUntilBlock(ctx, t, apis[0], sn[0], nil) + require.NoError(t, err) + + <-newHeads + + h2, err := api.ChainHead(ctx) + require.NoError(t, err) + require.Equal(t, abi.ChainEpoch(3), h2.Height()) + + MineUntilBlock(ctx, t, apis[0], sn[0], nil) + require.NoError(t, err) + + <-newHeads + + h2, err = api.ChainHead(ctx) + require.NoError(t, err) + require.Equal(t, abi.ChainEpoch(4), h2.Height()) +} + +func TestDealMining(t *testing.T, b APIBuilder, blocktime time.Duration, carExport bool) { + _ = os.Setenv("BELLMAN_NO_GPU", "1") + + // test making a deal with a fresh miner, and see if it starts to mine + + ctx := context.Background() + n, sn := b(t, 1, []StorageMiner{ + {Full: 0, Preseal: PresealGenesis}, + {Full: 0, Preseal: 0}, // TODO: Add support for miners on non-first full node + }) + client := n[0].FullNode.(*impl.FullNodeAPI) + provider := sn[1] + genesisMiner := sn[0] + + addrinfo, err := client.NetAddrsListen(ctx) + if err != nil { + t.Fatal(err) + } + + if err := provider.NetConnect(ctx, addrinfo); err != nil { + t.Fatal(err) + } + + if err := genesisMiner.NetConnect(ctx, addrinfo); err != nil { + t.Fatal(err) + } + + time.Sleep(time.Second) + + data := make([]byte, 600) + rand.New(rand.NewSource(5)).Read(data) + + r := bytes.NewReader(data) + fcid, err := client.ClientImportLocal(ctx, r) + if err != nil { + t.Fatal(err) + } + + fmt.Println("FILE CID: ", fcid) + + var mine int32 = 1 + done := make(chan struct{}) + minedTwo := make(chan struct{}) + + m2addr, err := sn[1].ActorAddress(context.TODO()) + if err != nil { + t.Fatal(err) + } + + go func() { + defer close(done) + + complChan := minedTwo + for atomic.LoadInt32(&mine) != 0 { + wait := make(chan int) + mdone := func(mined bool, _ abi.ChainEpoch, err error) { + n 
:= 0 + if mined { + n = 1 + } + wait <- n + } + + if err := sn[0].MineOne(ctx, miner.MineReq{Done: mdone}); err != nil { + t.Error(err) + } + + if err := sn[1].MineOne(ctx, miner.MineReq{Done: mdone}); err != nil { + t.Error(err) + } + + expect := <-wait + expect += <-wait + + time.Sleep(blocktime) + if expect == 0 { + // null block + continue + } + + var nodeOneMined bool + for _, node := range sn { + mb, err := node.MiningBase(ctx) + if err != nil { + t.Error(err) + return + } + + for _, b := range mb.Blocks() { + if b.Miner == m2addr { + nodeOneMined = true + break + } + } + + } + + if nodeOneMined && complChan != nil { + close(complChan) + complChan = nil + } + + } + }() + + deal := startDeal(t, ctx, provider, client, fcid, false) + + // TODO: this sleep is only necessary because deals don't immediately get logged in the dealstore, we should fix this + time.Sleep(time.Second) + + waitDealSealed(t, ctx, provider, client, deal, false) + + <-minedTwo + + atomic.StoreInt32(&mine, 0) + fmt.Println("shutting down mining") + <-done } diff --git a/api/test/paych.go b/api/test/paych.go new file mode 100644 index 000000000..36eb2c256 --- /dev/null +++ b/api/test/paych.go @@ -0,0 +1,259 @@ +package test + +import ( + "context" + "fmt" + "os" + "sync/atomic" + "testing" + "time" + + "github.com/filecoin-project/specs-actors/actors/builtin" + + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/specs-actors/actors/builtin/paych" + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/go-address" + + "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/chain/events" + "github.com/filecoin-project/lotus/chain/events/state" + "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/chain/wallet" +) + +func TestPaymentChannels(t *testing.T, b APIBuilder, blocktime time.Duration) { + _ = 
os.Setenv("BELLMAN_NO_GPU", "1") + + ctx := context.Background() + n, sn := b(t, 2, OneMiner) + + paymentCreator := n[0] + paymentReceiver := n[1] + miner := sn[0] + + // get everyone connected + addrs, err := paymentCreator.NetAddrsListen(ctx) + if err != nil { + t.Fatal(err) + } + + if err := paymentReceiver.NetConnect(ctx, addrs); err != nil { + t.Fatal(err) + } + + if err := miner.NetConnect(ctx, addrs); err != nil { + t.Fatal(err) + } + + // start mining blocks + bm := NewBlockMiner(ctx, t, miner, blocktime) + bm.MineBlocks() + + // send some funds to register the receiver + receiverAddr, err := paymentReceiver.WalletNew(ctx, wallet.ActSigType("secp256k1")) + if err != nil { + t.Fatal(err) + } + + SendFunds(ctx, t, paymentCreator, receiverAddr, abi.NewTokenAmount(1e18)) + + // setup the payment channel + createrAddr, err := paymentCreator.WalletDefaultAddress(ctx) + if err != nil { + t.Fatal(err) + } + + channelAmt := int64(100000) + channelInfo, err := paymentCreator.PaychGet(ctx, createrAddr, receiverAddr, abi.NewTokenAmount(channelAmt)) + if err != nil { + t.Fatal(err) + } + + channel, err := paymentCreator.PaychGetWaitReady(ctx, channelInfo.WaitSentinel) + if err != nil { + t.Fatal(err) + } + + // allocate three lanes + var lanes []uint64 + for i := 0; i < 3; i++ { + lane, err := paymentCreator.PaychAllocateLane(ctx, channel) + if err != nil { + t.Fatal(err) + } + lanes = append(lanes, lane) + } + + // Make two vouchers each for each lane, then save on the other side + // Note that the voucher with a value of 2000 has a higher nonce, so it + // supersedes the voucher with a value of 1000 + for _, lane := range lanes { + vouch1, err := paymentCreator.PaychVoucherCreate(ctx, channel, abi.NewTokenAmount(1000), lane) + if err != nil { + t.Fatal(err) + } + if vouch1.Voucher == nil { + t.Fatal(fmt.Errorf("Not enough funds to create voucher: missing %d", vouch1.Shortfall)) + } + vouch2, err := paymentCreator.PaychVoucherCreate(ctx, channel, 
abi.NewTokenAmount(2000), lane) + if err != nil { + t.Fatal(err) + } + if vouch2.Voucher == nil { + t.Fatal(fmt.Errorf("Not enough funds to create voucher: missing %d", vouch2.Shortfall)) + } + delta1, err := paymentReceiver.PaychVoucherAdd(ctx, channel, vouch1.Voucher, nil, abi.NewTokenAmount(1000)) + if err != nil { + t.Fatal(err) + } + if !delta1.Equals(abi.NewTokenAmount(1000)) { + t.Fatal("voucher didn't have the right amount") + } + delta2, err := paymentReceiver.PaychVoucherAdd(ctx, channel, vouch2.Voucher, nil, abi.NewTokenAmount(1000)) + if err != nil { + t.Fatal(err) + } + if !delta2.Equals(abi.NewTokenAmount(1000)) { + t.Fatal("voucher didn't have the right amount") + } + } + + // settle the payment channel + settleMsgCid, err := paymentCreator.PaychSettle(ctx, channel) + if err != nil { + t.Fatal(err) + } + + res := waitForMessage(ctx, t, paymentCreator, settleMsgCid, time.Second*10, "settle") + if res.Receipt.ExitCode != 0 { + t.Fatal("Unable to settle payment channel") + } + + // wait for the receiver to submit their vouchers + ev := events.NewEvents(ctx, paymentCreator) + preds := state.NewStatePredicates(paymentCreator) + finished := make(chan struct{}) + err = ev.StateChanged(func(ts *types.TipSet) (done bool, more bool, err error) { + act, err := paymentCreator.StateReadState(ctx, channel, ts.Key()) + if err != nil { + return false, false, err + } + state := act.State.(paych.State) + if state.ToSend.GreaterThanEqual(abi.NewTokenAmount(6000)) { + return true, false, nil + } + return false, true, nil + }, func(oldTs, newTs *types.TipSet, states events.StateChange, curH abi.ChainEpoch) (more bool, err error) { + toSendChange := states.(*state.PayChToSendChange) + if toSendChange.NewToSend.GreaterThanEqual(abi.NewTokenAmount(6000)) { + close(finished) + return false, nil + } + return true, nil + }, func(ctx context.Context, ts *types.TipSet) error { + return nil + }, int(build.MessageConfidence)+1, build.SealRandomnessLookbackLimit, func(oldTs, newTs 
*types.TipSet) (bool, events.StateChange, error) { + return preds.OnPaymentChannelActorChanged(channel, preds.OnToSendAmountChanges())(ctx, oldTs.Key(), newTs.Key()) + }) + if err != nil { + t.Fatal(err) + } + + select { + case <-finished: + case <-time.After(time.Second): + t.Fatal("Timed out waiting for receiver to submit vouchers") + } + + // wait for the settlement period to pass before collecting + waitForBlocks(ctx, t, bm, paymentReceiver, receiverAddr, paych.SettleDelay) + + creatorPreCollectBalance, err := paymentCreator.WalletBalance(ctx, createrAddr) + if err != nil { + t.Fatal(err) + } + + // collect funds (from receiver, though either party can do it) + collectMsg, err := paymentReceiver.PaychCollect(ctx, channel) + if err != nil { + t.Fatal(err) + } + res, err = paymentReceiver.StateWaitMsg(ctx, collectMsg, 3) + if err != nil { + t.Fatal(err) + } + if res.Receipt.ExitCode != 0 { + t.Fatal("unable to collect on payment channel") + } + + // Finally, check the balance for the creator + currentCreatorBalance, err := paymentCreator.WalletBalance(ctx, createrAddr) + if err != nil { + t.Fatal(err) + } + + // The highest nonce voucher that the creator sent on each lane is 2000 + totalVouchers := int64(len(lanes) * 2000) + + // When receiver submits the tokens to the chain, creator should get a + // refund on the remaining balance, which is + // channel amount - total voucher value + expectedRefund := channelAmt - totalVouchers + delta := big.Sub(currentCreatorBalance, creatorPreCollectBalance) + if !delta.Equals(abi.NewTokenAmount(expectedRefund)) { + t.Fatalf("did not send correct funds from creator: expected %d, got %d", expectedRefund, delta) + } + + // shut down mining + bm.Stop() +} + +func waitForBlocks(ctx context.Context, t *testing.T, bm *BlockMiner, paymentReceiver TestNode, receiverAddr address.Address, count int) { + // We need to add null blocks in batches, if we add too many the chain can't sync + batchSize := 60 + for i := 0; i < count; i += 
batchSize { + size := batchSize + if i > count { + size = count - i + } + + // Add a batch of null blocks + atomic.StoreInt64(&bm.nulls, int64(size-1)) + + // Add a real block + m, err := paymentReceiver.MpoolPushMessage(ctx, &types.Message{ + To: builtin.BurntFundsActorAddr, + From: receiverAddr, + Value: types.NewInt(0), + }, nil) + if err != nil { + t.Fatal(err) + } + + _, err = paymentReceiver.StateWaitMsg(ctx, m.Cid(), 1) + if err != nil { + t.Fatal(err) + } + } +} + +func waitForMessage(ctx context.Context, t *testing.T, paymentCreator TestNode, msgCid cid.Cid, duration time.Duration, desc string) *api.MsgLookup { + ctx, cancel := context.WithTimeout(ctx, duration) + defer cancel() + + fmt.Println("Waiting for", desc) + res, err := paymentCreator.StateWaitMsg(ctx, msgCid, 1) + if err != nil { + fmt.Println("Error waiting for", desc, err) + t.Fatal(err) + } + if res.Receipt.ExitCode != 0 { + t.Fatalf("did not successfully send %s", desc) + } + fmt.Println("Confirmed", desc) + return res +} diff --git a/api/test/test.go b/api/test/test.go index 7925300be..409274ff1 100644 --- a/api/test/test.go +++ b/api/test/test.go @@ -4,19 +4,39 @@ import ( "context" "testing" + "github.com/multiformats/go-multiaddr" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/build" - "github.com/stretchr/testify/assert" + "github.com/filecoin-project/lotus/miner" ) type TestNode struct { api.FullNode + // ListenAddr is the address on which an API server is listening, if an + // API server is created for this Node + ListenAddr multiaddr.Multiaddr } type TestStorageNode struct { api.StorageMiner + // ListenAddr is the address on which an API server is listening, if an + // API server is created for this Node + ListenAddr multiaddr.Multiaddr - MineOne func(context.Context) error + MineOne func(context.Context, miner.MineReq) error +} + +var PresealGenesis = -1 + +const 
GenesisPreseals = 2 + +type StorageMiner struct { + Full int + Preseal int } // APIBuilder is a function which is invoked in test suite to provide @@ -24,7 +44,7 @@ type TestStorageNode struct { // // storage array defines storage nodes, numbers in the array specify full node // index the storage node 'belongs' to -type APIBuilder func(t *testing.T, nFull int, storage []int) ([]TestNode, []TestStorageNode) +type APIBuilder func(t *testing.T, nFull int, storage []StorageMiner) ([]TestNode, []TestStorageNode) type testSuite struct { makeNodes APIBuilder } @@ -39,25 +59,28 @@ func TestApis(t *testing.T, b APIBuilder) { t.Run("id", ts.testID) t.Run("testConnectTwo", ts.testConnectTwo) t.Run("testMining", ts.testMining) + t.Run("testMiningReal", ts.testMiningReal) } +var OneMiner = []StorageMiner{{Full: 0, Preseal: PresealGenesis}} + func (ts *testSuite) testVersion(t *testing.T) { + build.RunningNodeType = build.NodeFull + ctx := context.Background() - apis, _ := ts.makeNodes(t, 1, []int{0}) + apis, _ := ts.makeNodes(t, 1, OneMiner) api := apis[0] v, err := api.Version(ctx) if err != nil { t.Fatal(err) } - if v.Version != build.BuildVersion { - t.Error("Version didn't work properly") - } + require.Equal(t, v.Version, build.BuildVersion) } func (ts *testSuite) testID(t *testing.T) { ctx := context.Background() - apis, _ := ts.makeNodes(t, 1, []int{0}) + apis, _ := ts.makeNodes(t, 1, OneMiner) api := apis[0] id, err := api.ID(ctx) @@ -69,7 +92,7 @@ func (ts *testSuite) testID(t *testing.T) { func (ts *testSuite) testConnectTwo(t *testing.T) { ctx := context.Background() - apis, _ := ts.makeNodes(t, 2, []int{0}) + apis, _ := ts.makeNodes(t, 2, OneMiner) p, err := apis[0].NetPeers(ctx) if err != nil { diff --git a/api/test/util.go b/api/test/util.go new file mode 100644 index 000000000..8695e2e2e --- /dev/null +++ b/api/test/util.go @@ -0,0 +1,86 @@ +package test + +import ( + "context" + "testing" + "time" + + "github.com/filecoin-project/go-state-types/abi" + + 
"github.com/filecoin-project/go-address" + "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/miner" +) + +func SendFunds(ctx context.Context, t *testing.T, sender TestNode, addr address.Address, amount abi.TokenAmount) { + senderAddr, err := sender.WalletDefaultAddress(ctx) + if err != nil { + t.Fatal(err) + } + + msg := &types.Message{ + From: senderAddr, + To: addr, + Value: amount, + } + + sm, err := sender.MpoolPushMessage(ctx, msg, nil) + if err != nil { + t.Fatal(err) + } + res, err := sender.StateWaitMsg(ctx, sm.Cid(), 1) + if err != nil { + t.Fatal(err) + } + if res.Receipt.ExitCode != 0 { + t.Fatal("did not successfully send money") + } +} + +func MineUntilBlock(ctx context.Context, t *testing.T, fn TestNode, sn TestStorageNode, cb func(abi.ChainEpoch)) { + for i := 0; i < 1000; i++ { + var success bool + var err error + var epoch abi.ChainEpoch + wait := make(chan struct{}) + mineErr := sn.MineOne(ctx, miner.MineReq{ + Done: func(win bool, ep abi.ChainEpoch, e error) { + success = win + err = e + epoch = ep + wait <- struct{}{} + }, + }) + if mineErr != nil { + t.Fatal(mineErr) + } + <-wait + if err != nil { + t.Fatal(err) + } + if success { + // Wait until it shows up on the given full nodes ChainHead + nloops := 50 + for i := 0; i < nloops; i++ { + ts, err := fn.ChainHead(ctx) + if err != nil { + t.Fatal(err) + } + if ts.Height() == epoch { + break + } + if i == nloops-1 { + t.Fatal("block never managed to sync to node") + } + time.Sleep(time.Millisecond * 10) + } + + if cb != nil { + cb(epoch) + } + return + } + t.Log("did not mine block, trying again", i) + } + t.Fatal("failed to mine 1000 times in a row...") +} diff --git a/api/test/window_post.go b/api/test/window_post.go new file mode 100644 index 000000000..683489a91 --- /dev/null +++ b/api/test/window_post.go @@ -0,0 +1,324 @@ +package test + +import ( + "context" + "fmt" + + "os" + "strings" + "testing" + "time" + + "github.com/stretchr/testify/require" + + 
"github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/lotus/extern/sector-storage/mock" + sealing "github.com/filecoin-project/lotus/extern/storage-sealing" + + "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/chain/types" + bminer "github.com/filecoin-project/lotus/miner" + "github.com/filecoin-project/lotus/node/impl" +) + +func init() { + err := os.Setenv("BELLMAN_NO_GPU", "1") + if err != nil { + panic(fmt.Sprintf("failed to set BELLMAN_NO_GPU env variable: %s", err)) + } +} + +func TestPledgeSector(t *testing.T, b APIBuilder, blocktime time.Duration, nSectors int) { + ctx := context.Background() + n, sn := b(t, 1, OneMiner) + client := n[0].FullNode.(*impl.FullNodeAPI) + miner := sn[0] + + addrinfo, err := client.NetAddrsListen(ctx) + if err != nil { + t.Fatal(err) + } + + if err := miner.NetConnect(ctx, addrinfo); err != nil { + t.Fatal(err) + } + build.Clock.Sleep(time.Second) + + mine := true + done := make(chan struct{}) + go func() { + defer close(done) + for mine { + build.Clock.Sleep(blocktime) + if err := sn[0].MineOne(ctx, bminer.MineReq{Done: func(bool, abi.ChainEpoch, error) { + + }}); err != nil { + t.Error(err) + } + } + }() + + pledgeSectors(t, ctx, miner, nSectors, 0, nil) + + mine = false + <-done +} + +func pledgeSectors(t *testing.T, ctx context.Context, miner TestStorageNode, n, existing int, blockNotif <-chan struct{}) { + for i := 0; i < n; i++ { + err := miner.PledgeSector(ctx) + require.NoError(t, err) + if i%3 == 0 && blockNotif != nil { + <-blockNotif + } + } + + for { + s, err := miner.SectorsList(ctx) // Note - the test builder doesn't import genesis sectors into FSM + require.NoError(t, err) + fmt.Printf("Sectors: %d\n", len(s)) + if len(s) >= n+existing { + break + } + + build.Clock.Sleep(100 * time.Millisecond) + } + + fmt.Printf("All sectors is fsm\n") + + s, err := miner.SectorsList(ctx) + 
require.NoError(t, err) + + toCheck := map[abi.SectorNumber]struct{}{} + for _, number := range s { + toCheck[number] = struct{}{} + } + + for len(toCheck) > 0 { + for n := range toCheck { + st, err := miner.SectorsStatus(ctx, n, false) + require.NoError(t, err) + if st.State == api.SectorState(sealing.Proving) { + delete(toCheck, n) + } + if strings.Contains(string(st.State), "Fail") { + t.Fatal("sector in a failed state", st.State) + } + } + + build.Clock.Sleep(100 * time.Millisecond) + fmt.Printf("WaitSeal: %d\n", len(s)) + } +} + +func TestWindowPost(t *testing.T, b APIBuilder, blocktime time.Duration, nSectors int) { + ctx := context.Background() + n, sn := b(t, 1, OneMiner) + client := n[0].FullNode.(*impl.FullNodeAPI) + miner := sn[0] + + addrinfo, err := client.NetAddrsListen(ctx) + if err != nil { + t.Fatal(err) + } + + if err := miner.NetConnect(ctx, addrinfo); err != nil { + t.Fatal(err) + } + build.Clock.Sleep(time.Second) + + mine := true + done := make(chan struct{}) + go func() { + defer close(done) + for mine { + build.Clock.Sleep(blocktime) + if err := sn[0].MineOne(ctx, MineNext); err != nil { + t.Error(err) + } + } + }() + + pledgeSectors(t, ctx, miner, nSectors, 0, nil) + + maddr, err := miner.ActorAddress(ctx) + require.NoError(t, err) + + di, err := client.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK) + require.NoError(t, err) + + mid, err := address.IDFromAddress(maddr) + require.NoError(t, err) + + fmt.Printf("Running one proving period\n") + + for { + head, err := client.ChainHead(ctx) + require.NoError(t, err) + + if head.Height() > di.PeriodStart+(di.WPoStProvingPeriod)+2 { + break + } + + if head.Height()%100 == 0 { + fmt.Printf("@%d\n", head.Height()) + } + build.Clock.Sleep(blocktime) + } + + p, err := client.StateMinerPower(ctx, maddr, types.EmptyTSK) + require.NoError(t, err) + + ssz, err := miner.ActorSectorSize(ctx, maddr) + require.NoError(t, err) + + require.Equal(t, p.MinerPower, p.TotalPower) + require.Equal(t, 
p.MinerPower.RawBytePower, types.NewInt(uint64(ssz)*uint64(nSectors+GenesisPreseals))) + + fmt.Printf("Drop some sectors\n") + + // Drop 2 sectors from deadline 2 partition 0 (full partition / deadline) + { + parts, err := client.StateMinerPartitions(ctx, maddr, 2, types.EmptyTSK) + require.NoError(t, err) + require.Greater(t, len(parts), 0) + + secs := parts[0].AllSectors + require.NoError(t, err) + n, err := secs.Count() + require.NoError(t, err) + require.Equal(t, uint64(2), n) + + // Drop the partition + err = secs.ForEach(func(sid uint64) error { + return miner.StorageMiner.(*impl.StorageMinerAPI).IStorageMgr.(*mock.SectorMgr).MarkCorrupted(abi.SectorID{ + Miner: abi.ActorID(mid), + Number: abi.SectorNumber(sid), + }, true) + }) + require.NoError(t, err) + } + + var s abi.SectorID + + // Drop 1 sectors from deadline 3 partition 0 + { + parts, err := client.StateMinerPartitions(ctx, maddr, 3, types.EmptyTSK) + require.NoError(t, err) + require.Greater(t, len(parts), 0) + + secs := parts[0].AllSectors + require.NoError(t, err) + n, err := secs.Count() + require.NoError(t, err) + require.Equal(t, uint64(2), n) + + // Drop the sector + sn, err := secs.First() + require.NoError(t, err) + + all, err := secs.All(2) + require.NoError(t, err) + fmt.Println("the sectors", all) + + s = abi.SectorID{ + Miner: abi.ActorID(mid), + Number: abi.SectorNumber(sn), + } + + err = miner.StorageMiner.(*impl.StorageMinerAPI).IStorageMgr.(*mock.SectorMgr).MarkFailed(s, true) + require.NoError(t, err) + } + + di, err = client.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK) + require.NoError(t, err) + + fmt.Printf("Go through another PP, wait for sectors to become faulty\n") + + for { + head, err := client.ChainHead(ctx) + require.NoError(t, err) + + if head.Height() > di.PeriodStart+(di.WPoStProvingPeriod)+2 { + break + } + + if head.Height()%100 == 0 { + fmt.Printf("@%d\n", head.Height()) + } + build.Clock.Sleep(blocktime) + } + + p, err = client.StateMinerPower(ctx, maddr, 
types.EmptyTSK) + require.NoError(t, err) + + require.Equal(t, p.MinerPower, p.TotalPower) + + sectors := p.MinerPower.RawBytePower.Uint64() / uint64(ssz) + require.Equal(t, nSectors+GenesisPreseals-3, int(sectors)) // -3 just removed sectors + + fmt.Printf("Recover one sector\n") + + err = miner.StorageMiner.(*impl.StorageMinerAPI).IStorageMgr.(*mock.SectorMgr).MarkFailed(s, false) + require.NoError(t, err) + + di, err = client.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK) + require.NoError(t, err) + + for { + head, err := client.ChainHead(ctx) + require.NoError(t, err) + + if head.Height() > di.PeriodStart+di.WPoStProvingPeriod+2 { + break + } + + if head.Height()%100 == 0 { + fmt.Printf("@%d\n", head.Height()) + } + build.Clock.Sleep(blocktime) + } + + p, err = client.StateMinerPower(ctx, maddr, types.EmptyTSK) + require.NoError(t, err) + + require.Equal(t, p.MinerPower, p.TotalPower) + + sectors = p.MinerPower.RawBytePower.Uint64() / uint64(ssz) + require.Equal(t, nSectors+GenesisPreseals-2, int(sectors)) // -2 not recovered sectors + + // pledge a sector after recovery + + pledgeSectors(t, ctx, miner, 1, nSectors, nil) + + { + // wait a bit more + + head, err := client.ChainHead(ctx) + require.NoError(t, err) + + waitUntil := head.Height() + 10 + + for { + head, err := client.ChainHead(ctx) + require.NoError(t, err) + + if head.Height() > waitUntil { + break + } + } + } + + p, err = client.StateMinerPower(ctx, maddr, types.EmptyTSK) + require.NoError(t, err) + + require.Equal(t, p.MinerPower, p.TotalPower) + + sectors = p.MinerPower.RawBytePower.Uint64() / uint64(ssz) + require.Equal(t, nSectors+GenesisPreseals-2+1, int(sectors)) // -2 not recovered sectors + 1 just pledged + + mine = false + <-done +} diff --git a/api/types.go b/api/types.go index cd2717bbd..a69aa28d9 100644 --- a/api/types.go +++ b/api/types.go @@ -2,41 +2,20 @@ package api import ( "encoding/json" + "fmt" + datatransfer "github.com/filecoin-project/go-data-transfer" + 
"github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/lotus/build" + "github.com/ipfs/go-cid" + + "github.com/libp2p/go-libp2p-core/peer" + pubsub "github.com/libp2p/go-libp2p-pubsub" ma "github.com/multiformats/go-multiaddr" ) -type DealState = uint64 - -const ( - DealUnknown = DealState(iota) - DealRejected // Provider didn't like the proposal - DealAccepted // Proposal accepted, data moved - DealStaged // Data put into the sector - DealSealing // Data in process of being sealed - - DealFailed - DealComplete - - // Internal - - DealError // deal failed with an unexpected error - - DealNoUpdate = DealUnknown -) - -var DealStates = []string{ - "DealUnknown", - "DealRejected", - "DealAccepted", - "DealStaged", - "DealSealing", - "DealFailed", - "DealComplete", - "DealError", -} - // TODO: check if this exists anywhere else + type MultiaddrSlice []ma.Multiaddr func (m *MultiaddrSlice) UnmarshalJSON(raw []byte) (err error) { @@ -57,3 +36,74 @@ func (m *MultiaddrSlice) UnmarshalJSON(raw []byte) (err error) { } var _ json.Unmarshaler = new(MultiaddrSlice) + +type ObjStat struct { + Size uint64 + Links uint64 +} + +type PubsubScore struct { + ID peer.ID + Score *pubsub.PeerScoreSnapshot +} + +type MessageSendSpec struct { + MaxFee abi.TokenAmount +} + +var DefaultMessageSendSpec = MessageSendSpec{ + // MaxFee of 0.1FIL + MaxFee: abi.NewTokenAmount(int64(build.FilecoinPrecision) / 10), +} + +func (ms *MessageSendSpec) Get() MessageSendSpec { + if ms == nil { + return DefaultMessageSendSpec + } + + return *ms +} + +type DataTransferChannel struct { + TransferID datatransfer.TransferID + Status datatransfer.Status + BaseCID cid.Cid + IsInitiator bool + IsSender bool + Voucher string + Message string + OtherPeer peer.ID + Transferred uint64 +} + +// NewDataTransferChannel constructs an API DataTransferChannel type from full channel state snapshot and a host id +func NewDataTransferChannel(hostID peer.ID, channelState datatransfer.ChannelState) 
DataTransferChannel { + channel := DataTransferChannel{ + TransferID: channelState.TransferID(), + Status: channelState.Status(), + BaseCID: channelState.BaseCID(), + IsSender: channelState.Sender() == hostID, + Message: channelState.Message(), + } + stringer, ok := channelState.Voucher().(fmt.Stringer) + if ok { + channel.Voucher = stringer.String() + } else { + voucherJSON, err := json.Marshal(channelState.Voucher()) + if err != nil { + channel.Voucher = fmt.Errorf("Voucher Serialization: %w", err).Error() + } else { + channel.Voucher = string(voucherJSON) + } + } + if channel.IsSender { + channel.IsInitiator = !channelState.IsPull() + channel.Transferred = channelState.Sent() + channel.OtherPeer = channelState.Recipient() + } else { + channel.IsInitiator = channelState.IsPull() + channel.Transferred = channelState.Received() + channel.OtherPeer = channelState.Sender() + } + return channel +} diff --git a/api/utils.go b/api/utils.go index 0576ad1d4..a9d02c31b 100644 --- a/api/utils.go +++ b/api/utils.go @@ -4,12 +4,12 @@ import ( "context" "github.com/filecoin-project/go-address" - "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/go-state-types/crypto" ) -type SignFunc = func(context.Context, []byte) (*types.Signature, error) +type SignFunc = func(context.Context, []byte) (*crypto.Signature, error) -type Signer func(context.Context, address.Address, []byte) (*types.Signature, error) +type Signer func(context.Context, address.Address, []byte) (*crypto.Signature, error) type Signable interface { Sign(context.Context, SignFunc) error @@ -17,7 +17,7 @@ type Signable interface { func SignWith(ctx context.Context, signer Signer, addr address.Address, signable ...Signable) error { for _, s := range signable { - err := s.Sign(ctx, func(ctx context.Context, b []byte) (*types.Signature, error) { + err := s.Sign(ctx, func(ctx context.Context, b []byte) (*crypto.Signature, error) { return signer(ctx, addr, b) }) if err != nil { diff --git 
a/bin/dist_get b/bin/dist_get deleted file mode 100755 index 39320443a..000000000 --- a/bin/dist_get +++ /dev/null @@ -1,177 +0,0 @@ -#!/bin/sh - -GOCC=${GOCC=go} - -die() { - echo "$@" >&2 - exit 1 -} - -have_binary() { - type "$1" > /dev/null 2> /dev/null -} - -check_writable() { - printf "" > "$1" && rm "$1" -} - -try_download() { - url="$1" - output="$2" - command="$3" - util_name="$(set -- $command; echo "$1")" - - if ! have_binary "$util_name"; then - return 1 - fi - - printf '==> Using %s to download "%s" to "%s"\n' "$util_name" "$url" "$output" - if eval "$command"; then - echo "==> Download complete!" - return - else - echo "error: couldn't download with $util_name ($?)" - return 1 - fi -} - -download() { - dl_url="$1" - dl_output="$2" - - test "$#" -eq "2" || die "download requires exactly two arguments, was given $@" - - if ! check_writable "$dl_output"; then - die "download error: cannot write to $dl_output" - fi - - try_download "$dl_url" "$dl_output" "wget '$dl_url' -O '$dl_output'" && return - try_download "$dl_url" "$dl_output" "curl --silent --fail --output '$dl_output' '$dl_url'" && return - try_download "$dl_url" "$dl_output" "fetch '$dl_url' -o '$dl_output'" && return - try_download "$dl_url" "$dl_output" "http '$dl_url' > '$dl_output'" && return - try_download "$dl_url" "$dl_output" "ftp -o '$dl_output' '$dl_url'" && return - - die "Unable to download $dl_url. exiting." -} - -unarchive() { - ua_archivetype="$1" - ua_infile="$2" - ua_outfile="$3" - ua_distname="$4" - ua_binpostfix="" - ua_os=$(uname -o) - - if [ "$ua_os" = "Msys" ] || [ "$ua_os" = "Cygwin" ] ; then - ua_binpostfix=".exe" - fi - ua_outfile="$ua_outfile$ua_binpostfix" - - if ! 
check_writable "$ua_outfile"; then - die "unarchive error: cannot write to $ua_outfile" - fi - - case "$ua_archivetype" in - tar.gz) - if have_binary tar; then - echo "==> using 'tar' to extract binary from archive" - < "$ua_infile" tar -Ozxf - "$ua_distname/$ua_distname$ua_binpostfix" > "$ua_outfile" \ - || die "tar has failed" - else - die "no binary on system for extracting tar files" - fi - ;; - zip) - if have_binary unzip; then - echo "==> using 'unzip' to extract binary from archive" - unzip -p "$ua_infile" "$ua_distname/$ua_distname$ua_binpostfix" > "$ua_outfile" \ - || die "unzip has failed" - else - die "no installed method for extracting .zip archives" - fi - ;; - *) - die "unrecognized archive type '$ua_archivetype'" - esac - - chmod +x "$ua_outfile" || die "chmod has failed" -} - -get_go_vars() { - if [ ! -z "$GOOS" ] && [ ! -z "$GOARCH" ]; then - printf "%s-%s" "$GOOS" "$GOARCH" - elif have_binary go; then - printf "%s-%s" "$($GOCC env GOOS)" "$($GOCC env GOARCH)" - else - die "no way of determining system GOOS and GOARCH\nPlease manually set GOOS and GOARCH then retry." 
- fi -} - -mkurl() { - m_root="$1" - m_name="$2" - m_vers="$3" - m_archive="$4" - m_govars=$(get_go_vars) || die "could not get go env vars" - - echo "https://ipfs.io$m_root/$m_name/$m_vers/${m_name}_${m_vers}_$m_govars.$m_archive" -} - -distroot="$1" -distname="$2" -outpath="$3" -version="$4" - -if [ -z "$distroot" ] || [ -z "$distname" ] || [ -z "$outpath" ] || [ -z "$version" ]; then - die "usage: dist_get " -fi - -case $version in - v*) - # correct input - ;; - *) - echo "invalid version '$version'" >&2 - die "versions must begin with 'v', for example: v0.4.0" - ;; -esac - -# TODO: don't depend on the go tool being installed to detect this -goenv=$(get_go_vars) || die "could not get go env vars" - -case $goenv in - linux-*) - archive="tar.gz" - ;; - darwin-*) - archive="tar.gz" - ;; - windows-*) - archive="zip" - ;; - freebsd-*) - archive="tar.gz" - ;; - openbsd-*) - archive="tar.gz" - ;; - *) - echo "unrecognized system environment: $goenv" >&2 - die "currently only linux, darwin, windows and freebsd are supported by this script" -esac - - -mkdir -p bin/tmp - -url=$(mkurl "$distroot" "$distname" "$version" "$archive") -tmpfi="bin/tmp/$distname.$archive" - -download "$url" "$tmpfi" -if [ $? -ne 0 ]; then - die "failed to download $url to $tmpfi" -fi - -unarchive "$archive" "$tmpfi" "$outpath" "$distname" -if [ $? 
-ne 0 ]; then - die "failed to extract archive $tmpfi" -fi diff --git a/build/bootstrap.go b/build/bootstrap.go index ca6cc26ce..80c1529ff 100644 --- a/build/bootstrap.go +++ b/build/bootstrap.go @@ -13,6 +13,10 @@ import ( ) func BuiltinBootstrap() ([]peer.AddrInfo, error) { + if DisableBuiltinAssets { + return nil, nil + } + var out []peer.AddrInfo b := rice.MustFindBox("bootstrap") diff --git a/build/bootstrap/bootstrappers.pi b/build/bootstrap/bootstrappers.pi index 802f1471a..9b05a821c 100644 --- a/build/bootstrap/bootstrappers.pi +++ b/build/bootstrap/bootstrappers.pi @@ -1,8 +1,9 @@ -/dns4/lotus-bootstrap-0.dfw.fil-test.net/tcp/1347/p2p/12D3KooWHwGBSiLR5ts7KW9MgH4BMzC2iXe18kwAQ8Ee3LUd1jeR -/dns4/lotus-bootstrap-1.dfw.fil-test.net/tcp/1347/p2p/12D3KooWCLFaawdhLGcSpiqg43DtZ9QzPQ6HcB8Vvyu2Cnta8UWc -/dns4/lotus-bootstrap-0.fra.fil-test.net/tcp/1347/p2p/12D3KooWMmaL7eaUCF6tVAghVmgozxz4uztbuFUQv6dyFpHRarHR -/dns4/lotus-bootstrap-1.fra.fil-test.net/tcp/1347/p2p/12D3KooWLLpNYoKdf9NgcWudBhXLdTcXncqAsTzozw1scMMu6nS5 -/dns4/lotus-bootstrap-0.sin.fil-test.net/tcp/1347/p2p/12D3KooWCNL9vXaXwNs3Bu8uRAJK4pxpCyPeM7jZLSDpJma1wrV8 -/dns4/lotus-bootstrap-1.sin.fil-test.net/tcp/1347/p2p/12D3KooWNGGxFda1eC5U2YKAgs4ypoFHn3Z3xHCsjmFdrCcytoxm +/dns4/bootstrap-0.testnet.fildev.network/tcp/1347/p2p/12D3KooWJTUBUjtzWJGWU1XSiY21CwmHaCNLNYn2E7jqHEHyZaP7 +/dns4/bootstrap-1.testnet.fildev.network/tcp/1347/p2p/12D3KooW9yeKXha4hdrJKq74zEo99T8DhriQdWNoojWnnQbsgB3v +/dns4/bootstrap-2.testnet.fildev.network/tcp/1347/p2p/12D3KooWCrx8yVG9U9Kf7w8KLN3Edkj5ZKDhgCaeMqQbcQUoB6CT +/dns4/bootstrap-4.testnet.fildev.network/tcp/1347/p2p/12D3KooWPkL9LrKRQgHtq7kn9ecNhGU9QaziG8R5tX8v9v7t3h34 +/dns4/bootstrap-3.testnet.fildev.network/tcp/1347/p2p/12D3KooWKYSsbpgZ3HAjax5M1BXCwXLa6gVkUARciz7uN3FNtr7T +/dns4/bootstrap-5.testnet.fildev.network/tcp/1347/p2p/12D3KooWQYzqnLASJAabyMpPb1GcWZvNSe7JDcRuhdRqonFoiK9W +/dns4/lotus-bootstrap.forceup.cn/tcp/41778/p2p/12D3KooWFQsv3nRMUevZNWWsY1Wu6NUzUbawnWU5NcRhgKuJA37C 
/dns4/bootstrap-0.starpool.in/tcp/12757/p2p/12D3KooWGHpBMeZbestVEWkfdnC9u7p6uFHXL1n7m1ZBqsEmiUzz -/dns4/bootstrap-1.starpool.in/tcp/12757/p2p/12D3KooWQZrGH1PxSNZPum99M1zNvjNFM33d1AAu5DcvdHptuU7u +/dns4/bootstrap-1.starpool.in/tcp/12757/p2p/12D3KooWQZrGH1PxSNZPum99M1zNvjNFM33d1AAu5DcvdHptuU7u \ No newline at end of file diff --git a/build/clock.go b/build/clock.go new file mode 100644 index 000000000..a3943897d --- /dev/null +++ b/build/clock.go @@ -0,0 +1,10 @@ +package build + +import "github.com/raulk/clock" + +// Clock is the global clock for the system. In standard builds, +// we use a real-time clock, which maps to the `time` package. +// +// Tests that need control of time can replace this variable with +// clock.NewMock(). Always use real time for socket/stream deadlines. +var Clock = clock.New() diff --git a/build/drand.go b/build/drand.go new file mode 100644 index 000000000..73299249a --- /dev/null +++ b/build/drand.go @@ -0,0 +1,83 @@ +package build + +import ( + "sort" + + "github.com/filecoin-project/lotus/node/modules/dtypes" +) + +type DrandEnum int + +func DrandConfigSchedule() dtypes.DrandSchedule { + out := dtypes.DrandSchedule{} + for start, config := range DrandSchedule { + out = append(out, dtypes.DrandPoint{Start: start, Config: DrandConfigs[config]}) + } + + sort.Slice(out, func(i, j int) bool { + return out[i].Start < out[j].Start + }) + + return out +} + +const ( + DrandMainnet DrandEnum = iota + 1 + DrandTestnet + DrandDevnet + DrandLocalnet + DrandIncentinet +) + +var DrandConfigs = map[DrandEnum]dtypes.DrandConfig{ + DrandMainnet: { + Servers: []string{ + "https://api.drand.sh", + "https://api2.drand.sh", + "https://api3.drand.sh", + }, + Relays: []string{ + "/dnsaddr/api.drand.sh/", + "/dnsaddr/api2.drand.sh/", + "/dnsaddr/api3.drand.sh/", + }, + ChainInfoJSON: 
`{"public_key":"868f005eb8e6e4ca0a47c8a77ceaa5309a47978a7c71bc5cce96366b5d7a569937c529eeda66c7293784a9402801af31","period":30,"genesis_time":1595431050,"hash":"8990e7a9aaed2ffed73dbd7092123d6f289930540d7651336225dc172e51b2ce","groupHash":"176f93498eac9ca337150b46d21dd58673ea4e3581185f869672e59fa4cb390a"}`, + }, + DrandTestnet: { + Servers: []string{ + "https://pl-eu.testnet.drand.sh", + "https://pl-us.testnet.drand.sh", + "https://pl-sin.testnet.drand.sh", + }, + Relays: []string{ + "/dnsaddr/pl-eu.testnet.drand.sh/", + "/dnsaddr/pl-us.testnet.drand.sh/", + "/dnsaddr/pl-sin.testnet.drand.sh/", + }, + ChainInfoJSON: `{"public_key":"922a2e93828ff83345bae533f5172669a26c02dc76d6bf59c80892e12ab1455c229211886f35bb56af6d5bea981024df","period":25,"genesis_time":1590445175,"hash":"84b2234fb34e835dccd048255d7ad3194b81af7d978c3bf157e3469592ae4e02","groupHash":"4dd408e5fdff9323c76a9b6f087ba8fdc5a6da907bd9217d9d10f2287d081957"}`, + }, + DrandDevnet: { + Servers: []string{ + "https://dev1.drand.sh", + "https://dev2.drand.sh", + }, + Relays: []string{ + "/dnsaddr/dev1.drand.sh/", + "/dnsaddr/dev2.drand.sh/", + }, + ChainInfoJSON: `{"public_key":"8cda589f88914aa728fd183f383980b35789ce81b274e5daee1f338b77d02566ef4d3fb0098af1f844f10f9c803c1827","period":25,"genesis_time":1595348225,"hash":"e73b7dc3c4f6a236378220c0dd6aa110eb16eed26c11259606e07ee122838d4f","groupHash":"567d4785122a5a3e75a9bc9911d7ea807dd85ff76b78dc4ff06b075712898607"}`, + }, + DrandIncentinet: { + Servers: []string{ + "https://pl-eu.incentinet.drand.sh", + "https://pl-us.incentinet.drand.sh", + "https://pl-sin.incentinet.drand.sh", + }, + Relays: []string{ + "/dnsaddr/pl-eu.incentinet.drand.sh/", + "/dnsaddr/pl-us.incentinet.drand.sh/", + "/dnsaddr/pl-sin.incentinet.drand.sh/", + }, + ChainInfoJSON: 
`{"public_key":"8cad0c72c606ab27d36ee06de1d5b2db1faf92e447025ca37575ab3a8aac2eaae83192f846fc9e158bc738423753d000","period":30,"genesis_time":1595873820,"hash":"80c8b872c714f4c00fdd3daa465d5514049f457f01f85a4caf68cdcd394ba039","groupHash":"d9406aaed487f7af71851b4399448e311f2328923d454e971536c05398ce2d9b"}`, + }, +} diff --git a/build/flags.go b/build/flags.go new file mode 100644 index 000000000..33e9f6ede --- /dev/null +++ b/build/flags.go @@ -0,0 +1,15 @@ +package build + +// DisableBuiltinAssets disables the resolution of go.rice boxes that store +// built-in assets, such as proof parameters, bootstrap peers, genesis blocks, +// etc. +// +// When this value is set to true, it is expected that the user will +// provide any such configurations through the Lotus API itself. +// +// This is useful when you're using Lotus as a library, such as to orchestrate +// test scenarios, or for other purposes where you don't need to use the +// defaults shipped with the binary. +// +// For this flag to be effective, it must be enabled _before_ instantiating Lotus. 
+var DisableBuiltinAssets = false diff --git a/build/forks.go b/build/forks.go index 8e8f53abe..5c93a9353 100644 --- a/build/forks.go +++ b/build/forks.go @@ -1,4 +1 @@ package build - -const ForkCCM = 1750 -const ForkNoPowerEPSUpdates = 16450 diff --git a/build/genesis.go b/build/genesis.go index cdcf0402e..dc4ded273 100644 --- a/build/genesis.go +++ b/build/genesis.go @@ -2,7 +2,7 @@ package build import ( rice "github.com/GeertJohan/go.rice" - logging "github.com/ipfs/go-log" + logging "github.com/ipfs/go-log/v2" ) // moved from now-defunct build/paramfetch.go @@ -11,12 +11,12 @@ var log = logging.Logger("build") func MaybeGenesis() []byte { builtinGen, err := rice.FindBox("genesis") if err != nil { - log.Warn("loading built-in genesis: %s", err) + log.Warnf("loading built-in genesis: %s", err) return nil } genBytes, err := builtinGen.Bytes("devnet.car") if err != nil { - log.Warn("loading built-in genesis: %s", err) + log.Warnf("loading built-in genesis: %s", err) } return genBytes diff --git a/build/genesis/devnet.car b/build/genesis/devnet.car index d0aef2486..f1b3f342a 100644 Binary files a/build/genesis/devnet.car and b/build/genesis/devnet.car differ diff --git a/build/parameters.go b/build/parameters.go index 433fa497b..7d34a7831 100644 --- a/build/parameters.go +++ b/build/parameters.go @@ -2,4 +2,6 @@ package build import rice "github.com/GeertJohan/go.rice" -var ParametersJson = rice.MustFindBox("proof-params").MustBytes("parameters.json") +func ParametersJSON() []byte { + return rice.MustFindBox("proof-params").MustBytes("parameters.json") +} diff --git a/build/params_2k.go b/build/params_2k.go new file mode 100644 index 000000000..3682f7be1 --- /dev/null +++ b/build/params_2k.go @@ -0,0 +1,41 @@ +// +build debug 2k + +package build + +import ( + "github.com/filecoin-project/go-state-types/abi" + + "github.com/filecoin-project/lotus/chain/actors/policy" +) + +const UpgradeBreezeHeight = -1 +const BreezeGasTampingDuration = 0 + +const 
UpgradeSmokeHeight = -1 +const UpgradeIgnitionHeight = -2 +const UpgradeLiftoffHeight = -3 + +var DrandSchedule = map[abi.ChainEpoch]DrandEnum{ + 0: DrandMainnet, +} + +func init() { + policy.SetSupportedProofTypes(abi.RegisteredSealProof_StackedDrg2KiBV1) + policy.SetConsensusMinerMinPower(abi.NewStoragePower(2048)) + policy.SetMinVerifiedDealSize(abi.NewStoragePower(256)) + + BuildType |= Build2k +} + +const BlockDelaySecs = uint64(4) + +const PropagationDelaySecs = uint64(1) + +// SlashablePowerDelay is the number of epochs after ElectionPeriodStart, after +// which the miner is slashed +// +// Epochs +const SlashablePowerDelay = 20 + +// Epochs +const InteractivePoRepConfidence = 6 diff --git a/build/params_debug.go b/build/params_debug.go index 938b03925..f679c9178 100644 --- a/build/params_debug.go +++ b/build/params_debug.go @@ -2,36 +2,9 @@ package build -import "os" - -var SectorSizes = []uint64{1024} - -// Seconds -const BlockDelay = 6 - -const PropagationDelay = 3 - -// FallbackPoStDelay is the number of epochs the miner needs to wait after -// ElectionPeriodStart before starting fallback post computation -// -// Epochs -const FallbackPoStDelay = 10 - -// SlashablePowerDelay is the number of epochs after ElectionPeriodStart, after -// which the miner is slashed -// -// Epochs -const SlashablePowerDelay = 20 - -// Epochs -const InteractivePoRepDelay = 2 - -// Epochs -const InteractivePoRepConfidence = 6 - -// Bytes -var MinimumMinerPower uint64 = 2 << 10 // 2KiB - func init() { - os.Setenv("TRUST_PARAMS", "1") + InsecurePoStValidation = true + BuildType |= BuildDebug } + +// NOTE: Also includes settings from params_2k diff --git a/build/params_shared.go b/build/params_shared.go deleted file mode 100644 index ec4982cc1..000000000 --- a/build/params_shared.go +++ /dev/null @@ -1,99 +0,0 @@ -package build - -import ( - "math/big" -) - -// Core network constants - -// ///// -// Storage - -const UnixfsChunkSize uint64 = 1 << 20 -const UnixfsLinksPerLevel = 
1024 - -func SupportedSectorSize(ssize uint64) bool { - for _, ss := range SectorSizes { - if ssize == ss { - return true - } - } - return false -} - -// ///// -// Payments - -// Epochs -const PaymentChannelClosingDelay = 6 * 60 * 60 / BlockDelay // six hours - -// ///// -// Consensus / Network - -// Seconds -const AllowableClockDrift = 1 - -// Epochs -const ForkLengthThreshold = Finality - -// Blocks (e) -const BlocksPerEpoch = 5 - -// Epochs -const Finality = 500 - -// constants for Weight calculation -// The ratio of weight contributed by short-term vs long-term factors in a given round -const WRatioNum = int64(1) -const WRatioDen = 2 - -// ///// -// Proofs - -// Epochs -const SealRandomnessLookback = Finality - -// Epochs -const SealRandomnessLookbackLimit = SealRandomnessLookback + 2000 - -// ///// -// Mining - -// Epochs -const EcRandomnessLookback = 300 - -const PowerCollateralProportion = 5 -const PerCapitaCollateralProportion = 1 -const CollateralPrecision = 1000 - -// ///// -// Devnet settings - -const TotalFilecoin = 2_000_000_000 -const MiningRewardTotal = 1_400_000_000 - -const InitialRewardStr = "153856861913558700202" - -var InitialReward *big.Int - -const FilecoinPrecision = 1_000_000_000_000_000_000 - -// TODO: Move other important consts here - -func init() { - InitialReward = new(big.Int) - - var ok bool - InitialReward, ok = InitialReward. - SetString(InitialRewardStr, 10) - if !ok { - panic("could not parse InitialRewardStr") - } -} - -// Sync -const BadBlockCacheSize = 1 << 15 - -// assuming 4000 messages per round, this lets us not lose any messages across a -// 10 block reorg. 
-const BlsSignatureCacheSize = 40000 diff --git a/build/params_shared_funcs.go b/build/params_shared_funcs.go new file mode 100644 index 000000000..28567d3d1 --- /dev/null +++ b/build/params_shared_funcs.go @@ -0,0 +1,52 @@ +package build + +import ( + "sort" + + "github.com/filecoin-project/go-address" + + "github.com/libp2p/go-libp2p-core/protocol" + + "github.com/filecoin-project/go-state-types/abi" + miner0 "github.com/filecoin-project/specs-actors/actors/builtin/miner" + + "github.com/filecoin-project/lotus/node/modules/dtypes" +) + +func DefaultSectorSize() abi.SectorSize { + szs := make([]abi.SectorSize, 0, len(miner0.SupportedProofTypes)) + for spt := range miner0.SupportedProofTypes { + ss, err := spt.SectorSize() + if err != nil { + panic(err) + } + + szs = append(szs, ss) + } + + sort.Slice(szs, func(i, j int) bool { + return szs[i] < szs[j] + }) + + return szs[0] +} + +// Core network constants + +func BlocksTopic(netName dtypes.NetworkName) string { return "/fil/blocks/" + string(netName) } +func MessagesTopic(netName dtypes.NetworkName) string { return "/fil/msgs/" + string(netName) } +func DhtProtocolName(netName dtypes.NetworkName) protocol.ID { + return protocol.ID("/fil/kad/" + string(netName)) +} + +func UseNewestNetwork() bool { + // TODO: Put these in a container we can iterate over + if UpgradeBreezeHeight <= 0 && UpgradeSmokeHeight <= 0 { + return true + } + return false +} + +func SetAddressNetwork(n address.Network) { + address.CurrentNetwork = n +} diff --git a/build/params_shared_vals.go b/build/params_shared_vals.go new file mode 100644 index 000000000..33828f954 --- /dev/null +++ b/build/params_shared_vals.go @@ -0,0 +1,121 @@ +// +build !testground + +package build + +import ( + "math/big" + "os" + + "github.com/filecoin-project/go-address" + + "github.com/filecoin-project/go-state-types/network" + + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/specs-actors/actors/builtin" + miner0 
"github.com/filecoin-project/specs-actors/actors/builtin/miner" +) + +// ///// +// Storage + +const UnixfsChunkSize uint64 = 1 << 20 +const UnixfsLinksPerLevel = 1024 + +// ///// +// Consensus / Network + +const AllowableClockDriftSecs = uint64(1) +const NewestNetworkVersion = network.Version3 +const ActorUpgradeNetworkVersion = network.Version4 + +// Epochs +const ForkLengthThreshold = Finality + +// Blocks (e) +var BlocksPerEpoch = uint64(builtin.ExpectedLeadersPerEpoch) + +// Epochs +const Finality = miner0.ChainFinality +const MessageConfidence = uint64(5) + +// constants for Weight calculation +// The ratio of weight contributed by short-term vs long-term factors in a given round +const WRatioNum = int64(1) +const WRatioDen = uint64(2) + +// ///// +// Proofs + +// Epochs +const SealRandomnessLookback = Finality + +// Epochs +const SealRandomnessLookbackLimit = SealRandomnessLookback + 2000 // TODO: Get from spec specs-actors + +// Maximum lookback that randomness can be sourced from for a seal proof submission +const MaxSealLookback = SealRandomnessLookbackLimit + 2000 // TODO: Get from specs-actors + +// ///// +// Mining + +// Epochs +const TicketRandomnessLookback = abi.ChainEpoch(1) + +const WinningPoStSectorSetLookback = abi.ChainEpoch(10) + +// ///// +// Address + +const AddressMainnetEnvVar = "_mainnet_" + +// ///// +// Devnet settings + +var Devnet = true + +const FilBase = uint64(2_000_000_000) +const FilAllocStorageMining = uint64(1_100_000_000) + +const FilecoinPrecision = uint64(1_000_000_000_000_000_000) + +var InitialRewardBalance *big.Int + +// TODO: Move other important consts here + +func init() { + InitialRewardBalance = big.NewInt(int64(FilAllocStorageMining)) + InitialRewardBalance = InitialRewardBalance.Mul(InitialRewardBalance, big.NewInt(int64(FilecoinPrecision))) + + if os.Getenv("LOTUS_ADDRESS_TYPE") == AddressMainnetEnvVar { + SetAddressNetwork(address.Mainnet) + } +} + +// Sync +const BadBlockCacheSize = 1 << 15 + +// assuming 4000 
messages per round, this lets us not lose any messages across a +// 10 block reorg. +const BlsSignatureCacheSize = 40000 + +// Size of signature verification cache +// 32k keeps the cache around 10MB in size, max +const VerifSigCacheSize = 32000 + +// /////// +// Limits + +// TODO: If this is gonna stay, it should move to specs-actors +const BlockMessageLimit = 10000 + +const BlockGasLimit = 10_000_000_000 +const BlockGasTarget = BlockGasLimit / 2 +const BaseFeeMaxChangeDenom = 8 // 12.5% +const InitialBaseFee = 100e6 +const MinimumBaseFee = 100 +const PackingEfficiencyNum = 4 +const PackingEfficiencyDenom = 5 + +// Actor consts +// TODO: Pull from actors when its made not private +var MinDealDuration = abi.ChainEpoch(180 * builtin.EpochsInDay) diff --git a/build/params_testground.go b/build/params_testground.go new file mode 100644 index 000000000..07cc88688 --- /dev/null +++ b/build/params_testground.go @@ -0,0 +1,89 @@ +// +build testground + +// This file makes hardcoded parameters (const) configurable as vars. +// +// Its purpose is to unlock various degrees of flexibility and parametrization +// when writing Testground plans for Lotus. 
+// +package build + +import ( + "math/big" + + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/network" + "github.com/filecoin-project/specs-actors/actors/builtin" + miner0 "github.com/filecoin-project/specs-actors/actors/builtin/miner" +) + +var ( + UnixfsChunkSize = uint64(1 << 20) + UnixfsLinksPerLevel = 1024 + + BlocksPerEpoch = uint64(builtin.ExpectedLeadersPerEpoch) + BlockMessageLimit = 512 + BlockGasLimit = int64(100_000_000_000) + BlockGasTarget = int64(BlockGasLimit / 2) + BaseFeeMaxChangeDenom = int64(8) // 12.5% + InitialBaseFee = int64(100e6) + MinimumBaseFee = int64(100) + BlockDelaySecs = uint64(builtin.EpochDurationSeconds) + PropagationDelaySecs = uint64(6) + + AllowableClockDriftSecs = uint64(1) + + Finality = miner0.ChainFinality + ForkLengthThreshold = Finality + + SlashablePowerDelay = 20 + InteractivePoRepConfidence = 6 + + MessageConfidence uint64 = 5 + + WRatioNum = int64(1) + WRatioDen = uint64(2) + + BadBlockCacheSize = 1 << 15 + BlsSignatureCacheSize = 40000 + VerifSigCacheSize = 32000 + + SealRandomnessLookback = Finality + SealRandomnessLookbackLimit = SealRandomnessLookback + 2000 + MaxSealLookback = SealRandomnessLookbackLimit + 2000 + + TicketRandomnessLookback = abi.ChainEpoch(1) + WinningPoStSectorSetLookback = abi.ChainEpoch(10) + + FilBase uint64 = 2_000_000_000 + FilAllocStorageMining uint64 = 1_400_000_000 + + FilecoinPrecision uint64 = 1_000_000_000_000_000_000 + + InitialRewardBalance = func() *big.Int { + v := big.NewInt(int64(FilAllocStorageMining)) + v = v.Mul(v, big.NewInt(int64(FilecoinPrecision))) + return v + }() + // Actor consts + // TODO: Pull from actors when its made not private + MinDealDuration = abi.ChainEpoch(180 * builtin.EpochsInDay) + + PackingEfficiencyNum int64 = 4 + PackingEfficiencyDenom int64 = 5 + + UpgradeBreezeHeight abi.ChainEpoch = -1 + BreezeGasTampingDuration abi.ChainEpoch = 0 + + UpgradeSmokeHeight abi.ChainEpoch = -1 + UpgradeIgnitionHeight 
abi.ChainEpoch = -2 + UpgradeLiftoffHeight abi.ChainEpoch = -3 + + DrandSchedule = map[abi.ChainEpoch]DrandEnum{ + 0: DrandMainnet, + } + + NewestNetworkVersion = network.Version2 + ActorUpgradeNetworkVersion = network.Version3 + + Devnet = true +) diff --git a/build/params_testnet.go b/build/params_testnet.go index aa75ebcd7..960f3a9b6 100644 --- a/build/params_testnet.go +++ b/build/params_testnet.go @@ -1,34 +1,42 @@ // +build !debug +// +build !2k +// +build !testground package build -var SectorSizes = []uint64{ - 1 << 30, - 32 << 30, +import ( + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/lotus/chain/actors/policy" + + builtin0 "github.com/filecoin-project/specs-actors/actors/builtin" +) + +var DrandSchedule = map[abi.ChainEpoch]DrandEnum{ + 0: DrandIncentinet, + UpgradeSmokeHeight: DrandMainnet, } -// Seconds -const BlockDelay = 45 +const UpgradeBreezeHeight = 41280 +const BreezeGasTampingDuration = 120 -const PropagationDelay = 6 +const UpgradeSmokeHeight = 51000 -// FallbackPoStDelay is the number of epochs the miner needs to wait after -// ElectionPeriodStart before starting fallback post computation -// -// Epochs -const FallbackPoStDelay = 30 +const UpgradeIgnitionHeight = 94000 -// SlashablePowerDelay is the number of epochs after ElectionPeriodStart, after -// which the miner is slashed -// -// Epochs -const SlashablePowerDelay = 200 +// This signals our tentative epoch for mainnet launch. Can make it later, but not earlier. +// Miners, clients, developers, custodians all need time to prepare. +// We still have upgrades and state changes to do, but can happen after signaling timing here. 
+const UpgradeLiftoffHeight = 148888 -// Epochs -const InteractivePoRepDelay = 8 +func init() { + policy.SetConsensusMinerMinPower(abi.NewStoragePower(10 << 40)) + policy.SetSupportedProofTypes( + abi.RegisteredSealProof_StackedDrg32GiBV1, + abi.RegisteredSealProof_StackedDrg64GiBV1, + ) + Devnet = false +} -// Epochs -const InteractivePoRepConfidence = 6 +const BlockDelaySecs = uint64(builtin0.EpochDurationSeconds) -// Bytes -var MinimumMinerPower uint64 = 512 << 30 // 512GB +const PropagationDelaySecs = uint64(6) diff --git a/build/proof-params/parameters.json b/build/proof-params/parameters.json index 7e1988a41..1d4584454 100644 --- a/build/proof-params/parameters.json +++ b/build/proof-params/parameters.json @@ -1,103 +1,152 @@ { - "v20-proof-of-spacetime-election-5f585aca354eb68e411c8582ed0efd800792430e4e76d73468c4fc03f1a8d6d2.params": { - "cid": "QmX7tYeNPWae2fjZ3Am6GB9dmHvLqvoz8dKo3PR98VYxH9", - "digest": "39a9edec3355516674f0d12b926be493", + "v28-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-0170db1f394b35d995252228ee359194b13199d259380541dc529fb0099096b0.params": { + "cid": "QmVxjFRyhmyQaZEtCh7nk2abc7LhFkzhnRX4rcHqCCpikR", + "digest": "7610b9f82bfc88405b7a832b651ce2f6", + "sector_size": 2048 + }, + "v28-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-0170db1f394b35d995252228ee359194b13199d259380541dc529fb0099096b0.vk": { + "cid": "QmcS5JZs8X3TdtkEBpHAdUYjdNDqcL7fWQFtQz69mpnu2X", + "digest": "0e0958009936b9d5e515ec97b8cb792d", + "sector_size": 2048 + }, + "v28-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-0cfb4f178bbb71cf2ecfcd42accce558b27199ab4fb59cb78f2483fe21ef36d9.params": { + "cid": "QmUiRx71uxfmUE8V3H9sWAsAXoM88KR4eo1ByvvcFNeTLR", + "digest": "1a7d4a9c8a502a497ed92a54366af33f", + "sector_size": 536870912 + }, + "v28-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-0cfb4f178bbb71cf2ecfcd42accce558b27199ab4fb59cb78f2483fe21ef36d9.vk": { + "cid": "QmfCeddjFpWtavzfEzZpJfzSajGNwfL4RjFXWAvA9TSnTV", + 
"digest": "4dae975de4f011f101f5a2f86d1daaba", + "sector_size": 536870912 + }, + "v28-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-3ea05428c9d11689f23529cde32fd30aabd50f7d2c93657c1d3650bca3e8ea9e.params": { + "cid": "QmcSTqDcFVLGGVYz1njhUZ7B6fkKtBumsLUwx4nkh22TzS", + "digest": "82c88066be968bb550a05e30ff6c2413", + "sector_size": 2048 + }, + "v28-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-3ea05428c9d11689f23529cde32fd30aabd50f7d2c93657c1d3650bca3e8ea9e.vk": { + "cid": "QmSTCXF2ipGA3f6muVo6kHc2URSx6PzZxGUqu7uykaH5KU", + "digest": "ffd79788d614d27919ae5bd2d94eacb6", + "sector_size": 2048 + }, + "v28-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-50c7368dea9593ed0989e70974d28024efa9d156d585b7eea1be22b2e753f331.params": { + "cid": "QmU9SBzJNrcjRFDiFc4GcApqdApN6z9X7MpUr66mJ2kAJP", + "digest": "700171ecf7334e3199437c930676af82", + "sector_size": 8388608 + }, + "v28-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-50c7368dea9593ed0989e70974d28024efa9d156d585b7eea1be22b2e753f331.vk": { + "cid": "QmbmUMa3TbbW3X5kFhExs6WgC4KeWT18YivaVmXDkB6ANG", + "digest": "79ebb55f56fda427743e35053edad8fc", + "sector_size": 8388608 + }, + "v28-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-5294475db5237a2e83c3e52fd6c2b03859a1831d45ed08c4f35dbf9a803165a9.params": { + "cid": "QmdNEL2RtqL52GQNuj8uz6mVj5Z34NVnbaJ1yMyh1oXtBx", + "digest": "c49499bb76a0762884896f9683403f55", + "sector_size": 8388608 + }, + "v28-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-5294475db5237a2e83c3e52fd6c2b03859a1831d45ed08c4f35dbf9a803165a9.vk": { + "cid": "QmUiVYCQUgr6Y13pZFr8acWpSM4xvTXUdcvGmxyuHbKhsc", + "digest": "34d4feeacd9abf788d69ef1bb4d8fd00", + "sector_size": 8388608 + }, + "v28-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-7d739b8cf60f1b0709eeebee7730e297683552e4b69cab6984ec0285663c5781.params": { + "cid": "QmVgCsJFRXKLuuUhT3aMYwKVGNA9rDeR6DCrs7cAe8riBT", + "digest": "827359440349fe8f5a016e7598993b79", + 
"sector_size": 536870912 + }, + "v28-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-7d739b8cf60f1b0709eeebee7730e297683552e4b69cab6984ec0285663c5781.vk": { + "cid": "QmfA31fbCWojSmhSGvvfxmxaYCpMoXP95zEQ9sLvBGHNaN", + "digest": "bd2cd62f65c1ab84f19ca27e97b7c731", + "sector_size": 536870912 + }, + "v28-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-8-0-0377ded656c6f524f1618760bffe4e0a1c51d5a70c4509eedae8a27555733edc.params": { + "cid": "QmaUmfcJt6pozn8ndq1JVBzLRjRJdHMTPd4foa8iw5sjBZ", + "digest": "2cf49eb26f1fee94c85781a390ddb4c8", "sector_size": 34359738368 }, - "v20-proof-of-spacetime-election-5f585aca354eb68e411c8582ed0efd800792430e4e76d73468c4fc03f1a8d6d2.vk": { - "cid": "QmbNGx7pNbGiEr8ykoHxVXHW2LNSmGdsxKtj1onZCyguCX", - "digest": "0227ae7df4f2affe529ebafbbc7540ee", + "v28-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-8-0-0377ded656c6f524f1618760bffe4e0a1c51d5a70c4509eedae8a27555733edc.vk": { + "cid": "QmR9i9KL3vhhAqTBGj1bPPC7LvkptxrH9RvxJxLN1vvsBE", + "digest": "0f8ec542485568fa3468c066e9fed82b", "sector_size": 34359738368 }, - "v20-proof-of-spacetime-election-a4e18190d4b4657ba1b4d08a341871b2a6f398e327cb9951b28ab141fbdbf49d.params": { - "cid": "QmRGZsNp4mp1cZshcXqt3VMuWscAEsiMa2iepF4CsWWoiv", - "digest": "991041a354b12c280542741f58c7f2ca", - "sector_size": 1024 - }, - "v20-proof-of-spacetime-election-a4e18190d4b4657ba1b4d08a341871b2a6f398e327cb9951b28ab141fbdbf49d.vk": { - "cid": "QmWpmrhCGVcfqLyqp5oGAnhPmCE5hGTPaauHi25mpQwRSU", - "digest": "91fac550e1f9bccab213830bb0c85bd6", - "sector_size": 1024 - }, - "v20-proof-of-spacetime-election-a9eb6d90b896a282ec2d3a875c6143e3fcff778f0da1460709e051833651559b.params": { - "cid": "QmenSZXh1EsSyHiSRvA6wb8yaPhYBTjrKehJw96Px5HnN4", - "digest": "6322eacd2773163ddd51f9ca7d645fc4", - "sector_size": 1073741824 - }, - "v20-proof-of-spacetime-election-a9eb6d90b896a282ec2d3a875c6143e3fcff778f0da1460709e051833651559b.vk": { - "cid": "QmPvZoMKofw6eDhDg5ESJA2QAZP8HvM6qMQk7fw4pq9bQf", - "digest": 
"0df62745fceac922e3e70847cfc70b52", - "sector_size": 1073741824 - }, - "v20-proof-of-spacetime-election-bf872523641b1de33553db2a177df13e412d7b3b0103e6696ae0a1cf5d525259.params": { - "cid": "QmVibFqzkZoL8cwQmzj8njPokCQGCCx4pBcUH77bzgJgV9", - "digest": "de9d71e672f286706a1673bd57abdaac", - "sector_size": 16777216 - }, - "v20-proof-of-spacetime-election-bf872523641b1de33553db2a177df13e412d7b3b0103e6696ae0a1cf5d525259.vk": { - "cid": "QmZa5FX27XyiEXQQLQpHqtMJKLzrcY8wMuj3pxzmSimSyu", - "digest": "7f796d3a0f13499181e44b5eee0cc744", - "sector_size": 16777216 - }, - "v20-proof-of-spacetime-election-ffc3fb192364238b60977839d14e3154d4a98313e30d46694a12af54b6874975.params": { - "cid": "Qmbt2SWWAmMcYoY3DAiRDXA8fAuqdqRLWucJMSxYmzBCmN", - "digest": "151ae0ae183fc141e8c2bebc28e5cc10", - "sector_size": 268435456 - }, - "v20-proof-of-spacetime-election-ffc3fb192364238b60977839d14e3154d4a98313e30d46694a12af54b6874975.vk": { - "cid": "QmUxvPu4xdVmjMFihUKoYyEdXBqxsXkvmxRweU7KouWHji", - "digest": "95eb89588e9d1832aca044c3a13178af", - "sector_size": 268435456 - }, - "v20-stacked-proof-of-replication-117839dacd1ef31e5968a6fd13bcd6fa86638d85c40c9241a1d07c2a954eb89b.params": { - "cid": "QmQZe8eLo2xXbhSDxtyYZNqEjqjdcWGdADywECRvNEZQdX", - "digest": "fcd50e2e08a8560a6bb3418e883567ed", - "sector_size": 268435456 - }, - "v20-stacked-proof-of-replication-117839dacd1ef31e5968a6fd13bcd6fa86638d85c40c9241a1d07c2a954eb89b.vk": { - "cid": "Qme1hn6QT1covfoUFGDZkqoE1pMTax9FNW3nWWmTNqFe7y", - "digest": "872e244d86499fd659082e3bcf3f13e7", - "sector_size": 268435456 - }, - "v20-stacked-proof-of-replication-b46f3a1051afbb67f70aae7082da95def62eee943662f3e1bf69837fb08aaae4.params": { - "cid": "QmSfrPDC9jwY4MKrjzhCqDBBAG44wSDM8oE5NuDwWSh2xN", - "digest": "0a338b941c5f17946340de5fc95cab30", + "v28-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-8-0-559e581f022bb4e4ec6e719e563bf0e026ad6de42e56c18714a2c692b1b88d7e.params": { + "cid": "Qmdtczp7p4wrbDofmHdGhiixn9irAcN77mV9AEHZBaTt1i", + "digest": 
"d84f79a16fe40e9e25a36e2107bb1ba0", "sector_size": 34359738368 }, - "v20-stacked-proof-of-replication-b46f3a1051afbb67f70aae7082da95def62eee943662f3e1bf69837fb08aaae4.vk": { - "cid": "QmTDGynCmnbaZNBP3Bv3F3duC3ecKRubCKeMUiQQZYbGpF", - "digest": "c752e070a6b7aa8b79aa661a6b600b55", + "v28-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-8-0-559e581f022bb4e4ec6e719e563bf0e026ad6de42e56c18714a2c692b1b88d7e.vk": { + "cid": "QmZCvxKcKP97vDAk8Nxs9R1fWtqpjQrAhhfXPoCi1nkDoF", + "digest": "fc02943678dd119e69e7fab8420e8819", "sector_size": 34359738368 }, - "v20-stacked-proof-of-replication-e71093863cadc71de61f38311ee45816633973bbf34849316b147f8d2e66f199.params": { - "cid": "QmXjSSnMUnc7EjQBYtTHhvLU3kXJTbUyhVhJRSTRehh186", - "digest": "efa407fd09202dffd15799a8518e73d3", - "sector_size": 1024 + "v28-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-8-2-2627e4006b67f99cef990c0a47d5426cb7ab0a0ad58fc1061547bf2d28b09def.params": { + "cid": "QmeAN4vuANhXsF8xP2Lx5j2L6yMSdogLzpcvqCJThRGK1V", + "digest": "3810b7780ac0e299b22ae70f1f94c9bc", + "sector_size": 68719476736 }, - "v20-stacked-proof-of-replication-e71093863cadc71de61f38311ee45816633973bbf34849316b147f8d2e66f199.vk": { - "cid": "QmYHW3zhQouDP4okFbXSsRMcZ8bokKGvzxqbv7ZrunPMiG", - "digest": "b2f09a0ccb62da28c890d5b881c8dcd2", - "sector_size": 1024 + "v28-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-8-2-2627e4006b67f99cef990c0a47d5426cb7ab0a0ad58fc1061547bf2d28b09def.vk": { + "cid": "QmWV8rqZLxs1oQN9jxNWmnT1YdgLwCcscv94VARrhHf1T7", + "digest": "59d2bf1857adc59a4f08fcf2afaa916b", + "sector_size": 68719476736 }, - "v20-stacked-proof-of-replication-e99a585174b6a45b254ba4780d72c89ad808c305c6d11711009ade4f39dba8e9.params": { - "cid": "QmUhyfNeLb32LfSkjsUwTFYLXQGMj6JQ8daff4DdVMt79q", - "digest": "b53c1916a63839ec345aa2224e9198b7", - "sector_size": 1073741824 + "v28-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-8-2-b62098629d07946e9028127e70295ed996fe3ed25b0f9f88eb610a0ab4385a3c.params": { + "cid": 
"QmVkrXc1SLcpgcudK5J25HH93QvR9tNsVhVTYHm5UymXAz", + "digest": "2170a91ad5bae22ea61f2ea766630322", + "sector_size": 68719476736 }, - "v20-stacked-proof-of-replication-e99a585174b6a45b254ba4780d72c89ad808c305c6d11711009ade4f39dba8e9.vk": { - "cid": "QmWReGfbuoozNErbskmFvqV4q36BY6F2WWb4cVFc3zoYkA", - "digest": "20d58a3fae7343481f8298a2dd493dd7", - "sector_size": 1073741824 + "v28-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-8-2-b62098629d07946e9028127e70295ed996fe3ed25b0f9f88eb610a0ab4385a3c.vk": { + "cid": "QmbfQjPD7EpzjhWGmvWAsyN2mAZ4PcYhsf3ujuhU9CSuBm", + "digest": "6d3789148fb6466d07ee1e24d6292fd6", + "sector_size": 68719476736 }, - "v20-stacked-proof-of-replication-f571ee2386f4c65a68e802747f2d78691006fc81a67971c4d9641403fffece16.params": { - "cid": "QmSAHu14Pe8iav6BYCt9XkpHJ73XM7tcpY4d9JK9BST9HU", - "digest": "7698426202c7e07b26ef056d31485b3a", - "sector_size": 16777216 + "v28-stacked-proof-of-replication-merkletree-poseidon_hasher-8-0-0-sha256_hasher-032d3138d22506ec0082ed72b2dcba18df18477904e35bafee82b3793b06832f.params": { + "cid": "QmWceMgnWYLopMuM4AoGMvGEau7tNe5UK83XFjH5V9B17h", + "digest": "434fb1338ecfaf0f59256f30dde4968f", + "sector_size": 2048 }, - "v20-stacked-proof-of-replication-f571ee2386f4c65a68e802747f2d78691006fc81a67971c4d9641403fffece16.vk": { - "cid": "QmaKtFLShnhMGVn7P9UsHjkgqtqRFSwCStqqykBN7u8dax", - "digest": "834408e5c3fce6ec5d1bf64e64cee94e", - "sector_size": 16777216 + "v28-stacked-proof-of-replication-merkletree-poseidon_hasher-8-0-0-sha256_hasher-032d3138d22506ec0082ed72b2dcba18df18477904e35bafee82b3793b06832f.vk": { + "cid": "QmamahpFCstMUqHi2qGtVoDnRrsXhid86qsfvoyCTKJqHr", + "digest": "dc1ade9929ade1708238f155343044ac", + "sector_size": 2048 + }, + "v28-stacked-proof-of-replication-merkletree-poseidon_hasher-8-0-0-sha256_hasher-6babf46ce344ae495d558e7770a585b2382d54f225af8ed0397b8be7c3fcd472.params": { + "cid": "QmYBpTt7LWNAWr1JXThV5VxX7wsQFLd1PHrGYVbrU1EZjC", + "digest": "6c77597eb91ab936c1cef4cf19eba1b3", + 
"sector_size": 536870912 + }, + "v28-stacked-proof-of-replication-merkletree-poseidon_hasher-8-0-0-sha256_hasher-6babf46ce344ae495d558e7770a585b2382d54f225af8ed0397b8be7c3fcd472.vk": { + "cid": "QmWionkqH2B6TXivzBSQeSyBxojaiAFbzhjtwYRrfwd8nH", + "digest": "065179da19fbe515507267677f02823e", + "sector_size": 536870912 + }, + "v28-stacked-proof-of-replication-merkletree-poseidon_hasher-8-0-0-sha256_hasher-ecd683648512ab1765faa2a5f14bab48f676e633467f0aa8aad4b55dcb0652bb.params": { + "cid": "QmPXAPPuQtuQz7Zz3MHMAMEtsYwqM1o9H1csPLeiMUQwZH", + "digest": "09e612e4eeb7a0eb95679a88404f960c", + "sector_size": 8388608 + }, + "v28-stacked-proof-of-replication-merkletree-poseidon_hasher-8-0-0-sha256_hasher-ecd683648512ab1765faa2a5f14bab48f676e633467f0aa8aad4b55dcb0652bb.vk": { + "cid": "QmYCuipFyvVW1GojdMrjK1JnMobXtT4zRCZs1CGxjizs99", + "digest": "b687beb9adbd9dabe265a7e3620813e4", + "sector_size": 8388608 + }, + "v28-stacked-proof-of-replication-merkletree-poseidon_hasher-8-8-0-sha256_hasher-82a357d2f2ca81dc61bb45f4a762807aedee1b0a53fd6c4e77b46a01bfef7820.params": { + "cid": "QmengpM684XLQfG8754ToonszgEg2bQeAGUan5uXTHUQzJ", + "digest": "6a388072a518cf46ebd661f5cc46900a", + "sector_size": 34359738368 + }, + "v28-stacked-proof-of-replication-merkletree-poseidon_hasher-8-8-0-sha256_hasher-82a357d2f2ca81dc61bb45f4a762807aedee1b0a53fd6c4e77b46a01bfef7820.vk": { + "cid": "Qmf93EMrADXAK6CyiSfE8xx45fkMfR3uzKEPCvZC1n2kzb", + "digest": "0c7b4aac1c40fdb7eb82bc355b41addf", + "sector_size": 34359738368 + }, + "v28-stacked-proof-of-replication-merkletree-poseidon_hasher-8-8-2-sha256_hasher-96f1b4a04c5c51e4759bbf224bbc2ef5a42c7100f16ec0637123f16a845ddfb2.params": { + "cid": "QmS7ye6Ri2MfFzCkcUJ7FQ6zxDKuJ6J6B8k5PN7wzSR9sX", + "digest": "1801f8a6e1b00bceb00cc27314bb5ce3", + "sector_size": 68719476736 + }, + "v28-stacked-proof-of-replication-merkletree-poseidon_hasher-8-8-2-sha256_hasher-96f1b4a04c5c51e4759bbf224bbc2ef5a42c7100f16ec0637123f16a845ddfb2.vk": { + "cid": 
"QmehSmC6BhrgRZakPDta2ewoH9nosNzdjCqQRXsNFNUkLN", + "digest": "a89884252c04c298d0b3c81bfd884164", + "sector_size": 68719476736 } } - diff --git a/build/version.go b/build/version.go index b1fd8b63d..77b98f008 100644 --- a/build/version.go +++ b/build/version.go @@ -1,13 +1,39 @@ package build -import "fmt" +import ( + "fmt" + + "golang.org/x/xerrors" +) var CurrentCommit string +var BuildType int + +const ( + BuildDefault = 0 + Build2k = 0x1 + BuildDebug = 0x3 +) + +func buildType() string { + switch BuildType { + case BuildDefault: + return "" + case BuildDebug: + return "+debug" + case Build2k: + return "+2k" + default: + return "+huh?" + } +} // BuildVersion is the local build version, set by build system -const BuildVersion = "0.1.6" +const BuildVersion = "0.8.0" -var UserVersion = BuildVersion + CurrentCommit +func UserVersion() string { + return BuildVersion + buildType() + CurrentCommit +} type Version uint32 @@ -30,9 +56,39 @@ func (ve Version) EqMajorMinor(v2 Version) bool { return ve&minorMask == v2&minorMask } -// APIVersion is a semver version of the rpc api exposed -var APIVersion Version = newVer(0, 1, 6) +type NodeType int +const ( + NodeUnknown NodeType = iota + + NodeFull + NodeMiner + NodeWorker +) + +var RunningNodeType NodeType + +func VersionForType(nodeType NodeType) (Version, error) { + switch nodeType { + case NodeFull: + return FullAPIVersion, nil + case NodeMiner: + return MinerAPIVersion, nil + case NodeWorker: + return WorkerAPIVersion, nil + default: + return Version(0), xerrors.Errorf("unknown node type %d", nodeType) + } +} + +// semver versions of the rpc api exposed +var ( + FullAPIVersion = newVer(0, 16, 0) + MinerAPIVersion = newVer(0, 15, 0) + WorkerAPIVersion = newVer(0, 15, 0) +) + +//nolint:varcheck,deadcode const ( majorMask = 0xff0000 minorMask = 0xffff00 diff --git a/chain/actors/actor_cron.go b/chain/actors/actor_cron.go deleted file mode 100644 index f0be6a439..000000000 --- a/chain/actors/actor_cron.go +++ /dev/null @@ 
-1,48 +0,0 @@ -package actors - -import ( - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/lotus/chain/actors/aerrors" - "github.com/filecoin-project/lotus/chain/types" -) - -type CronActor struct{} - -type callTuple struct { - addr address.Address - method uint64 -} - -var CronActors = []callTuple{ - {StoragePowerAddress, SPAMethods.CheckProofSubmissions}, -} - -type CronActorState struct{} - -type cAMethods struct { - EpochTick uint64 -} - -var CAMethods = cAMethods{2} - -func (ca CronActor) Exports() []interface{} { - return []interface{}{ - 1: nil, - 2: ca.EpochTick, - } -} - -func (ca CronActor) EpochTick(act *types.Actor, vmctx types.VMContext, params *struct{}) ([]byte, ActorError) { - if vmctx.Message().From != CronAddress { - return nil, aerrors.New(1, "EpochTick is only callable as a part of tipset state computation") - } - - for _, call := range CronActors { - _, err := vmctx.Send(call.addr, call.method, types.NewInt(0), nil) - if err != nil { - return nil, err // todo: this very bad? 
- } - } - - return nil, nil -} diff --git a/chain/actors/actor_init.go b/chain/actors/actor_init.go deleted file mode 100644 index 45da1401e..000000000 --- a/chain/actors/actor_init.go +++ /dev/null @@ -1,242 +0,0 @@ -package actors - -import ( - "bytes" - "context" - "encoding/binary" - "fmt" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/lotus/chain/actors/aerrors" - "github.com/filecoin-project/lotus/chain/types" - cbg "github.com/whyrusleeping/cbor-gen" - "golang.org/x/xerrors" - - "github.com/ipfs/go-cid" - "github.com/ipfs/go-hamt-ipld" - cbor "github.com/ipfs/go-ipld-cbor" - logging "github.com/ipfs/go-log" - mh "github.com/multiformats/go-multihash" -) - -var log = logging.Logger("actors") - -var EmptyCBOR cid.Cid - -const ( - GasCreateActor = 100 -) - -func init() { - - n, err := cbor.WrapObject(map[string]string{}, mh.SHA2_256, -1) - if err != nil { - panic(err) // ok - } - - EmptyCBOR = n.Cid() -} - -type InitActor struct{} - -type InitActorState struct { - AddressMap cid.Cid - - NextID uint64 -} - -type iAMethods struct { - Exec uint64 -} - -var IAMethods = iAMethods{2} - -func (ia InitActor) Exports() []interface{} { - return []interface{}{ - 1: nil, - 2: ia.Exec, - } -} - -type ExecParams struct { - Code cid.Cid - Params []byte -} - -func CreateExecParams(act cid.Cid, obj cbg.CBORMarshaler) ([]byte, aerrors.ActorError) { - encparams, err := SerializeParams(obj) - if err != nil { - return nil, aerrors.Wrap(err, "creating ExecParams") - } - - return SerializeParams(&ExecParams{ - Code: act, - Params: encparams, - }) -} - -func (ia InitActor) Exec(act *types.Actor, vmctx types.VMContext, p *ExecParams) ([]byte, aerrors.ActorError) { - beginState := vmctx.Storage().GetHead() - - var self InitActorState - if err := vmctx.Storage().Get(beginState, &self); err != nil { - return nil, err - } - - if err := vmctx.ChargeGas(GasCreateActor); err != nil { - return nil, aerrors.Wrap(err, "run out of gas") - } - - // Make sure that only 
the actors defined in the spec can be launched. - if !IsBuiltinActor(p.Code) { - return nil, aerrors.New(1, - "cannot launch actor instance that is not a builtin actor") - } - - // Ensure that singletons can be only launched once. - // TODO: do we want to enforce this? If so how should actors be marked as such? - if IsSingletonActor(p.Code) { - return nil, aerrors.New(1, "cannot launch another actor of this type") - } - - // This generates a unique address for this actor that is stable across message - // reordering - creator := vmctx.Message().From - nonce := vmctx.Message().Nonce - addr, err := ComputeActorAddress(creator, nonce) - if err != nil { - return nil, err - } - - // Set up the actor itself - actor := types.Actor{ - Code: p.Code, - Balance: types.NewInt(0), - Head: EmptyCBOR, - Nonce: 0, - } - - // The call to the actors constructor will set up the initial state - // from the given parameters, setting `actor.Head` to a new value when successful. - // TODO: can constructors fail? - //actor.Constructor(p.Params) - - // Store the mapping of address to actor ID. 
- idAddr, nerr := self.AddActor(vmctx.Ipld(), addr) - if nerr != nil { - return nil, aerrors.Escalate(err, "adding new actor mapping") - } - - // NOTE: This is a privileged call that only the init actor is allowed to make - // FIXME: Had to comment this because state is not in interface - state, err := vmctx.StateTree() - if err != nil { - return nil, err - } - - if err := state.SetActor(idAddr, &actor); err != nil { - if xerrors.Is(err, types.ErrActorNotFound) { - return nil, aerrors.Absorb(err, 1, "SetActor, actor not found") - } - return nil, aerrors.Escalate(err, "inserting new actor into state tree") - } - - // '1' is reserved for constructor methods - _, err = vmctx.Send(idAddr, 1, vmctx.Message().Value, p.Params) - if err != nil { - return nil, err - } - - c, err := vmctx.Storage().Put(&self) - if err != nil { - return nil, err - } - - if err := vmctx.Storage().Commit(beginState, c); err != nil { - return nil, err - } - - return idAddr.Bytes(), nil -} - -func IsBuiltinActor(code cid.Cid) bool { - switch code { - case StorageMarketCodeCid, StoragePowerCodeCid, StorageMinerCodeCid, AccountCodeCid, InitCodeCid, MultisigCodeCid, PaymentChannelCodeCid: - return true - default: - return false - } -} - -func IsSingletonActor(code cid.Cid) bool { - return code == StoragePowerCodeCid || code == StorageMarketCodeCid || code == InitCodeCid || code == CronCodeCid -} - -func (ias *InitActorState) AddActor(cst *hamt.CborIpldStore, addr address.Address) (address.Address, error) { - nid := ias.NextID - - amap, err := hamt.LoadNode(context.TODO(), cst, ias.AddressMap) - if err != nil { - return address.Undef, err - } - - if err := amap.Set(context.TODO(), string(addr.Bytes()), nid); err != nil { - return address.Undef, err - } - - if err := amap.Flush(context.TODO()); err != nil { - return address.Undef, err - } - - ncid, err := cst.Put(context.TODO(), amap) - if err != nil { - return address.Undef, err - } - ias.AddressMap = ncid - ias.NextID++ - - return NewIDAddress(nid) 
-} - -func (ias *InitActorState) Lookup(cst *hamt.CborIpldStore, addr address.Address) (address.Address, error) { - amap, err := hamt.LoadNode(context.TODO(), cst, ias.AddressMap) - if err != nil { - return address.Undef, xerrors.Errorf("ias lookup failed loading hamt node: %w", err) - } - - var val interface{} - err = amap.Find(context.TODO(), string(addr.Bytes()), &val) - if err != nil { - return address.Undef, xerrors.Errorf("ias lookup failed to do find: %w", err) - } - - ival, ok := val.(uint64) - if !ok { - return address.Undef, fmt.Errorf("invalid value in init actor state, expected uint64, got %T", val) - } - - return address.NewIDAddress(ival) -} - -type AccountActorState struct { - Address address.Address -} - -func ComputeActorAddress(creator address.Address, nonce uint64) (address.Address, ActorError) { - buf := new(bytes.Buffer) - _, err := buf.Write(creator.Bytes()) - if err != nil { - return address.Undef, aerrors.Escalate(err, "could not write address") - } - - err = binary.Write(buf, binary.BigEndian, nonce) - if err != nil { - return address.Undef, aerrors.Escalate(err, "could not write nonce") - } - - addr, err := address.NewActorAddress(buf.Bytes()) - if err != nil { - return address.Undef, aerrors.Escalate(err, "could not create address") - } - return addr, nil -} diff --git a/chain/actors/actor_miner.go b/chain/actors/actor_miner.go deleted file mode 100644 index 4e3e33096..000000000 --- a/chain/actors/actor_miner.go +++ /dev/null @@ -1,1248 +0,0 @@ -package actors - -import ( - "bytes" - "context" - "encoding/binary" - "fmt" - - ffi "github.com/filecoin-project/filecoin-ffi" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-sectorbuilder" - "github.com/filecoin-project/lotus/build" - "github.com/filecoin-project/lotus/chain/actors/aerrors" - "github.com/filecoin-project/lotus/chain/types" - - "github.com/filecoin-project/go-amt-ipld" - "github.com/ipfs/go-cid" - cbor "github.com/ipfs/go-ipld-cbor" - 
"github.com/libp2p/go-libp2p-core/peer" - cbg "github.com/whyrusleeping/cbor-gen" - "go.opencensus.io/trace" - "golang.org/x/xerrors" -) - -const MaxSectors = 1 << 48 -const RLEMax = 100e3 - -type StorageMinerActor struct{} - -type StorageMinerActorState struct { - // PreCommittedSectors is the set of sectors that have been committed to but not - // yet had their proofs submitted - PreCommittedSectors map[string]*PreCommittedSector - - // All sectors this miner has committed. - // - // AMT[sectorID]ffi.PublicSectorInfo - Sectors cid.Cid - - // TODO: Spec says 'StagedCommittedSectors', which one is it? - - // Sectors this miner is currently mining. It is only updated - // when a PoSt is submitted (not as each new sector commitment is added). - // - // AMT[sectorID]ffi.PublicSectorInfo - ProvingSet cid.Cid - - // TODO: these: - // SectorTable - // SectorExpirationQueue - // ChallengeStatus - - // Contains mostly static info about this miner - Info cid.Cid - - // Faulty sectors reported since last SubmitPost - FaultSet types.BitField - - LastFaultSubmission uint64 - - // Amount of power this miner has. - Power types.BigInt - - // Active is set to true after the miner has submitted their first PoSt - Active bool - - // The height at which this miner was slashed at. - SlashedAt uint64 - - ElectionPeriodStart uint64 -} - -type MinerInfo struct { - // Account that owns this miner. - // - Income and returned collateral are paid to this address. - // - This address is also allowed to change the worker address for the miner. - Owner address.Address - - // Worker account for this miner. - // This will be the key that is used to sign blocks created by this miner, and - // sign messages sent on behalf of this miner to commit sectors, submit PoSts, and - // other day to day miner activities. - Worker address.Address - - // Libp2p identity that should be used when connecting to this miner. 
- PeerID peer.ID - - // Amount of space in each sector committed to the network by this miner. - SectorSize uint64 - - // SubsectorCount -} - -type PreCommittedSector struct { - Info SectorPreCommitInfo - ReceivedEpoch uint64 -} - -type StorageMinerConstructorParams struct { - Owner address.Address - Worker address.Address - SectorSize uint64 - PeerID peer.ID -} - -type SectorPreCommitInfo struct { - SectorNumber uint64 - - CommR []byte // TODO: Spec says CID - SealEpoch uint64 - DealIDs []uint64 -} - -type maMethods struct { - Constructor uint64 - PreCommitSector uint64 - ProveCommitSector uint64 - SubmitFallbackPoSt uint64 - SlashStorageFault uint64 - GetCurrentProvingSet uint64 - ArbitrateDeal uint64 - DePledge uint64 - GetOwner uint64 - GetWorkerAddr uint64 - GetPower uint64 - GetPeerID uint64 - GetSectorSize uint64 - UpdatePeerID uint64 - ChangeWorker uint64 - IsSlashed uint64 - CheckMiner uint64 - DeclareFaults uint64 - SlashConsensusFault uint64 - SubmitElectionPoSt uint64 -} - -var MAMethods = maMethods{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20} - -func (sma StorageMinerActor) Exports() []interface{} { - return []interface{}{ - 1: sma.StorageMinerConstructor, - 2: sma.PreCommitSector, - 3: withUpdates( - update{start: 0, method: sma.ProveCommitSectorV0}, - update{start: build.ForkNoPowerEPSUpdates, method: sma.ProveCommitSectorV1}, - ), - 4: sma.SubmitFallbackPoSt, - //5: sma.SlashStorageFault, - //6: sma.GetCurrentProvingSet, - //7: sma.ArbitrateDeal, - //8: sma.DePledge, - 9: sma.GetOwner, - 10: sma.GetWorkerAddr, - 11: sma.GetPower, // TODO: Remove - 12: sma.GetPeerID, - 13: sma.GetSectorSize, - 14: sma.UpdatePeerID, - //15: sma.ChangeWorker, - 16: sma.IsSlashed, - 17: sma.CheckMiner, - 18: withUpdates( - update{start: 0, method: sma.DeclareFaultsV0}, - update{start: build.ForkNoPowerEPSUpdates, method: sma.DeclareFaultsV1}, - ), - 19: sma.SlashConsensusFault, - 20: sma.SubmitElectionPoSt, - } -} - -func loadState(vmctx 
types.VMContext) (cid.Cid, *StorageMinerActorState, ActorError) { - var self StorageMinerActorState - oldstate := vmctx.Storage().GetHead() - if err := vmctx.Storage().Get(oldstate, &self); err != nil { - return cid.Undef, nil, err - } - - return oldstate, &self, nil -} - -func loadMinerInfo(vmctx types.VMContext, m *StorageMinerActorState) (*MinerInfo, ActorError) { - var mi MinerInfo - if err := vmctx.Storage().Get(m.Info, &mi); err != nil { - return nil, err - } - - return &mi, nil -} - -func (sma StorageMinerActor) StorageMinerConstructor(act *types.Actor, vmctx types.VMContext, params *StorageMinerConstructorParams) ([]byte, ActorError) { - minerInfo := &MinerInfo{ - Owner: params.Owner, - Worker: params.Worker, - PeerID: params.PeerID, - SectorSize: params.SectorSize, - } - - minfocid, err := vmctx.Storage().Put(minerInfo) - if err != nil { - return nil, err - } - - var self StorageMinerActorState - sectors := amt.NewAMT(types.WrapStorage(vmctx.Storage())) - scid, serr := sectors.Flush() - if serr != nil { - return nil, aerrors.HandleExternalError(serr, "initializing AMT") - } - - self.Sectors = scid - self.ProvingSet = scid - self.Info = minfocid - - storage := vmctx.Storage() - c, err := storage.Put(&self) - if err != nil { - return nil, err - } - - if err := storage.Commit(EmptyCBOR, c); err != nil { - return nil, err - } - - return nil, nil -} - -func (sma StorageMinerActor) PreCommitSector(act *types.Actor, vmctx types.VMContext, params *SectorPreCommitInfo) ([]byte, ActorError) { - ctx := vmctx.Context() - oldstate, self, err := loadState(vmctx) - if err != nil { - return nil, err - } - - if params.SealEpoch >= vmctx.BlockHeight()+build.SealRandomnessLookback { - return nil, aerrors.Newf(1, "sector commitment must be based off past randomness (%d >= %d)", params.SealEpoch, vmctx.BlockHeight()+build.SealRandomnessLookback) - } - - if vmctx.BlockHeight()-params.SealEpoch+build.SealRandomnessLookback > build.SealRandomnessLookbackLimit { - return nil, 
aerrors.Newf(2, "sector commitment must be recent enough (was %d)", vmctx.BlockHeight()-params.SealEpoch+build.SealRandomnessLookback) - } - - mi, err := loadMinerInfo(vmctx, self) - if err != nil { - return nil, err - } - - if vmctx.Message().From != mi.Worker { - return nil, aerrors.New(1, "not authorized to precommit sector for miner") - } - - // make sure the miner isnt trying to submit a pre-existing sector - unique, err := SectorIsUnique(ctx, vmctx.Storage(), self.Sectors, params.SectorNumber) - if err != nil { - return nil, err - } - if !unique { - return nil, aerrors.New(3, "sector already committed!") - } - - // Power of the miner after adding this sector - futurePower := types.BigAdd(self.Power, types.NewInt(mi.SectorSize)) - collateralRequired := CollateralForPower(futurePower) - - // TODO: grab from market? - if act.Balance.LessThan(collateralRequired) { - return nil, aerrors.New(4, "not enough collateral") - } - - self.PreCommittedSectors[uintToStringKey(params.SectorNumber)] = &PreCommittedSector{ - Info: *params, - ReceivedEpoch: vmctx.BlockHeight(), - } - - if len(self.PreCommittedSectors) > 4096 { - return nil, aerrors.New(5, "too many precommitted sectors") - } - - nstate, err := vmctx.Storage().Put(self) - if err != nil { - return nil, err - } - if err := vmctx.Storage().Commit(oldstate, nstate); err != nil { - return nil, err - } - - return nil, nil -} - -func uintToStringKey(i uint64) string { - buf := make([]byte, 10) - n := binary.PutUvarint(buf, i) - return string(buf[:n]) -} - -type SectorProveCommitInfo struct { - Proof []byte - SectorID uint64 - DealIDs []uint64 -} - -func (sma StorageMinerActor) ProveCommitSectorV0(act *types.Actor, vmctx types.VMContext, params *SectorProveCommitInfo) ([]byte, ActorError) { - ctx := vmctx.Context() - oldstate, self, err := loadState(vmctx) - if err != nil { - return nil, err - } - - mi, err := loadMinerInfo(vmctx, self) - if err != nil { - return nil, err - } - - us, ok := 
self.PreCommittedSectors[uintToStringKey(params.SectorID)] - if !ok { - return nil, aerrors.New(1, "no pre-commitment found for sector") - } - - if us.ReceivedEpoch+build.InteractivePoRepDelay >= vmctx.BlockHeight() { - return nil, aerrors.New(2, "too early for proof submission") - } - - delete(self.PreCommittedSectors, uintToStringKey(params.SectorID)) - - // TODO: ensure normalization to ID address - maddr := vmctx.Message().To - - ticket, err := vmctx.GetRandomness(us.Info.SealEpoch - build.SealRandomnessLookback) - if err != nil { - return nil, aerrors.Wrap(err, "failed to get ticket randomness") - } - - seed, err := vmctx.GetRandomness(us.ReceivedEpoch + build.InteractivePoRepDelay) - if err != nil { - return nil, aerrors.Wrap(err, "failed to get randomness for prove sector commitment") - } - - enc, err := SerializeParams(&ComputeDataCommitmentParams{ - DealIDs: params.DealIDs, - SectorSize: mi.SectorSize, - }) - if err != nil { - return nil, aerrors.Wrap(err, "failed to serialize ComputeDataCommitmentParams") - } - - commD, err := vmctx.Send(StorageMarketAddress, SMAMethods.ComputeDataCommitment, types.NewInt(0), enc) - if err != nil { - return nil, aerrors.Wrapf(err, "failed to compute data commitment (sector %d, deals: %v)", params.SectorID, params.DealIDs) - } - - if ok, err := vmctx.Sys().ValidatePoRep(ctx, maddr, mi.SectorSize, commD, us.Info.CommR, ticket, params.Proof, seed, params.SectorID); err != nil { - return nil, err - } else if !ok { - return nil, aerrors.Newf(2, "porep proof was invalid (t:%x; s:%x(%d); p:%s)", ticket, seed, us.ReceivedEpoch+build.InteractivePoRepDelay, truncateHexPrint(params.Proof)) - } - - // Note: There must exist a unique index in the miner's sector set for each - // sector ID. The `faults`, `recovered`, and `done` parameters of the - // SubmitPoSt method express indices into this sector set. 
- nssroot, err := AddToSectorSet(ctx, types.WrapStorage(vmctx.Storage()), self.Sectors, params.SectorID, us.Info.CommR, commD) - if err != nil { - return nil, err - } - self.Sectors = nssroot - - // if miner is not mining, start their proving period now - // Note: As written here, every miners first PoSt will only be over one sector. - // We could set up a 'grace period' for starting mining that would allow miners - // to submit several sectors for their first proving period. Alternatively, we - // could simply make the 'PreCommitSector' call take multiple sectors at a time. - // - // Note: Proving period is a function of sector size; small sectors take less - // time to prove than large sectors do. Sector size is selected when pledging. - pss, lerr := amt.LoadAMT(types.WrapStorage(vmctx.Storage()), self.ProvingSet) - if lerr != nil { - return nil, aerrors.HandleExternalError(lerr, "could not load proving set node") - } - - if pss.Count == 0 { - self.ProvingSet = self.Sectors - // TODO: probably want to wait until the miner is above a certain - // threshold before starting this - self.ElectionPeriodStart = vmctx.BlockHeight() - } - - nstate, err := vmctx.Storage().Put(self) - if err != nil { - return nil, err - } - if err := vmctx.Storage().Commit(oldstate, nstate); err != nil { - return nil, err - } - - activateParams, err := SerializeParams(&ActivateStorageDealsParams{ - Deals: params.DealIDs, - }) - if err != nil { - return nil, err - } - - _, err = vmctx.Send(StorageMarketAddress, SMAMethods.ActivateStorageDeals, types.NewInt(0), activateParams) - return nil, aerrors.Wrapf(err, "calling ActivateStorageDeals failed") -} - -func (sma StorageMinerActor) ProveCommitSectorV1(act *types.Actor, vmctx types.VMContext, params *SectorProveCommitInfo) ([]byte, ActorError) { - ctx := vmctx.Context() - oldstate, self, err := loadState(vmctx) - if err != nil { - return nil, err - } - - mi, err := loadMinerInfo(vmctx, self) - if err != nil { - return nil, err - } - - if 
vmctx.Message().From != mi.Worker { - return nil, aerrors.New(1, "not authorized to submit sector proof for miner") - } - - us, ok := self.PreCommittedSectors[uintToStringKey(params.SectorID)] - if !ok { - return nil, aerrors.New(1, "no pre-commitment found for sector") - } - - if us.ReceivedEpoch+build.InteractivePoRepDelay >= vmctx.BlockHeight() { - return nil, aerrors.New(2, "too early for proof submission") - } - - delete(self.PreCommittedSectors, uintToStringKey(params.SectorID)) - - // TODO: ensure normalization to ID address - maddr := vmctx.Message().To - - ticket, err := vmctx.GetRandomness(us.Info.SealEpoch - build.SealRandomnessLookback) - if err != nil { - return nil, aerrors.Wrap(err, "failed to get ticket randomness") - } - - seed, err := vmctx.GetRandomness(us.ReceivedEpoch + build.InteractivePoRepDelay) - if err != nil { - return nil, aerrors.Wrap(err, "failed to get randomness for prove sector commitment") - } - - enc, err := SerializeParams(&ComputeDataCommitmentParams{ - DealIDs: params.DealIDs, - SectorSize: mi.SectorSize, - }) - if err != nil { - return nil, aerrors.Wrap(err, "failed to serialize ComputeDataCommitmentParams") - } - - commD, err := vmctx.Send(StorageMarketAddress, SMAMethods.ComputeDataCommitment, types.NewInt(0), enc) - if err != nil { - return nil, aerrors.Wrapf(err, "failed to compute data commitment (sector %d, deals: %v)", params.SectorID, params.DealIDs) - } - - if ok, err := vmctx.Sys().ValidatePoRep(ctx, maddr, mi.SectorSize, commD, us.Info.CommR, ticket, params.Proof, seed, params.SectorID); err != nil { - return nil, err - } else if !ok { - return nil, aerrors.Newf(2, "porep proof was invalid (t:%x; s:%x(%d); p:%s)", ticket, seed, us.ReceivedEpoch+build.InteractivePoRepDelay, truncateHexPrint(params.Proof)) - } - - // Note: There must exist a unique index in the miner's sector set for each - // sector ID. 
The `faults`, `recovered`, and `done` parameters of the - // SubmitPoSt method express indices into this sector set. - nssroot, err := AddToSectorSet(ctx, types.WrapStorage(vmctx.Storage()), self.Sectors, params.SectorID, us.Info.CommR, commD) - if err != nil { - return nil, err - } - self.Sectors = nssroot - - // if miner is not mining, start their proving period now - // Note: As written here, every miners first PoSt will only be over one sector. - // We could set up a 'grace period' for starting mining that would allow miners - // to submit several sectors for their first proving period. Alternatively, we - // could simply make the 'PreCommitSector' call take multiple sectors at a time. - // - // Note: Proving period is a function of sector size; small sectors take less - // time to prove than large sectors do. Sector size is selected when pledging. - pss, lerr := amt.LoadAMT(types.WrapStorage(vmctx.Storage()), self.ProvingSet) - if lerr != nil { - return nil, aerrors.HandleExternalError(lerr, "could not load proving set node") - } - - if pss.Count == 0 && !self.Active { - self.ProvingSet = self.Sectors - // TODO: probably want to wait until the miner is above a certain - // threshold before starting this - self.ElectionPeriodStart = vmctx.BlockHeight() - } - - nstate, err := vmctx.Storage().Put(self) - if err != nil { - return nil, err - } - if err := vmctx.Storage().Commit(oldstate, nstate); err != nil { - return nil, err - } - - activateParams, err := SerializeParams(&ActivateStorageDealsParams{ - Deals: params.DealIDs, - }) - if err != nil { - return nil, err - } - - _, err = vmctx.Send(StorageMarketAddress, SMAMethods.ActivateStorageDeals, types.NewInt(0), activateParams) - return nil, aerrors.Wrapf(err, "calling ActivateStorageDeals failed") -} - -func truncateHexPrint(b []byte) string { - s := fmt.Sprintf("%x", b) - if len(s) > 60 { - return s[:20] + "..." 
+ s[len(s)-20:] - } - return s -} - -type SubmitFallbackPoStParams struct { - Proof []byte - Candidates []types.EPostTicket -} - -func (sma StorageMinerActor) SubmitFallbackPoSt(act *types.Actor, vmctx types.VMContext, params *SubmitFallbackPoStParams) ([]byte, ActorError) { - oldstate, self, err := loadState(vmctx) - if err != nil { - return nil, err - } - - mi, err := loadMinerInfo(vmctx, self) - if err != nil { - return nil, err - } - - if vmctx.Message().From != mi.Worker { - return nil, aerrors.New(1, "not authorized to submit post for miner") - } - - /* - // TODO: handle fees - msgVal := vmctx.Message().Value - if msgVal.LessThan(feesRequired) { - return nil, aerrors.New(2, "not enough funds to pay post submission fees") - } - - if msgVal.GreaterThan(feesRequired) { - _, err := vmctx.Send(vmctx.Message().From, 0, - types.BigSub(msgVal, feesRequired), nil) - if err != nil { - return nil, aerrors.Wrap(err, "could not refund excess fees") - } - } - */ - - var seed [sectorbuilder.CommLen]byte - { - randHeight := self.ElectionPeriodStart + build.FallbackPoStDelay - if vmctx.BlockHeight() <= randHeight { - // TODO: spec, retcode - return nil, aerrors.Newf(1, "submit fallback PoSt called too early (%d < %d)", vmctx.BlockHeight(), randHeight) - } - - rand, err := vmctx.GetRandomness(randHeight) - - if err != nil { - return nil, aerrors.Wrap(err, "could not get randomness for PoST") - } - if len(rand) < len(seed) { - return nil, aerrors.Escalate(fmt.Errorf("randomness too small (%d < %d)", - len(rand), len(seed)), "improper randomness") - } - copy(seed[:], rand) - } - - pss, lerr := amt.LoadAMT(types.WrapStorage(vmctx.Storage()), self.ProvingSet) - if lerr != nil { - return nil, aerrors.HandleExternalError(lerr, "could not load proving set node") - } - - { - c, nerr := self.FaultSet.Count() - if nerr != nil { - return nil, aerrors.Absorb(nerr, 6, "invalid bitfield") - } - - if c > RLEMax { - return nil, aerrors.Newf(7, "too many items in bitfield: %d", c) - } - } - - 
faults, nerr := self.FaultSet.AllMap() - if nerr != nil { - return nil, aerrors.Absorb(err, 5, "RLE+ invalid") - } - - var sectorInfos []ffi.PublicSectorInfo - if err := pss.ForEach(func(id uint64, v *cbg.Deferred) error { - if faults[id] { - return nil - } - - var comms [][]byte - if err := cbor.DecodeInto(v.Raw, &comms); err != nil { - return xerrors.New("could not decode comms") - } - si := ffi.PublicSectorInfo{ - SectorID: id, - } - commR := comms[0] - if len(commR) != len(si.CommR) { - return xerrors.Errorf("commR length is wrong: %d", len(commR)) - } - copy(si.CommR[:], commR) - - sectorInfos = append(sectorInfos, si) - - return nil - }); err != nil { - return nil, aerrors.Absorb(err, 3, "could not decode sectorset") - } - - proverID := vmctx.Message().To // TODO: normalize to ID address - - var candidates []sectorbuilder.EPostCandidate - for _, t := range params.Candidates { - var partial [32]byte - copy(partial[:], t.Partial) - candidates = append(candidates, sectorbuilder.EPostCandidate{ - PartialTicket: partial, - SectorID: t.SectorID, - SectorChallengeIndex: t.ChallengeIndex, - }) - } - - if ok, lerr := sectorbuilder.VerifyFallbackPost(vmctx.Context(), mi.SectorSize, - sectorbuilder.NewSortedPublicSectorInfo(sectorInfos), seed[:], params.Proof, candidates, proverID, 0); !ok || lerr != nil { // TODO: FORK - set faults to len(faults) - if lerr != nil { - // TODO: study PoST errors - return nil, aerrors.Absorb(lerr, 4, "PoST error") - } - if !ok { - return nil, aerrors.New(4, "PoST invalid") - } - } - - // Post submission is successful! 
- if err := onSuccessfulPoSt(self, vmctx); err != nil { - return nil, err - } - - c, err := vmctx.Storage().Put(self) - if err != nil { - return nil, err - } - - if err := vmctx.Storage().Commit(oldstate, c); err != nil { - return nil, err - } - - return nil, nil -} - -func (sma StorageMinerActor) GetPower(act *types.Actor, vmctx types.VMContext, params *struct{}) ([]byte, ActorError) { - _, self, err := loadState(vmctx) - if err != nil { - return nil, err - } - return self.Power.Bytes(), nil -} - -func SectorIsUnique(ctx context.Context, s types.Storage, sroot cid.Cid, sid uint64) (bool, ActorError) { - found, _, _, err := GetFromSectorSet(ctx, s, sroot, sid) - if err != nil { - return false, err - } - - return !found, nil -} - -func AddToSectorSet(ctx context.Context, blks amt.Blocks, ss cid.Cid, sectorID uint64, commR, commD []byte) (cid.Cid, ActorError) { - if sectorID >= MaxSectors { - return cid.Undef, aerrors.Newf(25, "sector ID out of range: %d", sectorID) - } - ssr, err := amt.LoadAMT(blks, ss) - if err != nil { - return cid.Undef, aerrors.HandleExternalError(err, "could not load sector set node") - } - - // TODO: Spec says to use SealCommitment, and construct commD from deals each time, - // but that would make SubmitPoSt way, way more expensive - if err := ssr.Set(sectorID, [][]byte{commR, commD}); err != nil { - return cid.Undef, aerrors.HandleExternalError(err, "failed to set commitment in sector set") - } - - ncid, err := ssr.Flush() - if err != nil { - return cid.Undef, aerrors.HandleExternalError(err, "failed to flush sector set") - } - - return ncid, nil -} - -func GetFromSectorSet(ctx context.Context, s types.Storage, ss cid.Cid, sectorID uint64) (bool, []byte, []byte, ActorError) { - if sectorID >= MaxSectors { - return false, nil, nil, aerrors.Newf(25, "sector ID out of range: %d", sectorID) - } - - ssr, err := amt.LoadAMT(types.WrapStorage(s), ss) - if err != nil { - return false, nil, nil, aerrors.HandleExternalError(err, "could not load 
sector set node") - } - - var comms [][]byte - err = ssr.Get(sectorID, &comms) - if err != nil { - if _, ok := err.(*amt.ErrNotFound); ok { - return false, nil, nil, nil - } - return false, nil, nil, aerrors.HandleExternalError(err, "failed to find sector in sector set") - } - - if len(comms) != 2 { - return false, nil, nil, aerrors.Newf(20, "sector set entry should only have 2 elements") - } - - return true, comms[0], comms[1], nil -} - -func RemoveFromSectorSet(ctx context.Context, s types.Storage, ss cid.Cid, ids []uint64) (cid.Cid, aerrors.ActorError) { - - ssr, err := amt.LoadAMT(types.WrapStorage(s), ss) - if err != nil { - return cid.Undef, aerrors.HandleExternalError(err, "could not load sector set node") - } - - for _, id := range ids { - if err := ssr.Delete(id); err != nil { - log.Warnf("failed to delete sector %d from set: %s", id, err) - } - } - - ncid, err := ssr.Flush() - if err != nil { - return cid.Undef, aerrors.HandleExternalError(err, "failed to flush sector set") - } - - return ncid, nil -} - -func ValidatePoRep(ctx context.Context, maddr address.Address, ssize uint64, commD, commR, ticket, proof, seed []byte, sectorID uint64) (bool, ActorError) { - _, span := trace.StartSpan(ctx, "ValidatePoRep") - defer span.End() - ok, err := sectorbuilder.VerifySeal(ssize, commR, commD, maddr, ticket, seed, sectorID, proof) - if err != nil { - return false, aerrors.Absorb(err, 25, "verify seal failed") - } - - return ok, nil -} - -func CollateralForPower(power types.BigInt) types.BigInt { - return types.BigMul(power, types.NewInt(10)) - /* TODO: this - availableFil = FakeGlobalMethods.GetAvailableFil() - totalNetworkPower = StorageMinerActor.GetTotalStorage() - numMiners = StorageMarket.GetMinerCount() - powerCollateral = availableFil * NetworkConstants.POWER_COLLATERAL_PROPORTION * power / totalNetworkPower - perCapitaCollateral = availableFil * NetworkConstants.PER_CAPITA_COLLATERAL_PROPORTION / numMiners - collateralRequired = 
math.Ceil(minerPowerCollateral + minerPerCapitaCollateral) - return collateralRequired - */ -} - -func (sma StorageMinerActor) GetWorkerAddr(act *types.Actor, vmctx types.VMContext, params *struct{}) ([]byte, ActorError) { - _, self, err := loadState(vmctx) - if err != nil { - return nil, err - } - - mi, err := loadMinerInfo(vmctx, self) - if err != nil { - return nil, err - } - - return mi.Worker.Bytes(), nil -} - -func (sma StorageMinerActor) GetOwner(act *types.Actor, vmctx types.VMContext, params *struct{}) ([]byte, ActorError) { - _, self, err := loadState(vmctx) - if err != nil { - return nil, err - } - - mi, err := loadMinerInfo(vmctx, self) - if err != nil { - return nil, err - } - - return mi.Owner.Bytes(), nil -} - -func (sma StorageMinerActor) GetPeerID(act *types.Actor, vmctx types.VMContext, params *struct{}) ([]byte, ActorError) { - _, self, err := loadState(vmctx) - if err != nil { - return nil, err - } - - mi, err := loadMinerInfo(vmctx, self) - if err != nil { - return nil, err - } - - return []byte(mi.PeerID), nil -} - -type UpdatePeerIDParams struct { - PeerID peer.ID -} - -func (sma StorageMinerActor) UpdatePeerID(act *types.Actor, vmctx types.VMContext, params *UpdatePeerIDParams) ([]byte, ActorError) { - oldstate, self, err := loadState(vmctx) - if err != nil { - return nil, err - } - - mi, err := loadMinerInfo(vmctx, self) - if err != nil { - return nil, err - } - - if vmctx.Message().From != mi.Worker { - return nil, aerrors.New(2, "only the mine worker may update the peer ID") - } - - mi.PeerID = params.PeerID - - mic, err := vmctx.Storage().Put(mi) - if err != nil { - return nil, err - } - - self.Info = mic - - c, err := vmctx.Storage().Put(self) - if err != nil { - return nil, err - } - - if err := vmctx.Storage().Commit(oldstate, c); err != nil { - return nil, err - } - - return nil, nil -} - -func (sma StorageMinerActor) GetSectorSize(act *types.Actor, vmctx types.VMContext, params *struct{}) ([]byte, ActorError) { - _, self, err := 
loadState(vmctx) - if err != nil { - return nil, err - } - - mi, err := loadMinerInfo(vmctx, self) - if err != nil { - return nil, err - } - - return types.NewInt(mi.SectorSize).Bytes(), nil -} - -func isLate(height uint64, self *StorageMinerActorState) bool { - return self.ElectionPeriodStart > 0 && height >= self.ElectionPeriodStart+build.SlashablePowerDelay -} - -func (sma StorageMinerActor) IsSlashed(act *types.Actor, vmctx types.VMContext, params *struct{}) ([]byte, ActorError) { - _, self, err := loadState(vmctx) - if err != nil { - return nil, err - } - - return cbg.EncodeBool(self.SlashedAt != 0), nil -} - -type CheckMinerParams struct { - NetworkPower types.BigInt -} - -// TODO: better name -func (sma StorageMinerActor) CheckMiner(act *types.Actor, vmctx types.VMContext, params *CheckMinerParams) ([]byte, ActorError) { - if vmctx.Message().From != StoragePowerAddress { - return nil, aerrors.New(2, "only the storage power actor can check miner") - } - - oldstate, self, err := loadState(vmctx) - if err != nil { - return nil, err - } - - if !isLate(vmctx.BlockHeight(), self) { - // Everything's fine - return nil, nil - } - - if self.SlashedAt != 0 { - // Don't slash more than necessary - return nil, nil - } - - if params.NetworkPower.Equals(self.Power) { - // Don't break the network when there's only one miner left - - log.Warnf("can't slash miner %s for missed PoSt, no power would be left in the network", vmctx.Message().To) - return nil, nil - } - - // Slash for being late - - self.SlashedAt = vmctx.BlockHeight() - - nstate, err := vmctx.Storage().Put(self) - if err != nil { - return nil, err - } - if err := vmctx.Storage().Commit(oldstate, nstate); err != nil { - return nil, err - } - - var out bytes.Buffer - if err := self.Power.MarshalCBOR(&out); err != nil { - return nil, aerrors.HandleExternalError(err, "marshaling return value") - } - return out.Bytes(), nil -} - -type DeclareFaultsParams struct { - Faults types.BitField -} - -func (sma 
StorageMinerActor) DeclareFaultsV0(act *types.Actor, vmctx types.VMContext, params *DeclareFaultsParams) ([]byte, ActorError) { - oldstate, self, aerr := loadState(vmctx) - if aerr != nil { - return nil, aerr - } - - nfaults, err := types.MergeBitFields(params.Faults, self.FaultSet) - if err != nil { - return nil, aerrors.Absorb(err, 1, "failed to merge bitfields") - } - - self.FaultSet = nfaults - - self.LastFaultSubmission = vmctx.BlockHeight() - - nstate, aerr := vmctx.Storage().Put(self) - if err != nil { // TODO: FORK: should be aerr - return nil, aerr - } - if err := vmctx.Storage().Commit(oldstate, nstate); err != nil { - return nil, err - } - - return nil, nil -} - -func (sma StorageMinerActor) DeclareFaultsV1(act *types.Actor, vmctx types.VMContext, params *DeclareFaultsParams) ([]byte, ActorError) { - oldstate, self, aerr := loadState(vmctx) - if aerr != nil { - return nil, aerr - } - - mi, aerr := loadMinerInfo(vmctx, self) - if aerr != nil { - return nil, aerr - } - - if vmctx.Message().From != mi.Worker { - return nil, aerrors.New(1, "not authorized to declare faults for miner") - } - - nfaults, err := types.MergeBitFields(params.Faults, self.FaultSet) - if err != nil { - return nil, aerrors.Absorb(err, 1, "failed to merge bitfields") - } - - self.FaultSet = nfaults - - self.LastFaultSubmission = vmctx.BlockHeight() - - nstate, aerr := vmctx.Storage().Put(self) - if aerr != nil { - return nil, aerr - } - if err := vmctx.Storage().Commit(oldstate, nstate); err != nil { - return nil, err - } - - return nil, nil -} - -type MinerSlashConsensusFault struct { - Slasher address.Address - AtHeight uint64 - SlashedCollateral types.BigInt -} - -func (sma StorageMinerActor) SlashConsensusFault(act *types.Actor, vmctx types.VMContext, params *MinerSlashConsensusFault) ([]byte, ActorError) { - if vmctx.Message().From != StoragePowerAddress { - return nil, aerrors.New(1, "SlashConsensusFault may only be called by the storage market actor") - } - - slashedCollateral 
:= params.SlashedCollateral - if slashedCollateral.LessThan(act.Balance) { - slashedCollateral = act.Balance - } - - // Some of the slashed collateral should be paid to the slasher - // GROWTH_RATE determines how fast the slasher share of slashed collateral will increase as block elapses - // current GROWTH_RATE results in SLASHER_SHARE reaches 1 after 30 blocks - // TODO: define arithmetic precision and rounding for this operation - blockElapsed := vmctx.BlockHeight() - params.AtHeight - - slasherShare := slasherShare(params.SlashedCollateral, blockElapsed) - - burnPortion := types.BigSub(slashedCollateral, slasherShare) - - _, err := vmctx.Send(vmctx.Message().From, 0, slasherShare, nil) - if err != nil { - return nil, aerrors.Wrap(err, "failed to pay slasher") - } - - _, err = vmctx.Send(BurntFundsAddress, 0, burnPortion, nil) - if err != nil { - return nil, aerrors.Wrap(err, "failed to burn funds") - } - - // TODO: this still allows the miner to commit sectors and submit posts, - // their users could potentially be unaffected, but the miner will never be - // able to mine a block again - // One potential issue: the miner will have to pay back the slashed - // collateral to continue submitting PoSts, which includes pledge - // collateral that they no longer really 'need' - - return nil, nil -} - -func (sma StorageMinerActor) SubmitElectionPoSt(act *types.Actor, vmctx types.VMContext, params *struct{}) ([]byte, aerrors.ActorError) { - if vmctx.Message().From != NetworkAddress { - return nil, aerrors.Newf(1, "submit election post can only be called by the storage power actor") - } - - oldstate, self, aerr := loadState(vmctx) - if aerr != nil { - return nil, aerr - } - - if self.SlashedAt != 0 { - return nil, aerrors.New(1, "slashed miners can't perform election PoSt") - } - - if err := onSuccessfulPoSt(self, vmctx); err != nil { - return nil, err - } - - ncid, err := vmctx.Storage().Put(self) - if err != nil { - return nil, err - } - if err := 
vmctx.Storage().Commit(oldstate, ncid); err != nil { - return nil, err - } - - return nil, nil -} - -func onSuccessfulPoSt(self *StorageMinerActorState, vmctx types.VMContext) aerrors.ActorError { - if vmctx.BlockHeight() >= build.ForkNoPowerEPSUpdates { - return onSuccessfulPoStV1(self, vmctx) - } - - return onSuccessfulPoStV0(self, vmctx) -} - -func onSuccessfulPoStV0(self *StorageMinerActorState, vmctx types.VMContext) aerrors.ActorError { - var mi MinerInfo - if err := vmctx.Storage().Get(self.Info, &mi); err != nil { - return err - } - - pss, nerr := amt.LoadAMT(types.WrapStorage(vmctx.Storage()), self.ProvingSet) - if nerr != nil { - return aerrors.HandleExternalError(nerr, "failed to load proving set") - } - - faults, nerr := self.FaultSet.All() - if nerr != nil { - return aerrors.Absorb(nerr, 1, "invalid bitfield (fatal?)") - } - - self.FaultSet = types.NewBitField() - - oldPower := self.Power - newPower := types.BigMul(types.NewInt(pss.Count-uint64(len(faults))), types.NewInt(mi.SectorSize)) - - // If below the minimum size requirement, miners have zero power - if newPower.LessThan(types.NewInt(build.MinimumMinerPower)) { - newPower = types.NewInt(0) - } - - self.Power = newPower - - delta := types.BigSub(self.Power, oldPower) - if self.SlashedAt != 0 { - self.SlashedAt = 0 - delta = self.Power - } - - prevSlashingDeadline := self.ElectionPeriodStart + build.SlashablePowerDelay - if !self.Active && newPower.GreaterThan(types.NewInt(0)) { - self.Active = true - prevSlashingDeadline = 0 - } - - if !(oldPower.IsZero() && newPower.IsZero()) { - enc, err := SerializeParams(&UpdateStorageParams{ - Delta: delta, - NextSlashDeadline: vmctx.BlockHeight() + build.SlashablePowerDelay, - PreviousSlashDeadline: prevSlashingDeadline, - }) - if err != nil { - return err - } - - _, err = vmctx.Send(StoragePowerAddress, SPAMethods.UpdateStorage, types.NewInt(0), enc) - if err != nil { - return aerrors.Wrap(err, "updating storage failed") - } - } - - ncid, err := 
RemoveFromSectorSet(vmctx.Context(), vmctx.Storage(), self.Sectors, faults) - if err != nil { - return err - } - - self.Sectors = ncid - self.ProvingSet = ncid - self.ElectionPeriodStart = vmctx.BlockHeight() - return nil -} - -func onSuccessfulPoStV1(self *StorageMinerActorState, vmctx types.VMContext) aerrors.ActorError { - // TODO: some sector upkeep stuff that is very haphazard and unclear in the spec - - var mi MinerInfo - if err := vmctx.Storage().Get(self.Info, &mi); err != nil { - return err - } - - pss, nerr := amt.LoadAMT(types.WrapStorage(vmctx.Storage()), self.ProvingSet) - if nerr != nil { - return aerrors.HandleExternalError(nerr, "failed to load proving set") - } - - { - c, nerr := self.FaultSet.Count() - if nerr != nil { - return aerrors.Absorb(nerr, 2, "invalid bitfield") - } - if c > RLEMax { - return aerrors.Newf(3, "too many items in bitfield: %d", c) - } - } - - faults, nerr := self.FaultSet.All() - if nerr != nil { - return aerrors.Absorb(nerr, 1, "invalid bitfield (fatal?)") - } - - self.FaultSet = types.NewBitField() - - oldPower := self.Power - newPower := types.BigMul(types.NewInt(pss.Count-uint64(len(faults))), types.NewInt(mi.SectorSize)) - - // If below the minimum size requirement, miners have zero power - if newPower.LessThan(types.NewInt(build.MinimumMinerPower)) { - newPower = types.NewInt(0) - } - - self.Power = newPower - - delta := types.BigSub(self.Power, oldPower) - if self.SlashedAt != 0 { - self.SlashedAt = 0 - delta = self.Power - } - - prevSlashingDeadline := self.ElectionPeriodStart + build.SlashablePowerDelay - if !self.Active && newPower.GreaterThan(types.NewInt(0)) { - self.Active = true - prevSlashingDeadline = 0 - } - - if !(oldPower.IsZero() && newPower.IsZero()) { - enc, err := SerializeParams(&UpdateStorageParams{ - Delta: delta, - NextSlashDeadline: vmctx.BlockHeight() + build.SlashablePowerDelay, - PreviousSlashDeadline: prevSlashingDeadline, - }) - if err != nil { - return err - } - - _, err = 
vmctx.Send(StoragePowerAddress, SPAMethods.UpdateStorage, types.NewInt(0), enc) - if err != nil { - return aerrors.Wrap(err, "updating storage failed") - } - - self.ElectionPeriodStart = vmctx.BlockHeight() - } - - ncid, err := RemoveFromSectorSet(vmctx.Context(), vmctx.Storage(), self.Sectors, faults) - if err != nil { - return err - } - - self.Sectors = ncid - self.ProvingSet = ncid - return nil -} - -func slasherShare(total types.BigInt, elapsed uint64) types.BigInt { - // [int(pow(1.26, n) * 10) for n in range(30)] - fracs := []uint64{10, 12, 15, 20, 25, 31, 40, 50, 63, 80, 100, 127, 160, 201, 254, 320, 403, 508, 640, 807, 1017, 1281, 1614, 2034, 2563, 3230, 4070, 5128, 6462, 8142} - const precision = 10000 - - var frac uint64 - if elapsed >= uint64(len(fracs)) { - return total - } else { - frac = fracs[elapsed] - } - - return types.BigDiv( - types.BigMul( - types.NewInt(frac), - total, - ), - types.NewInt(precision), - ) -} diff --git a/chain/actors/actor_miner_test.go b/chain/actors/actor_miner_test.go deleted file mode 100644 index a522fe349..000000000 --- a/chain/actors/actor_miner_test.go +++ /dev/null @@ -1,222 +0,0 @@ -package actors_test - -import ( - "bytes" - "context" - "math/rand" - "testing" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-sectorbuilder" - "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/chain/actors" - "github.com/filecoin-project/lotus/chain/actors/aerrors" - "github.com/filecoin-project/lotus/chain/stmgr" - "github.com/filecoin-project/lotus/chain/types" - hamt "github.com/ipfs/go-hamt-ipld" - blockstore "github.com/ipfs/go-ipfs-blockstore" - cbg "github.com/whyrusleeping/cbor-gen" -) - -func TestMinerCommitSectors(t *testing.T) { - var worker, client address.Address - var minerAddr address.Address - opts := []HarnessOpt{ - HarnessAddr(&worker, 1000000), - HarnessAddr(&client, 1000000), - HarnessActor(&minerAddr, &worker, actors.StorageMinerCodeCid, - func() 
cbg.CBORMarshaler { - return &actors.StorageMinerConstructorParams{ - Owner: worker, - Worker: worker, - SectorSize: 1024, - PeerID: "fakepeerid", - } - }), - } - - h := NewHarness(t, opts...) - h.vm.Syscalls.ValidatePoRep = func(ctx context.Context, maddr address.Address, ssize uint64, commD, commR, ticket, proof, seed []byte, sectorID uint64) (bool, aerrors.ActorError) { - // all proofs are valid - return true, nil - } - - ret, _ := h.SendFunds(t, worker, minerAddr, types.NewInt(100000)) - ApplyOK(t, ret) - - ret, _ = h.InvokeWithValue(t, client, actors.StorageMarketAddress, actors.SMAMethods.AddBalance, types.NewInt(2000), nil) - ApplyOK(t, ret) - - addSectorToMiner(h, t, minerAddr, worker, client, 1) - - assertSectorIDs(h, t, minerAddr, []uint64{1}) -} - -func TestMinerSubmitBadFault(t *testing.T) { - var worker, client address.Address - var minerAddr address.Address - opts := []HarnessOpt{ - HarnessAddr(&worker, 1000000), - HarnessAddr(&client, 1000000), - HarnessActor(&minerAddr, &worker, actors.StorageMinerCodeCid, - func() cbg.CBORMarshaler { - return &actors.StorageMinerConstructorParams{ - Owner: worker, - Worker: worker, - SectorSize: 1024, - PeerID: "fakepeerid", - } - }), - } - - h := NewHarness(t, opts...) 
- h.vm.Syscalls.ValidatePoRep = func(ctx context.Context, maddr address.Address, ssize uint64, commD, commR, ticket, proof, seed []byte, sectorID uint64) (bool, aerrors.ActorError) { - // all proofs are valid - return true, nil - } - - ret, _ := h.SendFunds(t, worker, minerAddr, types.NewInt(100000)) - ApplyOK(t, ret) - - ret, _ = h.InvokeWithValue(t, client, actors.StorageMarketAddress, actors.SMAMethods.AddBalance, types.NewInt(2000), nil) - ApplyOK(t, ret) - - addSectorToMiner(h, t, minerAddr, worker, client, 1) - - assertSectorIDs(h, t, minerAddr, []uint64{1}) - - bf := types.NewBitField() - bf.Set(6) - ret, _ = h.Invoke(t, worker, minerAddr, actors.MAMethods.DeclareFaults, &actors.DeclareFaultsParams{bf}) - ApplyOK(t, ret) - - ret, _ = h.Invoke(t, actors.NetworkAddress, minerAddr, actors.MAMethods.SubmitElectionPoSt, nil) - ApplyOK(t, ret) - - assertSectorIDs(h, t, minerAddr, []uint64{1}) - - badnum := uint64(0) - badnum-- - bf = types.NewBitField() - bf.Set(badnum) - ret, _ = h.Invoke(t, worker, minerAddr, actors.MAMethods.DeclareFaults, &actors.DeclareFaultsParams{bf}) - ApplyOK(t, ret) - - ret, _ = h.Invoke(t, actors.NetworkAddress, minerAddr, actors.MAMethods.SubmitElectionPoSt, nil) - ApplyOK(t, ret) - - bf = types.NewBitField() - bf.Set(1) - ret, _ = h.Invoke(t, worker, minerAddr, actors.MAMethods.DeclareFaults, &actors.DeclareFaultsParams{bf}) - ApplyOK(t, ret) - - ret, _ = h.Invoke(t, actors.NetworkAddress, minerAddr, actors.MAMethods.SubmitElectionPoSt, nil) - ApplyOK(t, ret) - - assertSectorIDs(h, t, minerAddr, []uint64{}) - -} - -func addSectorToMiner(h *Harness, t *testing.T, minerAddr, worker, client address.Address, sid uint64) { - t.Helper() - s := sectorbuilder.UserBytesForSectorSize(1024) - deal := h.makeFakeDeal(t, minerAddr, worker, client, s) - ret, _ := h.Invoke(t, worker, actors.StorageMarketAddress, actors.SMAMethods.PublishStorageDeals, - &actors.PublishStorageDealsParams{ - Deals: []actors.StorageDealProposal{*deal}, - }) - ApplyOK(t, 
ret) - var dealIds actors.PublishStorageDealResponse - if err := dealIds.UnmarshalCBOR(bytes.NewReader(ret.Return)); err != nil { - t.Fatal(err) - } - - dealid := dealIds.DealIDs[0] - - ret, _ = h.Invoke(t, worker, minerAddr, actors.MAMethods.PreCommitSector, - &actors.SectorPreCommitInfo{ - SectorNumber: sid, - CommR: []byte("cats"), - SealEpoch: 10, - DealIDs: []uint64{dealid}, - }) - ApplyOK(t, ret) - - h.BlockHeight += 100 - ret, _ = h.Invoke(t, worker, minerAddr, actors.MAMethods.ProveCommitSector, - &actors.SectorProveCommitInfo{ - Proof: []byte("prooofy"), - SectorID: sid, - DealIDs: []uint64{dealid}, // TODO: weird that i have to pass this again - }) - ApplyOK(t, ret) -} - -func assertSectorIDs(h *Harness, t *testing.T, maddr address.Address, ids []uint64) { - t.Helper() - sectors, err := getMinerSectorSet(context.TODO(), h.vm.StateTree(), h.bs, maddr) - if err != nil { - t.Fatal(err) - } - - if len(sectors) != len(ids) { - t.Fatal("miner has wrong number of sectors in their sector set") - } - - all := make(map[uint64]bool) - for _, s := range sectors { - all[s.SectorID] = true - } - - for _, id := range ids { - if !all[id] { - t.Fatal("expected to find sector ID: ", id) - } - } -} - -func getMinerSectorSet(ctx context.Context, st types.StateTree, bs blockstore.Blockstore, maddr address.Address) ([]*api.ChainSectorInfo, error) { - mact, err := st.GetActor(maddr) - if err != nil { - return nil, err - } - - cst := hamt.CSTFromBstore(bs) - - var mstate actors.StorageMinerActorState - if err := cst.Get(ctx, mact.Head, &mstate); err != nil { - return nil, err - } - - return stmgr.LoadSectorsFromSet(ctx, bs, mstate.Sectors) -} - -func (h *Harness) makeFakeDeal(t *testing.T, miner, worker, client address.Address, size uint64) *actors.StorageDealProposal { - data := make([]byte, size) - rand.Read(data) - commP, err := sectorbuilder.GeneratePieceCommitment(bytes.NewReader(data), size) - if err != nil { - t.Fatal(err) - } - - prop := actors.StorageDealProposal{ - 
PieceRef: commP[:], - PieceSize: size, - //PieceSerialization SerializationMode // Needs to be here as it tells how data in the sector maps to PieceRef cid - - Client: client, - Provider: miner, - - ProposalExpiration: 10000, - Duration: 150, - - StoragePricePerEpoch: types.NewInt(1), - StorageCollateral: types.NewInt(0), - } - - if err := api.SignWith(context.TODO(), h.w.Sign, client, &prop); err != nil { - t.Fatal(err) - } - - return &prop -} diff --git a/chain/actors/actor_multisig.go b/chain/actors/actor_multisig.go deleted file mode 100644 index 4cff548ad..000000000 --- a/chain/actors/actor_multisig.go +++ /dev/null @@ -1,431 +0,0 @@ -package actors - -import ( - "github.com/ipfs/go-cid" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/lotus/chain/actors/aerrors" - "github.com/filecoin-project/lotus/chain/types" - - cbg "github.com/whyrusleeping/cbor-gen" -) - -type MultiSigActor struct{} -type MultiSigActorState struct { - Signers []address.Address - Required uint64 - NextTxID uint64 - - InitialBalance types.BigInt - StartingBlock uint64 - UnlockDuration uint64 - - //TODO: make this map/sharray/whatever - Transactions []MTransaction -} - -func (msas MultiSigActorState) canSpend(act *types.Actor, amnt types.BigInt, height uint64) bool { - if msas.UnlockDuration == 0 { - return true - } - - offset := height - msas.StartingBlock - if offset > msas.UnlockDuration { - return true - } - - minBalance := types.BigDiv(msas.InitialBalance, types.NewInt(msas.UnlockDuration)) - minBalance = types.BigMul(minBalance, types.NewInt(offset)) - return !minBalance.LessThan(types.BigSub(act.Balance, amnt)) -} - -func (msas MultiSigActorState) isSigner(addr address.Address) bool { - for _, s := range msas.Signers { - if s == addr { - return true - } - } - return false -} - -func (msas MultiSigActorState) getTransaction(txid uint64) (*MTransaction, ActorError) { - if txid >= uint64(len(msas.Transactions)) { - return nil, aerrors.Newf(1, "could not get 
transaction (numbers of tx %d,want to get txid %d)", len(msas.Transactions), txid) - } - return &msas.Transactions[txid], nil -} - -type MTransaction struct { - Created uint64 // NOT USED ?? - TxID uint64 - - To address.Address - Value types.BigInt - Method uint64 - Params []byte - - Approved []address.Address - Complete bool - Canceled bool - RetCode uint64 -} - -func (tx MTransaction) Active() ActorError { - if tx.Complete { - return aerrors.New(2, "transaction already completed") - } - if tx.Canceled { - return aerrors.New(3, "transaction canceled") - } - return nil -} - -type musigMethods struct { - MultiSigConstructor uint64 - Propose uint64 - Approve uint64 - Cancel uint64 - ClearCompleted uint64 - AddSigner uint64 - RemoveSigner uint64 - SwapSigner uint64 - ChangeRequirement uint64 -} - -var MultiSigMethods = musigMethods{1, 2, 3, 4, 5, 6, 7, 8, 9} - -func (msa MultiSigActor) Exports() []interface{} { - return []interface{}{ - 1: msa.MultiSigConstructor, - 2: msa.Propose, - 3: msa.Approve, - 4: msa.Cancel, - //5: msa.ClearCompleted, - 6: msa.AddSigner, - 7: msa.RemoveSigner, - 8: msa.SwapSigner, - 9: msa.ChangeRequirement, - } -} - -type MultiSigConstructorParams struct { - Signers []address.Address - Required uint64 - UnlockDuration uint64 -} - -func (MultiSigActor) MultiSigConstructor(act *types.Actor, vmctx types.VMContext, - params *MultiSigConstructorParams) ([]byte, ActorError) { - self := &MultiSigActorState{ - Signers: params.Signers, - Required: params.Required, - } - - if params.UnlockDuration != 0 { - self.InitialBalance = vmctx.Message().Value - self.UnlockDuration = params.UnlockDuration - self.StartingBlock = vmctx.BlockHeight() - } - - head, err := vmctx.Storage().Put(self) - if err != nil { - return nil, aerrors.Wrap(err, "could not put new head") - } - err = vmctx.Storage().Commit(EmptyCBOR, head) - if err != nil { - return nil, aerrors.Wrap(err, "could not commit new head") - } - return nil, nil -} - -type MultiSigProposeParams struct { - 
To address.Address - Value types.BigInt - Method uint64 - Params []byte -} - -func (MultiSigActor) load(vmctx types.VMContext) (cid.Cid, *MultiSigActorState, ActorError) { - var self MultiSigActorState - head := vmctx.Storage().GetHead() - - err := vmctx.Storage().Get(head, &self) - if err != nil { - return cid.Undef, nil, aerrors.Wrap(err, "could not get self") - } - return head, &self, nil -} - -func (msa MultiSigActor) loadAndVerify(vmctx types.VMContext) (cid.Cid, *MultiSigActorState, ActorError) { - head, self, err := msa.load(vmctx) - if err != nil { - return cid.Undef, nil, err - } - - if !self.isSigner(vmctx.Message().From) { - return cid.Undef, nil, aerrors.New(1, "not authorized") - } - return head, self, nil -} - -func (MultiSigActor) save(vmctx types.VMContext, oldHead cid.Cid, self *MultiSigActorState) ActorError { - newHead, err := vmctx.Storage().Put(self) - if err != nil { - return aerrors.Wrap(err, "could not put new head") - } - err = vmctx.Storage().Commit(oldHead, newHead) - if err != nil { - return aerrors.Wrap(err, "could not commit new head") - } - return nil - -} - -func (msa MultiSigActor) Propose(act *types.Actor, vmctx types.VMContext, - params *MultiSigProposeParams) ([]byte, ActorError) { - - head, self, err := msa.loadAndVerify(vmctx) - if err != nil { - return nil, err - } - - txid := self.NextTxID - self.NextTxID++ - - { - tx := MTransaction{ - TxID: txid, - To: params.To, - Value: params.Value, - Method: params.Method, - Params: params.Params, - Approved: []address.Address{vmctx.Message().From}, - } - self.Transactions = append(self.Transactions, tx) - } - - tx, err := self.getTransaction(txid) - if err != nil { - return nil, err - } - - if self.Required == 1 { - if !self.canSpend(act, tx.Value, vmctx.BlockHeight()) { - return nil, aerrors.New(100, "transaction amount exceeds available") - } - _, err := vmctx.Send(tx.To, tx.Method, tx.Value, tx.Params) - if aerrors.IsFatal(err) { - return nil, err - } - tx.RetCode = 
uint64(aerrors.RetCode(err)) - tx.Complete = true - } - - err = msa.save(vmctx, head, self) - if err != nil { - return nil, aerrors.Wrap(err, "saving state") - } - - // REVIEW: On one hand, I like being very explicit about how we're doing the serialization - // on the other, maybe we shouldnt do direct calls to underlying serialization libs? - return cbg.CborEncodeMajorType(cbg.MajUnsignedInt, tx.TxID), nil -} - -type MultiSigTxID struct { - TxID uint64 -} - -func (msa MultiSigActor) Approve(act *types.Actor, vmctx types.VMContext, - params *MultiSigTxID) ([]byte, ActorError) { - - head, self, err := msa.loadAndVerify(vmctx) - if err != nil { - return nil, err - } - - tx, err := self.getTransaction(params.TxID) - if err != nil { - return nil, err - } - - if err := tx.Active(); err != nil { - return nil, aerrors.Wrap(err, "could not approve") - } - - for _, signer := range tx.Approved { - if signer == vmctx.Message().From { - return nil, aerrors.New(4, "already signed this message") - } - } - tx.Approved = append(tx.Approved, vmctx.Message().From) - if uint64(len(tx.Approved)) >= self.Required { - if !self.canSpend(act, tx.Value, vmctx.BlockHeight()) { - return nil, aerrors.New(100, "transaction amount exceeds available") - } - _, err := vmctx.Send(tx.To, tx.Method, tx.Value, tx.Params) - if aerrors.IsFatal(err) { - return nil, err - } - tx.RetCode = uint64(aerrors.RetCode(err)) - tx.Complete = true - } - - return nil, msa.save(vmctx, head, self) -} - -func (msa MultiSigActor) Cancel(act *types.Actor, vmctx types.VMContext, - params *MultiSigTxID) ([]byte, ActorError) { - - head, self, err := msa.loadAndVerify(vmctx) - if err != nil { - return nil, err - } - - tx, err := self.getTransaction(params.TxID) - if err != nil { - return nil, err - } - - if err := tx.Active(); err != nil { - return nil, aerrors.Wrap(err, "could not cancel") - } - - proposer := tx.Approved[0] - if proposer != vmctx.Message().From && self.isSigner(proposer) { - return nil, aerrors.New(4, 
"cannot cancel another signers transaction") - } - tx.Canceled = true - - return nil, msa.save(vmctx, head, self) -} - -type MultiSigAddSignerParam struct { - Signer address.Address - Increase bool -} - -func (msa MultiSigActor) AddSigner(act *types.Actor, vmctx types.VMContext, - params *MultiSigAddSignerParam) ([]byte, ActorError) { - - head, self, err := msa.load(vmctx) - if err != nil { - return nil, err - } - - msg := vmctx.Message() - if msg.From != msg.To { - return nil, aerrors.New(4, "add signer must be called by wallet itself") - } - if self.isSigner(params.Signer) { - return nil, aerrors.New(5, "new address is already a signer") - } - - self.Signers = append(self.Signers, params.Signer) - if params.Increase { - self.Required = self.Required + 1 - } - - return nil, msa.save(vmctx, head, self) -} - -type MultiSigRemoveSignerParam struct { - Signer address.Address - Decrease bool -} - -func (msa MultiSigActor) RemoveSigner(act *types.Actor, vmctx types.VMContext, - params *MultiSigRemoveSignerParam) ([]byte, ActorError) { - - head, self, err := msa.load(vmctx) - if err != nil { - return nil, err - } - - msg := vmctx.Message() - if msg.From != msg.To { - return nil, aerrors.New(4, "remove signer must be called by wallet itself") - } - if !self.isSigner(params.Signer) { - return nil, aerrors.New(5, "given address was not a signer") - } - - newSigners := make([]address.Address, 0, len(self.Signers)-1) - for _, s := range self.Signers { - if s != params.Signer { - newSigners = append(newSigners, s) - } - } - if params.Decrease || uint64(len(self.Signers)-1) < self.Required { - self.Required = self.Required - 1 - } - - self.Signers = newSigners - - return nil, msa.save(vmctx, head, self) -} - -type MultiSigSwapSignerParams struct { - From address.Address - To address.Address -} - -func (msa MultiSigActor) SwapSigner(act *types.Actor, vmctx types.VMContext, - params *MultiSigSwapSignerParams) ([]byte, ActorError) { - - head, self, err := msa.load(vmctx) - if err 
!= nil { - return nil, err - } - - msg := vmctx.Message() - if msg.From != msg.To { - return nil, aerrors.New(4, "swap signer must be called by wallet itself") - } - - if !self.isSigner(params.From) { - return nil, aerrors.New(5, "given old address was not a signer") - } - if self.isSigner(params.To) { - return nil, aerrors.New(6, "given new address was already a signer") - } - - newSigners := make([]address.Address, 0, len(self.Signers)) - for _, s := range self.Signers { - if s != params.From { - newSigners = append(newSigners, s) - } - } - newSigners = append(newSigners, params.To) - self.Signers = newSigners - - return nil, msa.save(vmctx, head, self) -} - -type MultiSigChangeReqParams struct { - Req uint64 -} - -func (msa MultiSigActor) ChangeRequirement(act *types.Actor, vmctx types.VMContext, - params *MultiSigChangeReqParams) ([]byte, ActorError) { - - head, self, err := msa.load(vmctx) - if err != nil { - return nil, err - } - - msg := vmctx.Message() - if msg.From != msg.To { - return nil, aerrors.New(4, "change requirement must be called by wallet itself") - } - - if params.Req < 1 { - return nil, aerrors.New(5, "requirement must be at least 1") - } - - if params.Req > uint64(len(self.Signers)) { - return nil, aerrors.New(6, "requirement must be at most the numbers of signers") - } - - self.Required = params.Req - return nil, msa.save(vmctx, head, self) -} diff --git a/chain/actors/actor_multisig_test.go b/chain/actors/actor_multisig_test.go deleted file mode 100644 index 5ed76265c..000000000 --- a/chain/actors/actor_multisig_test.go +++ /dev/null @@ -1,97 +0,0 @@ -package actors_test - -import ( - "testing" - - cbor "github.com/ipfs/go-ipld-cbor" - "github.com/stretchr/testify/assert" - cbg "github.com/whyrusleeping/cbor-gen" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/lotus/chain/actors" - "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/chain/vm" -) - -func TestMultiSigCreate(t 
*testing.T) { - var creatorAddr, sig1Addr, sig2Addr, outsideAddr address.Address - opts := []HarnessOpt{ - HarnessAddr(&creatorAddr, 100000), - HarnessAddr(&sig1Addr, 100000), - HarnessAddr(&sig2Addr, 100000), - HarnessAddr(&outsideAddr, 100000), - } - - h := NewHarness(t, opts...) - ret, _ := h.CreateActor(t, creatorAddr, actors.MultisigCodeCid, - &actors.MultiSigConstructorParams{ - Signers: []address.Address{creatorAddr, sig1Addr, sig2Addr}, - Required: 2, - }) - ApplyOK(t, ret) -} - -func ApplyOK(t testing.TB, ret *vm.ApplyRet) { - t.Helper() - if ret.ExitCode != 0 { - t.Fatalf("exit code should be 0, got %d, actorErr: %+v", ret.ExitCode, ret.ActorErr) - } - if ret.ActorErr != nil { - t.Fatalf("somehow got an error with exit == 0: %s", ret.ActorErr) - } -} - -func TestMultiSigOps(t *testing.T) { - var creatorAddr, sig1Addr, sig2Addr, outsideAddr address.Address - var multSigAddr address.Address - opts := []HarnessOpt{ - HarnessAddr(&creatorAddr, 100000), - HarnessAddr(&sig1Addr, 100000), - HarnessAddr(&sig2Addr, 100000), - HarnessAddr(&outsideAddr, 100000), - HarnessActor(&multSigAddr, &creatorAddr, actors.MultisigCodeCid, - func() cbg.CBORMarshaler { - return &actors.MultiSigConstructorParams{ - Signers: []address.Address{creatorAddr, sig1Addr, sig2Addr}, - Required: 2, - } - }), - } - - h := NewHarness(t, opts...) 
- { - const chargeVal = 2000 - // Send funds into the multisig - ret, _ := h.SendFunds(t, creatorAddr, multSigAddr, types.NewInt(chargeVal)) - ApplyOK(t, ret) - h.AssertBalanceChange(t, creatorAddr, -chargeVal) - h.AssertBalanceChange(t, multSigAddr, chargeVal) - } - - { - // Transfer funds outside of multsig - const sendVal = 1000 - ret, _ := h.Invoke(t, creatorAddr, multSigAddr, actors.MultiSigMethods.Propose, - &actors.MultiSigProposeParams{ - To: outsideAddr, - Value: types.NewInt(sendVal), - }) - ApplyOK(t, ret) - var txIDParam actors.MultiSigTxID - err := cbor.DecodeInto(ret.Return, &txIDParam.TxID) - assert.NoError(t, err, "decoding txid") - - ret, _ = h.Invoke(t, outsideAddr, multSigAddr, actors.MultiSigMethods.Approve, - &txIDParam) - assert.Equal(t, uint8(1), ret.ExitCode, "outsideAddr should not approve") - h.AssertBalanceChange(t, multSigAddr, 0) - - ret2, _ := h.Invoke(t, sig1Addr, multSigAddr, actors.MultiSigMethods.Approve, - &txIDParam) - ApplyOK(t, ret2) - - h.AssertBalanceChange(t, outsideAddr, sendVal) - h.AssertBalanceChange(t, multSigAddr, -sendVal) - } - -} diff --git a/chain/actors/actor_paych.go b/chain/actors/actor_paych.go deleted file mode 100644 index 32181e592..000000000 --- a/chain/actors/actor_paych.go +++ /dev/null @@ -1,302 +0,0 @@ -package actors - -import ( - "bytes" - "fmt" - - "github.com/ipfs/go-cid" - "github.com/minio/blake2b-simd" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/lotus/build" - "github.com/filecoin-project/lotus/chain/actors/aerrors" - "github.com/filecoin-project/lotus/chain/types" -) - -type PaymentChannelActor struct{} - -type PaymentInfo struct { - PayChActor address.Address - Payer address.Address - ChannelMessage *cid.Cid - - Vouchers []*types.SignedVoucher -} - -type LaneState struct { - Closed bool - Redeemed types.BigInt - Nonce uint64 -} - -type PaymentChannelActorState struct { - From address.Address - To address.Address - - ToSend types.BigInt - - ClosingAt uint64 - 
MinCloseHeight uint64 - - // TODO: needs to be map[uint64]*laneState - // waiting on refmt#35 to be fixed - LaneStates map[string]*LaneState -} - -func (pca PaymentChannelActor) Exports() []interface{} { - return []interface{}{ - 1: pca.Constructor, - 2: pca.UpdateChannelState, - 3: pca.Close, - 4: pca.Collect, - 5: pca.GetOwner, - 6: pca.GetToSend, - } -} - -type pcaMethods struct { - Constructor uint64 - UpdateChannelState uint64 - Close uint64 - Collect uint64 - GetOwner uint64 - GetToSend uint64 -} - -var PCAMethods = pcaMethods{1, 2, 3, 4, 5, 6} - -type PCAConstructorParams struct { - To address.Address -} - -func (pca PaymentChannelActor) Constructor(act *types.Actor, vmctx types.VMContext, params *PCAConstructorParams) ([]byte, ActorError) { - var self PaymentChannelActorState - self.From = vmctx.Origin() - self.To = params.To - self.LaneStates = make(map[string]*LaneState) - - storage := vmctx.Storage() - c, err := storage.Put(&self) - if err != nil { - return nil, err - } - - if err := storage.Commit(EmptyCBOR, c); err != nil { - return nil, err - } - - return nil, nil -} - -type PCAUpdateChannelStateParams struct { - Sv types.SignedVoucher - Secret []byte - Proof []byte -} - -func hash(b []byte) []byte { - s := blake2b.Sum256(b) - return s[:] -} - -type PaymentVerifyParams struct { - Extra []byte - Proof []byte -} - -func (pca PaymentChannelActor) UpdateChannelState(act *types.Actor, vmctx types.VMContext, params *PCAUpdateChannelStateParams) ([]byte, ActorError) { - var self PaymentChannelActorState - oldstate := vmctx.Storage().GetHead() - storage := vmctx.Storage() - if err := storage.Get(oldstate, &self); err != nil { - return nil, err - } - - sv := params.Sv - - vb, nerr := sv.SigningBytes() - if nerr != nil { - return nil, aerrors.Escalate(nerr, "failed to serialize signedvoucher") - } - - if err := vmctx.VerifySignature(sv.Signature, self.From, vb); err != nil { - return nil, err - } - - if vmctx.BlockHeight() < sv.TimeLock { - return nil, 
aerrors.New(2, "cannot use this voucher yet!") - } - - if len(sv.SecretPreimage) > 0 { - if !bytes.Equal(hash(params.Secret), sv.SecretPreimage) { - return nil, aerrors.New(3, "incorrect secret!") - } - } - - if sv.Extra != nil { - encoded, err := SerializeParams(&PaymentVerifyParams{sv.Extra.Data, params.Proof}) - if err != nil { - return nil, err - } - - _, err = vmctx.Send(sv.Extra.Actor, sv.Extra.Method, types.NewInt(0), encoded) - if err != nil { - return nil, aerrors.Newf(4, "spend voucher verification failed: %s", err) - } - } - - ls, ok := self.LaneStates[fmt.Sprint(sv.Lane)] - if !ok { - ls = new(LaneState) - ls.Redeemed = types.NewInt(0) // TODO: kinda annoying that this doesnt default to a usable value - self.LaneStates[fmt.Sprint(sv.Lane)] = ls - } - if ls.Closed { - return nil, aerrors.New(5, "cannot redeem a voucher on a closed lane") - } - - if ls.Nonce > sv.Nonce { - return nil, aerrors.New(6, "voucher has an outdated nonce, cannot redeem") - } - - mergeValue := types.NewInt(0) - for _, merge := range sv.Merges { - if merge.Lane == sv.Lane { - return nil, aerrors.New(7, "voucher cannot merge its own lane") - } - - ols := self.LaneStates[fmt.Sprint(merge.Lane)] - - if ols.Nonce >= merge.Nonce { - return nil, aerrors.New(8, "merge in voucher has outdated nonce, cannot redeem") - } - - mergeValue = types.BigAdd(mergeValue, ols.Redeemed) - ols.Nonce = merge.Nonce - } - - ls.Nonce = sv.Nonce - balanceDelta := types.BigSub(sv.Amount, types.BigAdd(mergeValue, ls.Redeemed)) - ls.Redeemed = sv.Amount - - newSendBalance := types.BigAdd(self.ToSend, balanceDelta) - if newSendBalance.LessThan(types.NewInt(0)) { - // TODO: is this impossible? 
- return nil, aerrors.New(9, "voucher would leave channel balance negative") - } - - if newSendBalance.GreaterThan(act.Balance) { - return nil, aerrors.New(10, "not enough funds in channel to cover voucher") - } - - log.Info("vals: ", newSendBalance, sv.Amount, balanceDelta, mergeValue, ls.Redeemed) - self.ToSend = newSendBalance - - if sv.MinCloseHeight != 0 { - if self.ClosingAt != 0 && self.ClosingAt < sv.MinCloseHeight { - self.ClosingAt = sv.MinCloseHeight - } - if self.MinCloseHeight < sv.MinCloseHeight { - self.MinCloseHeight = sv.MinCloseHeight - } - } - - ncid, err := storage.Put(&self) - if err != nil { - return nil, err - } - if err := storage.Commit(oldstate, ncid); err != nil { - return nil, err - } - - return nil, nil -} - -func (pca PaymentChannelActor) Close(act *types.Actor, vmctx types.VMContext, params *struct{}) ([]byte, aerrors.ActorError) { - var self PaymentChannelActorState - storage := vmctx.Storage() - oldstate := storage.GetHead() - if err := storage.Get(oldstate, &self); err != nil { - return nil, err - } - - if vmctx.Message().From != self.From && vmctx.Message().From != self.To { - return nil, aerrors.New(1, "not authorized to close channel") - } - - if self.ClosingAt != 0 { - return nil, aerrors.New(2, "channel already closing") - } - - self.ClosingAt = vmctx.BlockHeight() + build.PaymentChannelClosingDelay - if self.ClosingAt < self.MinCloseHeight { - self.ClosingAt = self.MinCloseHeight - } - - ncid, err := storage.Put(&self) - if err != nil { - return nil, err - } - if err := storage.Commit(oldstate, ncid); err != nil { - return nil, err - } - - return nil, nil -} - -func (pca PaymentChannelActor) Collect(act *types.Actor, vmctx types.VMContext, params *struct{}) ([]byte, aerrors.ActorError) { - var self PaymentChannelActorState - storage := vmctx.Storage() - oldstate := storage.GetHead() - if err := storage.Get(oldstate, &self); err != nil { - return nil, err - } - - if self.ClosingAt == 0 { - return nil, aerrors.New(1, "payment 
channel not closing or closed") - } - - if vmctx.BlockHeight() < self.ClosingAt { - return nil, aerrors.New(2, "payment channel not closed yet") - } - _, err := vmctx.Send(self.From, 0, types.BigSub(act.Balance, self.ToSend), nil) - if err != nil { - return nil, err - } - _, err = vmctx.Send(self.To, 0, self.ToSend, nil) - if err != nil { - return nil, err - } - - self.ToSend = types.NewInt(0) - - ncid, err := storage.Put(&self) - if err != nil { - return nil, err - } - if err := storage.Commit(oldstate, ncid); err != nil { - return nil, err - } - - return nil, nil -} - -func (pca PaymentChannelActor) GetOwner(act *types.Actor, vmctx types.VMContext, params *struct{}) ([]byte, aerrors.ActorError) { - var self PaymentChannelActorState - storage := vmctx.Storage() - if err := storage.Get(storage.GetHead(), &self); err != nil { - return nil, err - } - - return self.From.Bytes(), nil -} - -func (pca PaymentChannelActor) GetToSend(act *types.Actor, vmctx types.VMContext, params *struct{}) ([]byte, aerrors.ActorError) { - var self PaymentChannelActorState - storage := vmctx.Storage() - if err := storage.Get(storage.GetHead(), &self); err != nil { - return nil, err - } - - return self.ToSend.Bytes(), nil -} diff --git a/chain/actors/actor_paych_test.go b/chain/actors/actor_paych_test.go deleted file mode 100644 index 226f147c5..000000000 --- a/chain/actors/actor_paych_test.go +++ /dev/null @@ -1,93 +0,0 @@ -package actors_test - -import ( - "context" - "testing" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/lotus/chain/actors" - "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/chain/wallet" -) - -func TestPaychCreate(t *testing.T) { - var creatorAddr, targetAddr address.Address - opts := []HarnessOpt{ - HarnessAddr(&creatorAddr, 100000), - HarnessAddr(&targetAddr, 100000), - } - - h := NewHarness(t, opts...) 
- ret, _ := h.CreateActor(t, creatorAddr, actors.PaymentChannelCodeCid, - &actors.PCAConstructorParams{ - To: targetAddr, - }) - ApplyOK(t, ret) -} - -func signVoucher(t *testing.T, w *wallet.Wallet, addr address.Address, sv *types.SignedVoucher) { - vb, err := sv.SigningBytes() - if err != nil { - t.Fatal(err) - } - - sig, err := w.Sign(context.TODO(), addr, vb) - if err != nil { - t.Fatal(err) - } - - sv.Signature = sig -} - -func TestPaychUpdate(t *testing.T) { - var creatorAddr, targetAddr address.Address - opts := []HarnessOpt{ - HarnessAddr(&creatorAddr, 100000), - HarnessAddr(&targetAddr, 100000), - } - - h := NewHarness(t, opts...) - ret, _ := h.CreateActor(t, creatorAddr, actors.PaymentChannelCodeCid, - &actors.PCAConstructorParams{ - To: targetAddr, - }) - ApplyOK(t, ret) - pch, err := address.NewFromBytes(ret.Return) - if err != nil { - t.Fatal(err) - } - - ret, _ = h.SendFunds(t, creatorAddr, pch, types.NewInt(5000)) - ApplyOK(t, ret) - - sv := &types.SignedVoucher{ - Amount: types.NewInt(100), - Nonce: 1, - } - signVoucher(t, h.w, creatorAddr, sv) - - ret, _ = h.Invoke(t, targetAddr, pch, actors.PCAMethods.UpdateChannelState, &actors.PCAUpdateChannelStateParams{ - Sv: *sv, - }) - ApplyOK(t, ret) - - ret, _ = h.Invoke(t, targetAddr, pch, actors.PCAMethods.GetToSend, nil) - ApplyOK(t, ret) - - bi := types.BigFromBytes(ret.Return) - if bi.String() != "100" { - t.Fatal("toSend amount was wrong: ", bi.String()) - } - - ret, _ = h.Invoke(t, targetAddr, pch, actors.PCAMethods.Close, nil) - ApplyOK(t, ret) - - // now we have to 'wait' for the chain to advance. 
- h.BlockHeight = 1000 - - ret, _ = h.Invoke(t, targetAddr, pch, actors.PCAMethods.Collect, nil) - ApplyOK(t, ret) - - h.AssertBalanceChange(t, targetAddr, 100) - h.AssertBalanceChange(t, creatorAddr, -100) -} diff --git a/chain/actors/actor_storagemarket.go b/chain/actors/actor_storagemarket.go deleted file mode 100644 index e4ad58cb5..000000000 --- a/chain/actors/actor_storagemarket.go +++ /dev/null @@ -1,670 +0,0 @@ -package actors - -import ( - "bytes" - "context" - "sort" - - "go.opencensus.io/trace" - "golang.org/x/xerrors" - - "github.com/filecoin-project/go-amt-ipld" - "github.com/ipfs/go-cid" - "github.com/ipfs/go-hamt-ipld" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-cbor-util" - "github.com/filecoin-project/go-sectorbuilder" - "github.com/filecoin-project/lotus/build" - "github.com/filecoin-project/lotus/chain/actors/aerrors" - "github.com/filecoin-project/lotus/chain/types" -) - -type StorageMarketActor struct{} - -type smaMethods struct { - Constructor uint64 - WithdrawBalance uint64 - AddBalance uint64 - CheckLockedBalance uint64 - PublishStorageDeals uint64 - HandleCronAction uint64 - SettleExpiredDeals uint64 - ProcessStorageDealsPayment uint64 - SlashStorageDealCollateral uint64 - GetLastExpirationFromDealIDs uint64 - ActivateStorageDeals uint64 - ComputeDataCommitment uint64 -} - -var SMAMethods = smaMethods{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12} - -func (sma StorageMarketActor) Exports() []interface{} { - return []interface{}{ - 2: sma.WithdrawBalance, - 3: sma.AddBalance, - // 4: sma.CheckLockedBalance, - 5: sma.PublishStorageDeals, - // 6: sma.HandleCronAction, - // 7: sma.SettleExpiredDeals, - // 8: sma.ProcessStorageDealsPayment, - // 9: sma.SlashStorageDealCollateral, - // 10: sma.GetLastExpirationFromDealIDs, - 11: sma.ActivateStorageDeals, // TODO: move under PublishStorageDeals after specs team approves - 12: sma.ComputeDataCommitment, - } -} - -type StorageParticipantBalance struct { - Locked 
types.BigInt - Available types.BigInt -} - -type StorageMarketState struct { - Balances cid.Cid // hamt - Deals cid.Cid // amt - - NextDealID uint64 // TODO: spec -} - -// TODO: Drop in favour of car storage -type SerializationMode = uint64 - -const ( - SerializationUnixFSv0 = iota - // IPLD / car -) - -type StorageDealProposal struct { - PieceRef []byte // cid bytes // TODO: spec says to use cid.Cid, probably not a good idea - PieceSize uint64 - - Client address.Address - Provider address.Address - - ProposalExpiration uint64 - Duration uint64 // TODO: spec - - StoragePricePerEpoch types.BigInt - StorageCollateral types.BigInt - - ProposerSignature *types.Signature -} - -func (sdp *StorageDealProposal) TotalStoragePrice() types.BigInt { - return types.BigMul(sdp.StoragePricePerEpoch, types.NewInt(sdp.Duration)) -} - -type SignFunc = func(context.Context, []byte) (*types.Signature, error) - -func (sdp *StorageDealProposal) Sign(ctx context.Context, sign SignFunc) error { - if sdp.ProposerSignature != nil { - return xerrors.New("signature already present in StorageDealProposal") - } - var buf bytes.Buffer - if err := sdp.MarshalCBOR(&buf); err != nil { - return err - } - sig, err := sign(ctx, buf.Bytes()) - if err != nil { - return err - } - sdp.ProposerSignature = sig - return nil -} - -func (sdp *StorageDealProposal) Cid() (cid.Cid, error) { - nd, err := cborutil.AsIpld(sdp) - if err != nil { - return cid.Undef, err - } - - return nd.Cid(), nil -} - -func (sdp *StorageDealProposal) Verify() error { - unsigned := *sdp - unsigned.ProposerSignature = nil - var buf bytes.Buffer - if err := unsigned.MarshalCBOR(&buf); err != nil { - return err - } - - return sdp.ProposerSignature.Verify(sdp.Client, buf.Bytes()) -} - -type OnChainDeal struct { - PieceRef []byte // cid bytes // TODO: spec says to use cid.Cid, probably not a good idea - PieceSize uint64 - - Client address.Address - Provider address.Address - - ProposalExpiration uint64 - Duration uint64 // TODO: spec - - 
StoragePricePerEpoch types.BigInt - StorageCollateral types.BigInt - ActivationEpoch uint64 // 0 = inactive -} - -type WithdrawBalanceParams struct { - Balance types.BigInt -} - -func (sma StorageMarketActor) WithdrawBalance(act *types.Actor, vmctx types.VMContext, params *WithdrawBalanceParams) ([]byte, ActorError) { - // TODO: (spec) this should be 2-stage - - var self StorageMarketState - old := vmctx.Storage().GetHead() - if err := vmctx.Storage().Get(old, &self); err != nil { - return nil, err - } - - b, bnd, err := GetMarketBalances(vmctx.Context(), vmctx.Ipld(), self.Balances, vmctx.Message().From) - if err != nil { - return nil, aerrors.Wrap(err, "could not get balance") - } - - balance := b[0] - - if balance.Available.LessThan(params.Balance) { - return nil, aerrors.Newf(1, "can not withdraw more funds than available: %s > %s", params.Balance, b[0].Available) - } - - balance.Available = types.BigSub(balance.Available, params.Balance) - - _, err = vmctx.Send(vmctx.Message().From, 0, params.Balance, nil) - if err != nil { - return nil, aerrors.Wrap(err, "sending funds failed") - } - - bcid, err := setMarketBalances(vmctx, bnd, map[address.Address]StorageParticipantBalance{ - vmctx.Message().From: balance, - }) - if err != nil { - return nil, err - } - - self.Balances = bcid - - nroot, err := vmctx.Storage().Put(&self) - if err != nil { - return nil, err - } - - return nil, vmctx.Storage().Commit(old, nroot) -} - -func (sma StorageMarketActor) AddBalance(act *types.Actor, vmctx types.VMContext, params *struct{}) ([]byte, ActorError) { - var self StorageMarketState - old := vmctx.Storage().GetHead() - if err := vmctx.Storage().Get(old, &self); err != nil { - return nil, err - } - - b, bnd, err := GetMarketBalances(vmctx.Context(), vmctx.Ipld(), self.Balances, vmctx.Message().From) - if err != nil { - return nil, aerrors.Wrap(err, "could not get balance") - } - - balance := b[0] - - balance.Available = types.BigAdd(balance.Available, vmctx.Message().Value) - - 
bcid, err := setMarketBalances(vmctx, bnd, map[address.Address]StorageParticipantBalance{ - vmctx.Message().From: balance, - }) - if err != nil { - return nil, err - } - - self.Balances = bcid - - nroot, err := vmctx.Storage().Put(&self) - if err != nil { - return nil, err - } - - return nil, vmctx.Storage().Commit(old, nroot) -} - -func setMarketBalances(vmctx types.VMContext, nd *hamt.Node, set map[address.Address]StorageParticipantBalance) (cid.Cid, ActorError) { - keys := make([]address.Address, 0, len(set)) - for k := range set { - keys = append(keys, k) - } - sort.Slice(keys, func(i, j int) bool { - return bytes.Compare(keys[i].Bytes(), keys[j].Bytes()) < 0 - }) - for _, addr := range keys { - balance := set[addr] - if err := nd.Set(vmctx.Context(), string(addr.Bytes()), &balance); err != nil { - return cid.Undef, aerrors.HandleExternalError(err, "setting new balance") - } - } - if err := nd.Flush(vmctx.Context()); err != nil { - return cid.Undef, aerrors.HandleExternalError(err, "flushing balance hamt") - } - - c, err := vmctx.Ipld().Put(vmctx.Context(), nd) - if err != nil { - return cid.Undef, aerrors.HandleExternalError(err, "failed to balances storage") - } - return c, nil -} - -func GetMarketBalances(ctx context.Context, store *hamt.CborIpldStore, rcid cid.Cid, addrs ...address.Address) ([]StorageParticipantBalance, *hamt.Node, ActorError) { - ctx, span := trace.StartSpan(ctx, "GetMarketBalances") - defer span.End() - - nd, err := hamt.LoadNode(ctx, store, rcid) - if err != nil { - return nil, nil, aerrors.HandleExternalError(err, "failed to load miner set") - } - - out := make([]StorageParticipantBalance, len(addrs)) - - for i, a := range addrs { - var balance StorageParticipantBalance - err = nd.Find(ctx, string(a.Bytes()), &balance) - switch err { - case hamt.ErrNotFound: - out[i] = StorageParticipantBalance{ - Locked: types.NewInt(0), - Available: types.NewInt(0), - } - case nil: - out[i] = balance - default: - return nil, nil, 
aerrors.HandleExternalError(err, "failed to do set lookup") - } - - } - - return out, nd, nil -} - -/* -func (sma StorageMarketActor) CheckLockedBalance(act *types.Actor, vmctx types.VMContext, params *struct{}) ([]byte, ActorError) { - -} -*/ - -type PublishStorageDealsParams struct { - Deals []StorageDealProposal -} - -type PublishStorageDealResponse struct { - DealIDs []uint64 -} - -func (sma StorageMarketActor) PublishStorageDeals(act *types.Actor, vmctx types.VMContext, params *PublishStorageDealsParams) ([]byte, ActorError) { - var self StorageMarketState - old := vmctx.Storage().GetHead() - if err := vmctx.Storage().Get(old, &self); err != nil { - return nil, err - } - - deals, err := amt.LoadAMT(types.WrapStorage(vmctx.Storage()), self.Deals) - if err != nil { - return nil, aerrors.HandleExternalError(err, "loading deals amt") - } - - // todo: handle duplicate deals - - if len(params.Deals) == 0 { - return nil, aerrors.New(1, "no storage deals in params.Deals") - } - - out := PublishStorageDealResponse{ - DealIDs: make([]uint64, len(params.Deals)), - } - - workerBytes, aerr := vmctx.Send(params.Deals[0].Provider, MAMethods.GetWorkerAddr, types.NewInt(0), nil) - if aerr != nil { - return nil, aerr - } - providerWorker, err := address.NewFromBytes(workerBytes) - if err != nil { - return nil, aerrors.HandleExternalError(err, "parsing provider worker address bytes") - } - - // TODO: REVIEW: Do we want to check if provider exists in the power actor? 
- - for i, deal := range params.Deals { - if err := self.validateDeal(vmctx, deal, providerWorker); err != nil { - return nil, err - } - - err := deals.Set(self.NextDealID, &OnChainDeal{ - PieceRef: deal.PieceRef, - PieceSize: deal.PieceSize, - - Client: deal.Client, - Provider: deal.Provider, - - ProposalExpiration: deal.ProposalExpiration, - Duration: deal.Duration, - - StoragePricePerEpoch: deal.StoragePricePerEpoch, - StorageCollateral: deal.StorageCollateral, - ActivationEpoch: 0, - }) - if err != nil { - return nil, aerrors.HandleExternalError(err, "setting deal in deal AMT") - } - out.DealIDs[i] = self.NextDealID - - self.NextDealID++ - } - - dealsCid, err := deals.Flush() - if err != nil { - return nil, aerrors.HandleExternalError(err, "saving deals AMT") - } - - self.Deals = dealsCid - - nroot, err := vmctx.Storage().Put(&self) - if err != nil { - return nil, aerrors.HandleExternalError(err, "storing state failed") - } - - aerr = vmctx.Storage().Commit(old, nroot) - if aerr != nil { - return nil, aerr - } - - var outBuf bytes.Buffer - if err := out.MarshalCBOR(&outBuf); err != nil { - return nil, aerrors.HandleExternalError(err, "serialising output") - } - - return outBuf.Bytes(), nil -} - -func (st *StorageMarketState) validateDeal(vmctx types.VMContext, deal StorageDealProposal, providerWorker address.Address) aerrors.ActorError { - ctx, span := trace.StartSpan(vmctx.Context(), "validateDeal") - defer span.End() - - if vmctx.BlockHeight() > deal.ProposalExpiration { - return aerrors.New(1, "deal proposal already expired") - } - - if vmctx.Message().From != providerWorker { - return aerrors.New(2, "Deals must be submitted by the miner worker") - } - - if err := deal.Verify(); err != nil { - return aerrors.Absorb(err, 3, "verifying proposer signature") - } - - // TODO: do some caching (changes gas so needs to be in spec too) - b, bnd, aerr := GetMarketBalances(ctx, vmctx.Ipld(), st.Balances, deal.Client, providerWorker) - if aerr != nil { - return 
aerrors.Wrap(aerr, "getting client, and provider balances") - } - clientBalance := b[0] - providerBalance := b[1] - - totalPrice := deal.TotalStoragePrice() - - if clientBalance.Available.LessThan(totalPrice) { - return aerrors.Newf(5, "client doesn't have enough available funds to cover storage price; %d < %d", clientBalance.Available, totalPrice) - } - - clientBalance = lockFunds(clientBalance, totalPrice) - - // TODO: REVIEW: Not clear who pays for this - if providerBalance.Available.LessThan(deal.StorageCollateral) { - return aerrors.Newf(6, "provider doesn't have enough available funds to cover StorageCollateral; %d < %d", providerBalance.Available, deal.StorageCollateral) - } - - providerBalance = lockFunds(providerBalance, deal.StorageCollateral) - - // TODO: piece checks (e.g. size > sectorSize)? - - bcid, aerr := setMarketBalances(vmctx, bnd, map[address.Address]StorageParticipantBalance{ - deal.Client: clientBalance, - providerWorker: providerBalance, - }) - if aerr != nil { - return aerr - } - - st.Balances = bcid - - return nil -} - -type ActivateStorageDealsParams struct { - Deals []uint64 -} - -func (sma StorageMarketActor) ActivateStorageDeals(act *types.Actor, vmctx types.VMContext, params *ActivateStorageDealsParams) ([]byte, ActorError) { - var self StorageMarketState - old := vmctx.Storage().GetHead() - if err := vmctx.Storage().Get(old, &self); err != nil { - return nil, err - } - - deals, err := amt.LoadAMT(types.WrapStorage(vmctx.Storage()), self.Deals) - if err != nil { - return nil, aerrors.HandleExternalError(err, "loading deals amt") - } - - for _, deal := range params.Deals { - var dealInfo OnChainDeal - if err := deals.Get(deal, &dealInfo); err != nil { - if _, is := err.(*amt.ErrNotFound); is { - return nil, aerrors.New(3, "deal not found") - } - return nil, aerrors.HandleExternalError(err, "getting deal info failed") - } - - if vmctx.Message().From != dealInfo.Provider { - return nil, aerrors.New(1, "ActivateStorageDeals can only be 
called by the deal provider") - } - - if vmctx.BlockHeight() > dealInfo.ProposalExpiration { - return nil, aerrors.New(2, "deal cannot be activated: proposal expired") - } - - if dealInfo.ActivationEpoch > 0 { - // this probably can't happen in practice - return nil, aerrors.New(3, "deal already active") - } - - dealInfo.ActivationEpoch = vmctx.BlockHeight() - - if err := deals.Set(deal, &dealInfo); err != nil { - return nil, aerrors.HandleExternalError(err, "setting deal info in AMT failed") - } - } - - dealsCid, err := deals.Flush() - if err != nil { - return nil, aerrors.HandleExternalError(err, "saving deals AMT") - } - - self.Deals = dealsCid - - nroot, err := vmctx.Storage().Put(&self) - if err != nil { - return nil, aerrors.HandleExternalError(err, "storing state failed") - } - - aerr := vmctx.Storage().Commit(old, nroot) - if aerr != nil { - return nil, aerr - } - - return nil, nil -} - -type ProcessStorageDealsPaymentParams struct { - DealIDs []uint64 -} - -func (sma StorageMarketActor) ProcessStorageDealsPayment(act *types.Actor, vmctx types.VMContext, params *ProcessStorageDealsPaymentParams) ([]byte, ActorError) { - var self StorageMarketState - old := vmctx.Storage().GetHead() - if err := vmctx.Storage().Get(old, &self); err != nil { - return nil, err - } - - deals, err := amt.LoadAMT(types.WrapStorage(vmctx.Storage()), self.Deals) - if err != nil { - return nil, aerrors.HandleExternalError(err, "loading deals amt") - } - - // TODO: Would be nice if send could assert actor type - workerBytes, aerr := vmctx.Send(vmctx.Message().From, MAMethods.GetWorkerAddr, types.NewInt(0), nil) - if aerr != nil { - return nil, aerr - } - providerWorker, err := address.NewFromBytes(workerBytes) - if err != nil { - return nil, aerrors.HandleExternalError(err, "parsing provider worker address bytes") - } - - for _, deal := range params.DealIDs { - var dealInfo OnChainDeal - if err := deals.Get(deal, &dealInfo); err != nil { - if _, is := err.(*amt.ErrNotFound); is { - 
return nil, aerrors.New(2, "deal not found") - } - return nil, aerrors.HandleExternalError(err, "getting deal info failed") - } - - if dealInfo.Provider != vmctx.Message().From { - return nil, aerrors.New(3, "ProcessStorageDealsPayment can only be called by deal provider") - } - - if vmctx.BlockHeight() < dealInfo.ActivationEpoch { - // TODO: This is probably fatal - return nil, aerrors.New(4, "ActivationEpoch lower than block height") - } - - if vmctx.BlockHeight() > dealInfo.ActivationEpoch+dealInfo.Duration { - // Deal expired, miner should drop it - // TODO: process payment for the remainder of last proving period - return nil, nil - } - - toPay := types.BigMul(dealInfo.StoragePricePerEpoch, types.NewInt(build.SlashablePowerDelay)) - - b, bnd, aerr := GetMarketBalances(vmctx.Context(), vmctx.Ipld(), self.Balances, dealInfo.Client, providerWorker) - if aerr != nil { - return nil, aerr - } - clientBal := b[0] - providerBal := b[1] - - clientBal.Locked, providerBal.Available = transferFunds(clientBal.Locked, providerBal.Available, toPay) - - // TODO: call set once - bcid, aerr := setMarketBalances(vmctx, bnd, map[address.Address]StorageParticipantBalance{ - dealInfo.Client: clientBal, - providerWorker: providerBal, - }) - if aerr != nil { - return nil, aerr - } - - self.Balances = bcid - } - - nroot, err := vmctx.Storage().Put(&self) - if err != nil { - return nil, aerrors.HandleExternalError(err, "storing state failed") - } - - aerr = vmctx.Storage().Commit(old, nroot) - if aerr != nil { - return nil, aerr - } - - return nil, nil -} - -func lockFunds(p StorageParticipantBalance, amt types.BigInt) StorageParticipantBalance { - p.Available, p.Locked = transferFunds(p.Available, p.Locked, amt) - return p -} - -func transferFunds(from, to, amt types.BigInt) (types.BigInt, types.BigInt) { - // TODO: some asserts - return types.BigSub(from, amt), types.BigAdd(to, amt) -} - -type ComputeDataCommitmentParams struct { - DealIDs []uint64 - SectorSize uint64 -} - -func (sma 
StorageMarketActor) ComputeDataCommitment(act *types.Actor, vmctx types.VMContext, params *ComputeDataCommitmentParams) ([]byte, ActorError) { - var self StorageMarketState - old := vmctx.Storage().GetHead() - if err := vmctx.Storage().Get(old, &self); err != nil { - return nil, err - } - - deals, err := amt.LoadAMT(types.WrapStorage(vmctx.Storage()), self.Deals) - if err != nil { - return nil, aerrors.HandleExternalError(err, "loading deals amt") - } - - if len(params.DealIDs) == 0 { - return nil, aerrors.New(3, "no deal IDs") - } - - var pieces []sectorbuilder.PublicPieceInfo - for _, deal := range params.DealIDs { - var dealInfo OnChainDeal - if err := deals.Get(deal, &dealInfo); err != nil { - if _, is := err.(*amt.ErrNotFound); is { - return nil, aerrors.New(4, "deal not found") - } - return nil, aerrors.HandleExternalError(err, "getting deal info failed") - } - - if dealInfo.Provider != vmctx.Message().From { - return nil, aerrors.New(5, "referenced deal was not from caller") - } - - var commP [32]byte - copy(commP[:], dealInfo.PieceRef) - - pieces = append(pieces, sectorbuilder.PublicPieceInfo{ - Size: dealInfo.PieceSize, - CommP: commP, - }) - } - - commd, err := sectorbuilder.GenerateDataCommitment(params.SectorSize, pieces) - if err != nil { - return nil, aerrors.Absorb(err, 6, "failed to generate data commitment from pieces") - } - - return commd[:], nil -} - -/* -func (sma StorageMarketActor) HandleCronAction(act *types.Actor, vmctx types.VMContext, params *struct{}) ([]byte, ActorError) { - -} - -func (sma StorageMarketActor) SettleExpiredDeals(act *types.Actor, vmctx types.VMContext, params *struct{}) ([]byte, ActorError) { - -} - -func (sma StorageMarketActor) SlashStorageDealCollateral(act *types.Actor, vmctx types.VMContext, params *struct{}) ([]byte, ActorError) { - -} - -func (sma StorageMarketActor) GetLastExpirationFromDealIDs(act *types.Actor, vmctx types.VMContext, params *struct{}) ([]byte, ActorError) { - -} -*/ diff --git 
a/chain/actors/actor_storagepower.go b/chain/actors/actor_storagepower.go deleted file mode 100644 index 09a3eb30a..000000000 --- a/chain/actors/actor_storagepower.go +++ /dev/null @@ -1,797 +0,0 @@ -package actors - -import ( - "bytes" - "context" - "io" - - "github.com/filecoin-project/go-amt-ipld" - cid "github.com/ipfs/go-cid" - hamt "github.com/ipfs/go-hamt-ipld" - "github.com/libp2p/go-libp2p-core/peer" - cbg "github.com/whyrusleeping/cbor-gen" - "go.opencensus.io/trace" - xerrors "golang.org/x/xerrors" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/lotus/build" - "github.com/filecoin-project/lotus/chain/actors/aerrors" - "github.com/filecoin-project/lotus/chain/types" -) - -type StoragePowerActor struct{} - -type spaMethods struct { - Constructor uint64 - CreateStorageMiner uint64 - ArbitrateConsensusFault uint64 - UpdateStorage uint64 - GetTotalStorage uint64 - PowerLookup uint64 - IsValidMiner uint64 - PledgeCollateralForSize uint64 - CheckProofSubmissions uint64 -} - -var SPAMethods = spaMethods{1, 2, 3, 4, 5, 6, 7, 8, 9} - -func (spa StoragePowerActor) Exports() []interface{} { - return []interface{}{ - //1: spa.StoragePowerConstructor, - 2: spa.CreateStorageMiner, - 3: spa.ArbitrateConsensusFault, - 4: spa.UpdateStorage, - 5: spa.GetTotalStorage, - 6: spa.PowerLookup, - 7: spa.IsValidMiner, - 8: spa.PledgeCollateralForSize, - 9: spa.CheckProofSubmissions, - } -} - -type StoragePowerState struct { - Miners cid.Cid - ProvingBuckets cid.Cid // amt[ProvingPeriodBucket]hamt[minerAddress]struct{} - MinerCount uint64 - LastMinerCheck uint64 - - TotalStorage types.BigInt -} - -type CreateStorageMinerParams struct { - Owner address.Address - Worker address.Address - SectorSize uint64 - PeerID peer.ID -} - -func (spa StoragePowerActor) CreateStorageMiner(act *types.Actor, vmctx types.VMContext, params *CreateStorageMinerParams) ([]byte, ActorError) { - if !build.SupportedSectorSize(params.SectorSize) { - return nil, aerrors.New(1, 
"Unsupported sector size") - } - - var self StoragePowerState - old := vmctx.Storage().GetHead() - if err := vmctx.Storage().Get(old, &self); err != nil { - return nil, err - } - - reqColl, err := pledgeCollateralForSize(vmctx, types.NewInt(0), self.TotalStorage, self.MinerCount+1) - if err != nil { - return nil, err - } - - if vmctx.Message().Value.LessThan(reqColl) { - return nil, aerrors.Newf(1, "not enough funds passed to cover required miner collateral (needed %s, got %s)", reqColl, vmctx.Message().Value) - } - - encoded, err := CreateExecParams(StorageMinerCodeCid, &StorageMinerConstructorParams{ - Owner: params.Owner, - Worker: params.Worker, - SectorSize: params.SectorSize, - PeerID: params.PeerID, - }) - if err != nil { - return nil, err - } - - ret, err := vmctx.Send(InitAddress, IAMethods.Exec, vmctx.Message().Value, encoded) - if err != nil { - return nil, err - } - - naddr, nerr := address.NewFromBytes(ret) - if nerr != nil { - return nil, aerrors.Absorb(nerr, 2, "could not read address of new actor") - } - - ncid, err := MinerSetAdd(context.TODO(), vmctx, self.Miners, naddr) - if err != nil { - return nil, err - } - self.Miners = ncid - self.MinerCount++ - - nroot, err := vmctx.Storage().Put(&self) - if err != nil { - return nil, err - } - - if err := vmctx.Storage().Commit(old, nroot); err != nil { - return nil, err - } - - return naddr.Bytes(), nil -} - -type ArbitrateConsensusFaultParams struct { - Block1 *types.BlockHeader - Block2 *types.BlockHeader -} - -func (spa StoragePowerActor) ArbitrateConsensusFault(act *types.Actor, vmctx types.VMContext, params *ArbitrateConsensusFaultParams) ([]byte, ActorError) { - if params.Block1.Miner != params.Block2.Miner { - return nil, aerrors.New(2, "blocks must be from the same miner") - } - - rval, err := vmctx.Send(params.Block1.Miner, MAMethods.GetWorkerAddr, types.NewInt(0), nil) - if err != nil { - return nil, aerrors.Wrap(err, "failed to get miner worker") - } - - worker, oerr := 
address.NewFromBytes(rval) - if oerr != nil { - // REVIEW: should this be fatal? i can't think of a real situation that would get us here - return nil, aerrors.Absorb(oerr, 3, "response from 'GetWorkerAddr' was not a valid address") - } - - if err := params.Block1.CheckBlockSignature(vmctx.Context(), worker); err != nil { - return nil, aerrors.Absorb(err, 4, "block1 did not have valid signature") - } - - if err := params.Block2.CheckBlockSignature(vmctx.Context(), worker); err != nil { - return nil, aerrors.Absorb(err, 5, "block2 did not have valid signature") - } - - // see the "Consensus Faults" section of the faults spec (faults.md) - // for details on these slashing conditions. - if !shouldSlash(params.Block1, params.Block2) { - return nil, aerrors.New(6, "blocks do not prove a slashable offense") - } - - var self StoragePowerState - old := vmctx.Storage().GetHead() - if err := vmctx.Storage().Get(old, &self); err != nil { - return nil, err - } - - if types.BigCmp(self.TotalStorage, types.NewInt(0)) == 0 { - return nil, aerrors.Fatal("invalid state, storage power actor has zero total storage") - } - - miner := params.Block1.Miner - if has, err := MinerSetHas(vmctx, self.Miners, miner); err != nil { - return nil, aerrors.Wrapf(err, "failed to check miner in set") - } else if !has { - return nil, aerrors.New(7, "either already slashed or not a miner") - } - - minerPower, err := powerLookup(context.TODO(), vmctx, &self, miner) - if err != nil { - return nil, err - } - - slashedCollateral, err := pledgeCollateralForSize(vmctx, minerPower, self.TotalStorage, self.MinerCount) - if err != nil { - return nil, err - } - - enc, err := SerializeParams(&MinerSlashConsensusFault{ - Slasher: vmctx.Message().From, - AtHeight: params.Block1.Height, - SlashedCollateral: slashedCollateral, - }) - if err != nil { - return nil, err - } - - _, err = vmctx.Send(miner, MAMethods.SlashConsensusFault, types.NewInt(0), enc) - if err != nil { - return nil, err - } - - // Remove the miner 
from the list of network miners - ncid, err := MinerSetRemove(context.TODO(), vmctx, self.Miners, miner) - if err != nil { - return nil, err - } - self.Miners = ncid - self.MinerCount-- - - self.TotalStorage = types.BigSub(self.TotalStorage, minerPower) - - nroot, err := vmctx.Storage().Put(&self) - if err != nil { - return nil, err - } - - if err := vmctx.Storage().Commit(old, nroot); err != nil { - return nil, err - } - - return nil, nil -} - -func cidArrContains(a []cid.Cid, b cid.Cid) bool { - for _, c := range a { - if b == c { - return true - } - } - - return false -} - -func shouldSlash(block1, block2 *types.BlockHeader) bool { - // First slashing condition, blocks have the same ticket round - if block1.Height == block2.Height { - return true - } - - /* Second slashing condition requires having access to the parent tipset blocks - // This might not always be available, needs some thought on the best way to deal with this - - - // Second slashing condition, miner ignored own block when mining - // Case A: block2 could have been in block1's parent set but is not - b1ParentHeight := block1.Height - len(block1.Tickets) - - block1ParentTipSet := block1.Parents - if !cidArrContains(block1.Parents, block2.Cid()) && - b1ParentHeight == block2.Height && - block1ParentTipSet.ParentCids == block2.ParentCids { - return true - } - - // Case B: block1 could have been in block2's parent set but is not - block2ParentTipSet := parentOf(block2) - if !block2Parent.contains(block1) && - block2ParentTipSet.Height == block1.Height && - block2ParentTipSet.ParentCids == block1.ParentCids { - return true - } - - */ - - return false -} - -type UpdateStorageParams struct { - Delta types.BigInt - NextSlashDeadline uint64 - PreviousSlashDeadline uint64 -} - -func (spa StoragePowerActor) UpdateStorage(act *types.Actor, vmctx types.VMContext, params *UpdateStorageParams) ([]byte, ActorError) { - var self StoragePowerState - old := vmctx.Storage().GetHead() - if err := 
vmctx.Storage().Get(old, &self); err != nil { - return nil, err - } - - has, err := MinerSetHas(vmctx, self.Miners, vmctx.Message().From) - if err != nil { - return nil, err - } - if !has { - return nil, aerrors.New(1, "update storage must only be called by a miner actor") - } - - self.TotalStorage = types.BigAdd(self.TotalStorage, params.Delta) - - previousBucket := params.PreviousSlashDeadline % build.SlashablePowerDelay - nextBucket := params.NextSlashDeadline % build.SlashablePowerDelay - - if previousBucket == nextBucket && params.PreviousSlashDeadline != 0 { - nroot, err := vmctx.Storage().Put(&self) - if err != nil { - return nil, err - } - - if err := vmctx.Storage().Commit(old, nroot); err != nil { - return nil, err - } - - return nil, nil // Nothing to do - } - - buckets, eerr := amt.LoadAMT(types.WrapStorage(vmctx.Storage()), self.ProvingBuckets) - if eerr != nil { - return nil, aerrors.HandleExternalError(eerr, "loading proving buckets amt") - } - - if params.PreviousSlashDeadline != 0 { // delete from previous bucket - err := deleteMinerFromBucket(vmctx, buckets, previousBucket) - if err != nil { - return nil, aerrors.Wrapf(err, "delete from bucket %d, next %d", previousBucket, nextBucket) - } - } - - err = addMinerToBucket(vmctx, buckets, nextBucket) - if err != nil { - return nil, err - } - - self.ProvingBuckets, eerr = buckets.Flush() - if eerr != nil { - return nil, aerrors.HandleExternalError(eerr, "flushing proving buckets") - } - - nroot, err := vmctx.Storage().Put(&self) - if err != nil { - return nil, err - } - - if err := vmctx.Storage().Commit(old, nroot); err != nil { - return nil, err - } - - return nil, nil -} - -func deleteMinerFromBucket(vmctx types.VMContext, buckets *amt.Root, previousBucket uint64) aerrors.ActorError { - var bucket cid.Cid - err := buckets.Get(previousBucket, &bucket) - switch err.(type) { - case *amt.ErrNotFound: - return aerrors.HandleExternalError(err, "proving bucket missing") - case nil: // noop - default: - 
return aerrors.HandleExternalError(err, "getting proving bucket") - } - - bhamt, err := hamt.LoadNode(vmctx.Context(), vmctx.Ipld(), bucket) - if err != nil { - return aerrors.HandleExternalError(err, "failed to load proving bucket") - } - err = bhamt.Delete(vmctx.Context(), string(vmctx.Message().From.Bytes())) - if err != nil { - return aerrors.HandleExternalError(err, "deleting miner from proving bucket") - } - - err = bhamt.Flush(vmctx.Context()) - if err != nil { - return aerrors.HandleExternalError(err, "flushing previous proving bucket") - } - - bucket, err = vmctx.Ipld().Put(vmctx.Context(), bhamt) - if err != nil { - return aerrors.HandleExternalError(err, "putting previous proving bucket hamt") - } - - err = buckets.Set(previousBucket, bucket) - if err != nil { - return aerrors.HandleExternalError(err, "setting previous proving bucket cid in amt") - } - - return nil -} - -func addMinerToBucket(vmctx types.VMContext, buckets *amt.Root, nextBucket uint64) aerrors.ActorError { - var bhamt *hamt.Node - var bucket cid.Cid - err := buckets.Get(nextBucket, &bucket) - switch err.(type) { - case *amt.ErrNotFound: - bhamt = hamt.NewNode(vmctx.Ipld()) - case nil: - bhamt, err = hamt.LoadNode(vmctx.Context(), vmctx.Ipld(), bucket) - if err != nil { - return aerrors.HandleExternalError(err, "failed to load proving bucket") - } - default: - return aerrors.HandleExternalError(err, "getting proving bucket") - } - - err = bhamt.Set(vmctx.Context(), string(vmctx.Message().From.Bytes()), CborNull) - if err != nil { - return aerrors.HandleExternalError(err, "setting miner in proving bucket") - } - - err = bhamt.Flush(vmctx.Context()) - if err != nil { - return aerrors.HandleExternalError(err, "flushing previous proving bucket") - } - - bucket, err = vmctx.Ipld().Put(vmctx.Context(), bhamt) - if err != nil { - return aerrors.HandleExternalError(err, "putting previous proving bucket hamt") - } - - err = buckets.Set(nextBucket, bucket) - if err != nil { - return 
aerrors.HandleExternalError(err, "setting previous proving bucket cid in amt") - } - return nil -} - -func (spa StoragePowerActor) GetTotalStorage(act *types.Actor, vmctx types.VMContext, params *struct{}) ([]byte, ActorError) { - var self StoragePowerState - if err := vmctx.Storage().Get(vmctx.Storage().GetHead(), &self); err != nil { - return nil, err - } - - return self.TotalStorage.Bytes(), nil -} - -type PowerLookupParams struct { - Miner address.Address -} - -func (spa StoragePowerActor) PowerLookup(act *types.Actor, vmctx types.VMContext, params *PowerLookupParams) ([]byte, ActorError) { - var self StoragePowerState - if err := vmctx.Storage().Get(vmctx.Storage().GetHead(), &self); err != nil { - return nil, aerrors.Wrap(err, "getting head") - } - - pow, err := powerLookup(context.TODO(), vmctx, &self, params.Miner) - if err != nil { - return nil, err - } - - return pow.Bytes(), nil -} - -func powerLookup(ctx context.Context, vmctx types.VMContext, self *StoragePowerState, miner address.Address) (types.BigInt, ActorError) { - has, err := MinerSetHas(vmctx, self.Miners, miner) - if err != nil { - return types.EmptyInt, err - } - - if !has { - return types.EmptyInt, aerrors.New(1, "miner not registered with storage power actor") - } - - // TODO: Use local amt - ret, err := vmctx.Send(miner, MAMethods.GetPower, types.NewInt(0), nil) - if err != nil { - return types.EmptyInt, aerrors.Wrap(err, "invoke Miner.GetPower") - } - - return types.BigFromBytes(ret), nil -} - -type IsValidMinerParam struct { - Addr address.Address -} - -func (spa StoragePowerActor) IsValidMiner(act *types.Actor, vmctx types.VMContext, param *IsValidMinerParam) ([]byte, ActorError) { - var self StoragePowerState - if err := vmctx.Storage().Get(vmctx.Storage().GetHead(), &self); err != nil { - return nil, err - } - - has, err := MinerSetHas(vmctx, self.Miners, param.Addr) - if err != nil { - return nil, err - } - - if !has { - log.Warnf("Miner INVALID: not in set: %s", param.Addr) - - 
return cbg.CborBoolFalse, nil - } - - ret, err := vmctx.Send(param.Addr, MAMethods.IsSlashed, types.NewInt(0), nil) - if err != nil { - return nil, err - } - - slashed := bytes.Equal(ret, cbg.CborBoolTrue) - - if slashed { - log.Warnf("Miner INVALID: /SLASHED/ : %s", param.Addr) - } - - return cbg.EncodeBool(!slashed), nil -} - -type PledgeCollateralParams struct { - Size types.BigInt -} - -func (spa StoragePowerActor) PledgeCollateralForSize(act *types.Actor, vmctx types.VMContext, param *PledgeCollateralParams) ([]byte, ActorError) { - var self StoragePowerState - if err := vmctx.Storage().Get(vmctx.Storage().GetHead(), &self); err != nil { - return nil, err - } - - totalCollateral, err := pledgeCollateralForSize(vmctx, param.Size, self.TotalStorage, self.MinerCount) - if err != nil { - return nil, err - } - - return totalCollateral.Bytes(), nil -} - -func pledgeCollateralForSize(vmctx types.VMContext, size, totalStorage types.BigInt, minerCount uint64) (types.BigInt, aerrors.ActorError) { - netBalance, err := vmctx.GetBalance(NetworkAddress) - if err != nil { - return types.EmptyInt, err - } - - // TODO: the spec says to also grab 'total vested filecoin' and include it as available - // If we don't factor that in, we effectively assume all of the locked up filecoin is 'available' - // the blocker on that right now is that its hard to tell how much filecoin is unlocked - - availableFilecoin := types.BigSub( - types.BigMul(types.NewInt(build.TotalFilecoin), types.NewInt(build.FilecoinPrecision)), - netBalance, - ) - - totalPowerCollateral := types.BigDiv( - types.BigMul( - availableFilecoin, - types.NewInt(build.PowerCollateralProportion), - ), - types.NewInt(build.CollateralPrecision), - ) - - totalPerCapitaCollateral := types.BigDiv( - types.BigMul( - availableFilecoin, - types.NewInt(build.PerCapitaCollateralProportion), - ), - types.NewInt(build.CollateralPrecision), - ) - - // REVIEW: for bootstrapping purposes, we skip the power portion of the - // 
collateral if there is no collateral in the network yet - powerCollateral := types.NewInt(0) - if types.BigCmp(totalStorage, types.NewInt(0)) != 0 { - powerCollateral = types.BigDiv( - types.BigMul( - totalPowerCollateral, - size, - ), - totalStorage, - ) - } - - perCapCollateral := types.BigDiv( - totalPerCapitaCollateral, - types.NewInt(minerCount), - ) - - return types.BigAdd(powerCollateral, perCapCollateral), nil -} - -func (spa StoragePowerActor) CheckProofSubmissions(act *types.Actor, vmctx types.VMContext, param *struct{}) ([]byte, ActorError) { - if vmctx.Message().From != CronAddress { - return nil, aerrors.New(1, "CheckProofSubmissions is only callable from the cron actor") - } - - var self StoragePowerState - old := vmctx.Storage().GetHead() - if err := vmctx.Storage().Get(old, &self); err != nil { - return nil, err - } - - for i := self.LastMinerCheck; i < vmctx.BlockHeight(); i++ { - height := i + 1 - - err := checkProofSubmissionsAtH(vmctx, &self, height) - if err != nil { - return nil, err - } - } - - self.LastMinerCheck = vmctx.BlockHeight() - - nroot, aerr := vmctx.Storage().Put(&self) - if aerr != nil { - return nil, aerr - } - - if err := vmctx.Storage().Commit(old, nroot); err != nil { - return nil, err - } - - return nil, nil -} - -func checkProofSubmissionsAtH(vmctx types.VMContext, self *StoragePowerState, height uint64) aerrors.ActorError { - bucketID := height % build.SlashablePowerDelay - - buckets, eerr := amt.LoadAMT(types.WrapStorage(vmctx.Storage()), self.ProvingBuckets) - if eerr != nil { - return aerrors.HandleExternalError(eerr, "loading proving buckets amt") - } - - var bucket cid.Cid - err := buckets.Get(bucketID, &bucket) - switch err.(type) { - case *amt.ErrNotFound: - return nil // nothing to do - case nil: - default: - return aerrors.HandleExternalError(err, "getting proving bucket") - } - - bhamt, err := hamt.LoadNode(vmctx.Context(), vmctx.Ipld(), bucket) - if err != nil { - return aerrors.HandleExternalError(err, "failed 
to load proving bucket") - } - - err = bhamt.ForEach(vmctx.Context(), func(k string, val interface{}) error { - _, span := trace.StartSpan(vmctx.Context(), "StoragePowerActor.CheckProofSubmissions.loop") - defer span.End() - - maddr, err := address.NewFromBytes([]byte(k)) - if err != nil { - return aerrors.Escalate(err, "parsing miner address") - } - - span.AddAttributes(trace.StringAttribute("miner", maddr.String())) - - params, err := SerializeParams(&CheckMinerParams{NetworkPower: self.TotalStorage}) - if err != nil { - return err - } - - ret, err := vmctx.Send(maddr, MAMethods.CheckMiner, types.NewInt(0), params) - if err != nil { - return err - } - - if len(ret) == 0 { - return nil // miner is fine - } - - var power types.BigInt - if err := power.UnmarshalCBOR(bytes.NewReader(ret)); err != nil { - return xerrors.Errorf("unmarshaling CheckMiner response (%x): %w", ret, err) - } - - if power.GreaterThan(types.NewInt(0)) { - log.Warnf("slashing miner %s for missed PoSt (%s B, H: %d, Bucket: %d)", maddr, power, height, bucketID) - - self.TotalStorage = types.BigSub(self.TotalStorage, power) - } - return nil - }) - - if err != nil { - return aerrors.HandleExternalError(err, "iterating miners in proving bucket") - } - - return nil -} - -func MinerSetHas(vmctx types.VMContext, rcid cid.Cid, maddr address.Address) (bool, aerrors.ActorError) { - nd, err := hamt.LoadNode(vmctx.Context(), vmctx.Ipld(), rcid) - if err != nil { - return false, aerrors.HandleExternalError(err, "failed to load miner set") - } - - err = nd.Find(vmctx.Context(), string(maddr.Bytes()), nil) - switch err { - case hamt.ErrNotFound: - return false, nil - case nil: - return true, nil - default: - return false, aerrors.HandleExternalError(err, "failed to do set lookup") - } -} - -func MinerSetList(ctx context.Context, cst *hamt.CborIpldStore, rcid cid.Cid) ([]address.Address, error) { - nd, err := hamt.LoadNode(ctx, cst, rcid) - if err != nil { - return nil, xerrors.Errorf("failed to load miner set: 
%w", err) - } - - var out []address.Address - err = nd.ForEach(ctx, func(k string, val interface{}) error { - addr, err := address.NewFromBytes([]byte(k)) - if err != nil { - return err - } - out = append(out, addr) - return nil - }) - if err != nil { - return nil, err - } - - return out, nil -} - -func MinerSetAdd(ctx context.Context, vmctx types.VMContext, rcid cid.Cid, maddr address.Address) (cid.Cid, aerrors.ActorError) { - nd, err := hamt.LoadNode(ctx, vmctx.Ipld(), rcid) - if err != nil { - return cid.Undef, aerrors.HandleExternalError(err, "failed to load miner set") - } - - mkey := string(maddr.Bytes()) - err = nd.Find(ctx, mkey, nil) - if err == nil { - return cid.Undef, aerrors.New(20, "miner already in set") - } - - if !xerrors.Is(err, hamt.ErrNotFound) { - return cid.Undef, aerrors.HandleExternalError(err, "failed to do miner set check") - } - - if err := nd.Set(ctx, mkey, uint64(1)); err != nil { - return cid.Undef, aerrors.HandleExternalError(err, "adding miner address to set failed") - } - - if err := nd.Flush(ctx); err != nil { - return cid.Undef, aerrors.HandleExternalError(err, "failed to flush miner set") - } - - c, err := vmctx.Ipld().Put(ctx, nd) - if err != nil { - return cid.Undef, aerrors.HandleExternalError(err, "failed to persist miner set to storage") - } - - return c, nil -} - -func MinerSetRemove(ctx context.Context, vmctx types.VMContext, rcid cid.Cid, maddr address.Address) (cid.Cid, aerrors.ActorError) { - nd, err := hamt.LoadNode(ctx, vmctx.Ipld(), rcid) - if err != nil { - return cid.Undef, aerrors.HandleExternalError(err, "failed to load miner set") - } - - mkey := string(maddr.Bytes()) - switch nd.Delete(ctx, mkey) { - default: - return cid.Undef, aerrors.HandleExternalError(err, "failed to delete miner from set") - case hamt.ErrNotFound: - return cid.Undef, aerrors.New(1, "miner not found in set on delete") - case nil: - } - - if err := nd.Flush(ctx); err != nil { - return cid.Undef, aerrors.HandleExternalError(err, "failed to 
flush miner set") - } - - c, err := vmctx.Ipld().Put(ctx, nd) - if err != nil { - return cid.Undef, aerrors.HandleExternalError(err, "failed to persist miner set to storage") - } - - return c, nil -} - -type cbgNull struct{} - -var CborNull = &cbgNull{} - -func (cbgNull) MarshalCBOR(w io.Writer) error { - n, err := w.Write(cbg.CborNull) - if err != nil { - return err - } - if n != 1 { - return xerrors.New("expected to write 1 byte") - } - return nil -} - -func (cbgNull) UnmarshalCBOR(r io.Reader) error { - b := [1]byte{} - n, err := r.Read(b[:]) - if err != nil { - return err - } - if n != 1 { - return xerrors.New("expected 1 byte") - } - if !bytes.Equal(b[:], cbg.CborNull) { - return xerrors.New("expected cbor null") - } - return nil -} diff --git a/chain/actors/actor_storagepower_test.go b/chain/actors/actor_storagepower_test.go deleted file mode 100644 index 3e6f47f29..000000000 --- a/chain/actors/actor_storagepower_test.go +++ /dev/null @@ -1,174 +0,0 @@ -package actors_test - -import ( - "context" - "fmt" - "testing" - - "github.com/filecoin-project/lotus/build" - cbg "github.com/whyrusleeping/cbor-gen" - - "github.com/filecoin-project/go-address" - . "github.com/filecoin-project/lotus/chain/actors" - "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/chain/vm" - "github.com/filecoin-project/lotus/chain/wallet" - - cid "github.com/ipfs/go-cid" - hamt "github.com/ipfs/go-hamt-ipld" - bstore "github.com/ipfs/go-ipfs-blockstore" - cbor "github.com/ipfs/go-ipld-cbor" - mh "github.com/multiformats/go-multihash" - "github.com/stretchr/testify/assert" -) - -func TestStorageMarketCreateAndSlashMiner(t *testing.T) { - var ownerAddr, workerAddr address.Address - - opts := []HarnessOpt{ - HarnessAddr(&ownerAddr, 1000000), - HarnessAddr(&workerAddr, 100000), - } - - h := NewHarness(t, opts...) 
- - var minerAddr address.Address - { - // cheating the bootstrapping problem - cheatStorageMarketTotal(t, h.vm, h.cs.Blockstore()) - - ret, _ := h.InvokeWithValue(t, ownerAddr, StoragePowerAddress, SPAMethods.CreateStorageMiner, - types.NewInt(500000), - &CreateStorageMinerParams{ - Owner: ownerAddr, - Worker: workerAddr, - SectorSize: build.SectorSizes[0], - PeerID: "fakepeerid", - }) - ApplyOK(t, ret) - var err error - minerAddr, err = address.NewFromBytes(ret.Return) - assert.NoError(t, err) - } - - { - ret, _ := h.Invoke(t, ownerAddr, StoragePowerAddress, SPAMethods.IsValidMiner, - &IsValidMinerParam{Addr: minerAddr}) - ApplyOK(t, ret) - - var output bool - err := cbor.DecodeInto(ret.Return, &output) - if err != nil { - t.Fatalf("error decoding: %+v", err) - } - - if !output { - t.Fatalf("%s is miner but IsValidMiner call returned false", minerAddr) - } - } - - { - ret, _ := h.Invoke(t, ownerAddr, StoragePowerAddress, SPAMethods.PowerLookup, - &PowerLookupParams{Miner: minerAddr}) - ApplyOK(t, ret) - power := types.BigFromBytes(ret.Return) - - if types.BigCmp(power, types.NewInt(0)) != 0 { - t.Fatalf("power should be zero, is: %s", power) - } - } - - { - ret, _ := h.Invoke(t, ownerAddr, minerAddr, MAMethods.GetOwner, nil) - ApplyOK(t, ret) - oA, err := address.NewFromBytes(ret.Return) - assert.NoError(t, err) - assert.Equal(t, ownerAddr, oA, "return from GetOwner should be equal to the owner") - } - - { - b1 := fakeBlock(t, minerAddr, 100) - b2 := fakeBlock(t, minerAddr, 101) - - signBlock(t, h.w, workerAddr, b1) - signBlock(t, h.w, workerAddr, b2) - - ret, _ := h.Invoke(t, ownerAddr, StoragePowerAddress, SPAMethods.ArbitrateConsensusFault, - &ArbitrateConsensusFaultParams{ - Block1: b1, - Block2: b2, - }) - ApplyOK(t, ret) - } - - { - ret, _ := h.Invoke(t, ownerAddr, StoragePowerAddress, SPAMethods.PowerLookup, - &PowerLookupParams{Miner: minerAddr}) - assert.Equal(t, ret.ExitCode, byte(1)) - } - - { - ret, _ := h.Invoke(t, ownerAddr, StoragePowerAddress, 
SPAMethods.IsValidMiner, &IsValidMinerParam{minerAddr}) - ApplyOK(t, ret) - assert.Equal(t, ret.Return, cbg.CborBoolFalse) - } -} - -func cheatStorageMarketTotal(t *testing.T, vm *vm.VM, bs bstore.Blockstore) { - t.Helper() - - sma, err := vm.StateTree().GetActor(StoragePowerAddress) - if err != nil { - t.Fatal(err) - } - - cst := hamt.CSTFromBstore(bs) - - var smastate StoragePowerState - if err := cst.Get(context.TODO(), sma.Head, &smastate); err != nil { - t.Fatal(err) - } - - smastate.TotalStorage = types.NewInt(10000) - - c, err := cst.Put(context.TODO(), &smastate) - if err != nil { - t.Fatal(err) - } - - sma.Head = c - - if err := vm.StateTree().SetActor(StoragePowerAddress, sma); err != nil { - t.Fatal(err) - } -} - -func fakeBlock(t *testing.T, minerAddr address.Address, ts uint64) *types.BlockHeader { - c := fakeCid(t, 1) - return &types.BlockHeader{Height: 5, Miner: minerAddr, Timestamp: ts, ParentStateRoot: c, Messages: c, ParentMessageReceipts: c, BLSAggregate: types.Signature{Type: types.KTBLS}} -} - -func fakeCid(t *testing.T, s int) cid.Cid { - t.Helper() - c, err := cid.NewPrefixV1(cid.Raw, mh.IDENTITY).Sum([]byte(fmt.Sprintf("%d", s))) - if err != nil { - t.Fatal(err) - } - - return c -} - -func signBlock(t *testing.T, w *wallet.Wallet, worker address.Address, blk *types.BlockHeader) { - t.Helper() - sb, err := blk.SigningBytes() - if err != nil { - t.Fatal(err) - } - - sig, err := w.Sign(context.TODO(), worker, sb) - if err != nil { - t.Fatal(err) - } - - blk.BlockSig = sig -} diff --git a/chain/actors/actors.go b/chain/actors/actors.go deleted file mode 100644 index 44c790fb2..000000000 --- a/chain/actors/actors.go +++ /dev/null @@ -1,52 +0,0 @@ -package actors - -import ( - "github.com/filecoin-project/go-address" - - "github.com/ipfs/go-cid" - mh "github.com/multiformats/go-multihash" -) - -var AccountCodeCid cid.Cid -var CronCodeCid cid.Cid -var StoragePowerCodeCid cid.Cid -var StorageMarketCodeCid cid.Cid -var StorageMinerCodeCid cid.Cid 
-var MultisigCodeCid cid.Cid -var InitCodeCid cid.Cid -var PaymentChannelCodeCid cid.Cid - -var InitAddress = mustIDAddress(0) -var NetworkAddress = mustIDAddress(1) -var StoragePowerAddress = mustIDAddress(2) -var StorageMarketAddress = mustIDAddress(3) // TODO: missing from spec -var CronAddress = mustIDAddress(4) -var BurntFundsAddress = mustIDAddress(99) - -func mustIDAddress(i uint64) address.Address { - a, err := address.NewIDAddress(i) - if err != nil { - panic(err) // ok - } - return a -} - -func init() { - pref := cid.NewPrefixV1(cid.Raw, mh.IDENTITY) - mustSum := func(s string) cid.Cid { - c, err := pref.Sum([]byte(s)) - if err != nil { - panic(err) // ok - } - return c - } - - AccountCodeCid = mustSum("fil/1/account") // TODO: spec - CronCodeCid = mustSum("fil/1/cron") - StoragePowerCodeCid = mustSum("fil/1/power") - StorageMarketCodeCid = mustSum("fil/1/market") - StorageMinerCodeCid = mustSum("fil/1/miner") - MultisigCodeCid = mustSum("fil/1/multisig") - InitCodeCid = mustSum("fil/1/init") - PaymentChannelCodeCid = mustSum("fil/1/paych") -} diff --git a/chain/actors/actors_test.go b/chain/actors/actors_test.go deleted file mode 100644 index 64207b422..000000000 --- a/chain/actors/actors_test.go +++ /dev/null @@ -1,158 +0,0 @@ -package actors_test - -import ( - "context" - "encoding/binary" - "fmt" - "testing" - - "github.com/filecoin-project/lotus/build" - - "github.com/filecoin-project/go-address" - . 
"github.com/filecoin-project/lotus/chain/actors" - "github.com/filecoin-project/lotus/chain/gen" - "github.com/filecoin-project/lotus/chain/store" - "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/chain/vm" - dstore "github.com/ipfs/go-datastore" - bstore "github.com/ipfs/go-ipfs-blockstore" -) - -func blsaddr(n uint64) address.Address { - buf := make([]byte, 48) - binary.PutUvarint(buf, n) - - addr, err := address.NewBLSAddress(buf) - if err != nil { - panic(err) // ok - } - - return addr -} - -func setupVMTestEnv(t *testing.T) (*vm.VM, []address.Address, bstore.Blockstore) { - bs := bstore.NewBlockstore(dstore.NewMapDatastore()) - - from := blsaddr(0) - maddr := blsaddr(1) - - actors := map[address.Address]types.BigInt{ - from: types.NewInt(1000000), - maddr: types.NewInt(0), - } - st, err := gen.MakeInitialStateTree(bs, actors) - if err != nil { - t.Fatal(err) - } - - stateroot, err := st.Flush() - if err != nil { - t.Fatal(err) - } - - cs := store.NewChainStore(bs, nil) - - // TODO: should probabaly mock out the randomness bit, nil works for now - vm, err := vm.NewVM(stateroot, 1, nil, maddr, cs.Blockstore()) - if err != nil { - t.Fatal(err) - } - return vm, []address.Address{from, maddr}, bs -} - -func TestVMInvokeMethod(t *testing.T) { - vm, addrs, _ := setupVMTestEnv(t) - from := addrs[0] - - var err error - cenc, err := SerializeParams(&StorageMinerConstructorParams{Owner: from, Worker: from}) - if err != nil { - t.Fatal(err) - } - - execparams := &ExecParams{ - Code: StorageMinerCodeCid, - Params: cenc, - } - enc, err := SerializeParams(execparams) - if err != nil { - t.Fatal(err) - } - - msg := &types.Message{ - To: InitAddress, - From: from, - Method: IAMethods.Exec, - Params: enc, - GasPrice: types.NewInt(1), - GasLimit: types.NewInt(10000), - Value: types.NewInt(0), - } - - ret, err := vm.ApplyMessage(context.TODO(), msg) - if err != nil { - t.Fatal(err) - } - - if ret.ExitCode != 0 { - t.Fatal("invocation failed") 
- } - - outaddr, err := address.NewFromBytes(ret.Return) - if err != nil { - t.Fatal(err) - } - - if outaddr.String() != "t0102" { - t.Fatal("hold up") - } -} - -func TestStorageMarketActorCreateMiner(t *testing.T) { - vm, addrs, bs := setupVMTestEnv(t) - from := addrs[0] - maddr := addrs[1] - - cheatStorageMarketTotal(t, vm, bs) - - params := &StorageMinerConstructorParams{ - Owner: maddr, - Worker: maddr, - SectorSize: build.SectorSizes[0], - PeerID: "fakepeerid", - } - var err error - enc, err := SerializeParams(params) - if err != nil { - t.Fatal(err) - } - - msg := &types.Message{ - To: StoragePowerAddress, - From: from, - Method: SPAMethods.CreateStorageMiner, - Params: enc, - GasPrice: types.NewInt(1), - GasLimit: types.NewInt(10000), - Value: types.NewInt(50000), - } - - ret, err := vm.ApplyMessage(context.TODO(), msg) - if err != nil { - t.Fatal(err) - } - - if ret.ExitCode != 0 { - fmt.Println(ret.ActorErr) - t.Fatal("invocation failed: ", ret.ExitCode) - } - - outaddr, err := address.NewFromBytes(ret.Return) - if err != nil { - t.Fatal(err) - } - - if outaddr.String() != "t0102" { - t.Fatal("hold up") - } -} diff --git a/chain/actors/address.go b/chain/actors/address.go deleted file mode 100644 index eb3bcbff3..000000000 --- a/chain/actors/address.go +++ /dev/null @@ -1,14 +0,0 @@ -package actors - -import ( - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/lotus/chain/actors/aerrors" -) - -func NewIDAddress(id uint64) (address.Address, ActorError) { - a, err := address.NewIDAddress(id) - if err != nil { - return address.Undef, aerrors.Escalate(err, "could not create ID Address") - } - return a, nil -} diff --git a/chain/actors/adt/adt.go b/chain/actors/adt/adt.go new file mode 100644 index 000000000..39dd5cebc --- /dev/null +++ b/chain/actors/adt/adt.go @@ -0,0 +1,65 @@ +package adt + +import ( + "github.com/ipfs/go-cid" + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-state-types/abi" + 
"github.com/filecoin-project/go-state-types/cbor" + "github.com/filecoin-project/go-state-types/network" + "github.com/filecoin-project/lotus/chain/actors/builtin" + adt0 "github.com/filecoin-project/specs-actors/actors/util/adt" +) + +type Map interface { + Root() (cid.Cid, error) + + Put(k abi.Keyer, v cbor.Marshaler) error + Get(k abi.Keyer, v cbor.Unmarshaler) (bool, error) + Delete(k abi.Keyer) error + + ForEach(v cbor.Unmarshaler, fn func(key string) error) error +} + +func AsMap(store Store, root cid.Cid, version builtin.Version) (Map, error) { + switch version { + case builtin.Version0: + return adt0.AsMap(store, root) + } + return nil, xerrors.Errorf("unknown network version: %d", version) +} + +func NewMap(store Store, version builtin.Version) (Map, error) { + switch version { + case builtin.Version0: + return adt0.MakeEmptyMap(store), nil + } + return nil, xerrors.Errorf("unknown network version: %d", version) +} + +type Array interface { + Root() (cid.Cid, error) + + Set(idx uint64, v cbor.Marshaler) error + Get(idx uint64, v cbor.Unmarshaler) (bool, error) + Delete(idx uint64) error + Length() uint64 + + ForEach(v cbor.Unmarshaler, fn func(idx int64) error) error +} + +func AsArray(store Store, root cid.Cid, version network.Version) (Array, error) { + switch builtin.VersionForNetwork(version) { + case builtin.Version0: + return adt0.AsArray(store, root) + } + return nil, xerrors.Errorf("unknown network version: %d", version) +} + +func NewArray(store Store, version builtin.Version) (Array, error) { + switch version { + case builtin.Version0: + return adt0.MakeEmptyArray(store), nil + } + return nil, xerrors.Errorf("unknown network version: %d", version) +} diff --git a/chain/actors/adt/diff_adt.go b/chain/actors/adt/diff_adt.go new file mode 100644 index 000000000..160e12e19 --- /dev/null +++ b/chain/actors/adt/diff_adt.go @@ -0,0 +1,122 @@ +package adt + +import ( + "bytes" + + "github.com/filecoin-project/go-state-types/abi" + typegen 
"github.com/whyrusleeping/cbor-gen" +) + +// AdtArrayDiff generalizes adt.Array diffing by accepting a Deferred type that can be unmarshalled to its corresponding struct +// in an interface implementation. +// Add should be called when a new k,v is added to the array +// Modify should be called when a value is modified in the array +// Remove should be called when a value is removed from the array +type AdtArrayDiff interface { + Add(key uint64, val *typegen.Deferred) error + Modify(key uint64, from, to *typegen.Deferred) error + Remove(key uint64, val *typegen.Deferred) error +} + +// TODO Performance can be improved by diffing the underlying IPLD graph, e.g. https://github.com/ipfs/go-merkledag/blob/749fd8717d46b4f34c9ce08253070079c89bc56d/dagutils/diff.go#L104 +// CBOR Marshaling will likely be the largest performance bottleneck here. + +// DiffAdtArray accepts two *adt.Array's and an AdtArrayDiff implementation. It does the following: +// - All values that exist in preArr and not in curArr are passed to AdtArrayDiff.Remove() +// - All values that exist in curArr and not in preArr are passed to AdtArrayDiff.Add() +// - All values that exist in preArr and in curArr are passed to AdtArrayDiff.Modify() +// - It is the responsibility of AdtArrayDiff.Modify() to determine if the values it was passed have been modified. 
+func DiffAdtArray(preArr, curArr Array, out AdtArrayDiff) error { + notNew := make(map[int64]struct{}, curArr.Length()) + prevVal := new(typegen.Deferred) + if err := preArr.ForEach(prevVal, func(i int64) error { + curVal := new(typegen.Deferred) + found, err := curArr.Get(uint64(i), curVal) + if err != nil { + return err + } + if !found { + if err := out.Remove(uint64(i), prevVal); err != nil { + return err + } + return nil + } + + // report a modification only when the raw bytes actually differ + if !bytes.Equal(prevVal.Raw, curVal.Raw) { + if err := out.Modify(uint64(i), prevVal, curVal); err != nil { + return err + } + } + notNew[i] = struct{}{} + return nil + }); err != nil { + return err + } + + curVal := new(typegen.Deferred) + return curArr.ForEach(curVal, func(i int64) error { + if _, ok := notNew[i]; ok { + return nil + } + return out.Add(uint64(i), curVal) + }) +} + +// TODO Performance can be improved by diffing the underlying IPLD graph, e.g. https://github.com/ipfs/go-merkledag/blob/749fd8717d46b4f34c9ce08253070079c89bc56d/dagutils/diff.go#L104 +// CBOR Marshaling will likely be the largest performance bottleneck here. + +// AdtMapDiff generalizes adt.Map diffing by accepting a Deferred type that can be unmarshalled to its corresponding struct +// in an interface implementation. 
+// AsKey should return the Keyer implementation specific to the map +// Add should be called when a new k,v is added to the map +// Modify should be called when a value is modified in the map +// Remove should be called when a value is removed from the map +type AdtMapDiff interface { + AsKey(key string) (abi.Keyer, error) + Add(key string, val *typegen.Deferred) error + Modify(key string, from, to *typegen.Deferred) error + Remove(key string, val *typegen.Deferred) error +} + +func DiffAdtMap(preMap, curMap Map, out AdtMapDiff) error { + notNew := make(map[string]struct{}) + prevVal := new(typegen.Deferred) + if err := preMap.ForEach(prevVal, func(key string) error { + curVal := new(typegen.Deferred) + k, err := out.AsKey(key) + if err != nil { + return err + } + + found, err := curMap.Get(k, curVal) + if err != nil { + return err + } + if !found { + if err := out.Remove(key, prevVal); err != nil { + return err + } + return nil + } + + // no modification + if !bytes.Equal(prevVal.Raw, curVal.Raw) { + if err := out.Modify(key, prevVal, curVal); err != nil { + return err + } + } + notNew[key] = struct{}{} + return nil + }); err != nil { + return err + } + + curVal := new(typegen.Deferred) + return curMap.ForEach(curVal, func(key string) error { + if _, ok := notNew[key]; ok { + return nil + } + return out.Add(key, curVal) + }) +} diff --git a/chain/actors/adt/diff_adt_test.go b/chain/actors/adt/diff_adt_test.go new file mode 100644 index 000000000..1c0726003 --- /dev/null +++ b/chain/actors/adt/diff_adt_test.go @@ -0,0 +1,300 @@ +package adt + +import ( + "bytes" + "context" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + cbornode "github.com/ipfs/go-ipld-cbor" + typegen "github.com/whyrusleeping/cbor-gen" + + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/specs-actors/actors/runtime" + adt0 "github.com/filecoin-project/specs-actors/actors/util/adt" + + bstore 
"github.com/filecoin-project/lotus/lib/blockstore" +) + +func TestDiffAdtArray(t *testing.T) { + ctxstoreA := newContextStore() + ctxstoreB := newContextStore() + + arrA := adt0.MakeEmptyArray(ctxstoreA) + arrB := adt0.MakeEmptyArray(ctxstoreB) + + require.NoError(t, arrA.Set(0, runtime.CBORBytes([]byte{0}))) // delete + + require.NoError(t, arrA.Set(1, runtime.CBORBytes([]byte{0}))) // modify + require.NoError(t, arrB.Set(1, runtime.CBORBytes([]byte{1}))) + + require.NoError(t, arrA.Set(2, runtime.CBORBytes([]byte{1}))) // delete + + require.NoError(t, arrA.Set(3, runtime.CBORBytes([]byte{0}))) // noop + require.NoError(t, arrB.Set(3, runtime.CBORBytes([]byte{0}))) + + require.NoError(t, arrA.Set(4, runtime.CBORBytes([]byte{0}))) // modify + require.NoError(t, arrB.Set(4, runtime.CBORBytes([]byte{6}))) + + require.NoError(t, arrB.Set(5, runtime.CBORBytes{8})) // add + require.NoError(t, arrB.Set(6, runtime.CBORBytes{9})) // add + + changes := new(TestDiffArray) + + assert.NoError(t, DiffAdtArray(arrA, arrB, changes)) + assert.NotNil(t, changes) + + assert.Equal(t, 2, len(changes.Added)) + // keys 5 and 6 were added + assert.EqualValues(t, uint64(5), changes.Added[0].key) + assert.EqualValues(t, []byte{8}, changes.Added[0].val) + assert.EqualValues(t, uint64(6), changes.Added[1].key) + assert.EqualValues(t, []byte{9}, changes.Added[1].val) + + assert.Equal(t, 2, len(changes.Modified)) + // keys 1 and 4 were modified + assert.EqualValues(t, uint64(1), changes.Modified[0].From.key) + assert.EqualValues(t, []byte{0}, changes.Modified[0].From.val) + assert.EqualValues(t, uint64(1), changes.Modified[0].To.key) + assert.EqualValues(t, []byte{1}, changes.Modified[0].To.val) + assert.EqualValues(t, uint64(4), changes.Modified[1].From.key) + assert.EqualValues(t, []byte{0}, changes.Modified[1].From.val) + assert.EqualValues(t, uint64(4), changes.Modified[1].To.key) + assert.EqualValues(t, []byte{6}, changes.Modified[1].To.val) + + assert.Equal(t, 2, len(changes.Removed)) + 
// keys 0 and 2 were deleted + assert.EqualValues(t, uint64(0), changes.Removed[0].key) + assert.EqualValues(t, []byte{0}, changes.Removed[0].val) + assert.EqualValues(t, uint64(2), changes.Removed[1].key) + assert.EqualValues(t, []byte{1}, changes.Removed[1].val) +} + +func TestDiffAdtMap(t *testing.T) { + ctxstoreA := newContextStore() + ctxstoreB := newContextStore() + + mapA := adt0.MakeEmptyMap(ctxstoreA) + mapB := adt0.MakeEmptyMap(ctxstoreB) + + require.NoError(t, mapA.Put(abi.UIntKey(0), runtime.CBORBytes([]byte{0}))) // delete + + require.NoError(t, mapA.Put(abi.UIntKey(1), runtime.CBORBytes([]byte{0}))) // modify + require.NoError(t, mapB.Put(abi.UIntKey(1), runtime.CBORBytes([]byte{1}))) + + require.NoError(t, mapA.Put(abi.UIntKey(2), runtime.CBORBytes([]byte{1}))) // delete + + require.NoError(t, mapA.Put(abi.UIntKey(3), runtime.CBORBytes([]byte{0}))) // noop + require.NoError(t, mapB.Put(abi.UIntKey(3), runtime.CBORBytes([]byte{0}))) + + require.NoError(t, mapA.Put(abi.UIntKey(4), runtime.CBORBytes([]byte{0}))) // modify + require.NoError(t, mapB.Put(abi.UIntKey(4), runtime.CBORBytes([]byte{6}))) + + require.NoError(t, mapB.Put(abi.UIntKey(5), runtime.CBORBytes{8})) // add + require.NoError(t, mapB.Put(abi.UIntKey(6), runtime.CBORBytes{9})) // add + + changes := new(TestDiffMap) + + assert.NoError(t, DiffAdtMap(mapA, mapB, changes)) + assert.NotNil(t, changes) + + assert.Equal(t, 2, len(changes.Added)) + // keys 5 and 6 were added + assert.EqualValues(t, uint64(6), changes.Added[0].key) + assert.EqualValues(t, []byte{9}, changes.Added[0].val) + assert.EqualValues(t, uint64(5), changes.Added[1].key) + assert.EqualValues(t, []byte{8}, changes.Added[1].val) + + assert.Equal(t, 2, len(changes.Modified)) + // keys 1 and 4 were modified + assert.EqualValues(t, uint64(1), changes.Modified[0].From.key) + assert.EqualValues(t, []byte{0}, changes.Modified[0].From.val) + assert.EqualValues(t, uint64(1), changes.Modified[0].To.key) + assert.EqualValues(t, 
[]byte{1}, changes.Modified[0].To.val) + assert.EqualValues(t, uint64(4), changes.Modified[1].From.key) + assert.EqualValues(t, []byte{0}, changes.Modified[1].From.val) + assert.EqualValues(t, uint64(4), changes.Modified[1].To.key) + assert.EqualValues(t, []byte{6}, changes.Modified[1].To.val) + + assert.Equal(t, 2, len(changes.Removed)) + // keys 0 and 2 were deleted + assert.EqualValues(t, uint64(0), changes.Removed[0].key) + assert.EqualValues(t, []byte{0}, changes.Removed[0].val) + assert.EqualValues(t, uint64(2), changes.Removed[1].key) + assert.EqualValues(t, []byte{1}, changes.Removed[1].val) + +} + +type TestDiffMap struct { + Added []adtMapDiffResult + Modified []TestAdtMapDiffModified + Removed []adtMapDiffResult +} + +var _ AdtMapDiff = &TestDiffMap{} + +func (t *TestDiffMap) AsKey(key string) (abi.Keyer, error) { + k, err := abi.ParseUIntKey(key) + if err != nil { + return nil, err + } + return abi.UIntKey(k), nil +} + +func (t *TestDiffMap) Add(key string, val *typegen.Deferred) error { + v := new(runtime.CBORBytes) + err := v.UnmarshalCBOR(bytes.NewReader(val.Raw)) + if err != nil { + return err + } + k, err := abi.ParseUIntKey(key) + if err != nil { + return err + } + t.Added = append(t.Added, adtMapDiffResult{ + key: k, + val: *v, + }) + return nil +} + +func (t *TestDiffMap) Modify(key string, from, to *typegen.Deferred) error { + vFrom := new(runtime.CBORBytes) + err := vFrom.UnmarshalCBOR(bytes.NewReader(from.Raw)) + if err != nil { + return err + } + + vTo := new(runtime.CBORBytes) + err = vTo.UnmarshalCBOR(bytes.NewReader(to.Raw)) + if err != nil { + return err + } + + k, err := abi.ParseUIntKey(key) + if err != nil { + return err + } + + if !bytes.Equal(*vFrom, *vTo) { + t.Modified = append(t.Modified, TestAdtMapDiffModified{ + From: adtMapDiffResult{ + key: k, + val: *vFrom, + }, + To: adtMapDiffResult{ + key: k, + val: *vTo, + }, + }) + } + return nil +} + +func (t *TestDiffMap) Remove(key string, val *typegen.Deferred) error { + v := 
new(runtime.CBORBytes) + err := v.UnmarshalCBOR(bytes.NewReader(val.Raw)) + if err != nil { + return err + } + k, err := abi.ParseUIntKey(key) + if err != nil { + return err + } + t.Removed = append(t.Removed, adtMapDiffResult{ + key: k, + val: *v, + }) + return nil +} + +type adtMapDiffResult struct { + key uint64 + val runtime.CBORBytes +} + +type TestAdtMapDiffModified struct { + From adtMapDiffResult + To adtMapDiffResult +} + +type adtArrayDiffResult struct { + key uint64 + val runtime.CBORBytes +} + +type TestDiffArray struct { + Added []adtArrayDiffResult + Modified []TestAdtArrayDiffModified + Removed []adtArrayDiffResult +} + +var _ AdtArrayDiff = &TestDiffArray{} + +type TestAdtArrayDiffModified struct { + From adtArrayDiffResult + To adtArrayDiffResult +} + +func (t *TestDiffArray) Add(key uint64, val *typegen.Deferred) error { + v := new(runtime.CBORBytes) + err := v.UnmarshalCBOR(bytes.NewReader(val.Raw)) + if err != nil { + return err + } + t.Added = append(t.Added, adtArrayDiffResult{ + key: key, + val: *v, + }) + return nil +} + +func (t *TestDiffArray) Modify(key uint64, from, to *typegen.Deferred) error { + vFrom := new(runtime.CBORBytes) + err := vFrom.UnmarshalCBOR(bytes.NewReader(from.Raw)) + if err != nil { + return err + } + + vTo := new(runtime.CBORBytes) + err = vTo.UnmarshalCBOR(bytes.NewReader(to.Raw)) + if err != nil { + return err + } + + if !bytes.Equal(*vFrom, *vTo) { + t.Modified = append(t.Modified, TestAdtArrayDiffModified{ + From: adtArrayDiffResult{ + key: key, + val: *vFrom, + }, + To: adtArrayDiffResult{ + key: key, + val: *vTo, + }, + }) + } + return nil +} + +func (t *TestDiffArray) Remove(key uint64, val *typegen.Deferred) error { + v := new(runtime.CBORBytes) + err := v.UnmarshalCBOR(bytes.NewReader(val.Raw)) + if err != nil { + return err + } + t.Removed = append(t.Removed, adtArrayDiffResult{ + key: key, + val: *v, + }) + return nil +} + +func newContextStore() Store { + ctx := context.Background() + bs := 
bstore.NewTemporarySync() + store := cbornode.NewCborStore(bs) + return WrapStore(ctx, store) +} diff --git a/chain/actors/adt/store.go b/chain/actors/adt/store.go new file mode 100644 index 000000000..8dd9841a1 --- /dev/null +++ b/chain/actors/adt/store.go @@ -0,0 +1,17 @@ +package adt + +import ( + "context" + + adt "github.com/filecoin-project/specs-actors/actors/util/adt" + cbor "github.com/ipfs/go-ipld-cbor" +) + +type Store interface { + Context() context.Context + cbor.IpldStore +} + +func WrapStore(ctx context.Context, store cbor.IpldStore) Store { + return adt.WrapStore(ctx, store) +} diff --git a/chain/actors/aerrors/error.go b/chain/actors/aerrors/error.go index ca545e366..12f802c8f 100644 --- a/chain/actors/aerrors/error.go +++ b/chain/actors/aerrors/error.go @@ -3,13 +3,14 @@ package aerrors import ( "fmt" + "github.com/filecoin-project/go-state-types/exitcode" "golang.org/x/xerrors" ) func IsFatal(err ActorError) bool { return err != nil && err.IsFatal() } -func RetCode(err ActorError) uint8 { +func RetCode(err ActorError) exitcode.ExitCode { if err == nil { return 0 } @@ -25,12 +26,12 @@ type internalActorError interface { type ActorError interface { error IsFatal() bool - RetCode() uint8 + RetCode() exitcode.ExitCode } type actorError struct { fatal bool - retCode uint8 + retCode exitcode.ExitCode msg string frame xerrors.Frame @@ -41,7 +42,7 @@ func (e *actorError) IsFatal() bool { return e.fatal } -func (e *actorError) RetCode() uint8 { +func (e *actorError) RetCode() exitcode.ExitCode { return e.retCode } diff --git a/chain/actors/aerrors/error_test.go b/chain/actors/aerrors/error_test.go index 242874a06..3bfd3d042 100644 --- a/chain/actors/aerrors/error_test.go +++ b/chain/actors/aerrors/error_test.go @@ -3,6 +3,7 @@ package aerrors_test import ( "testing" + "github.com/filecoin-project/go-state-types/exitcode" . 
"github.com/filecoin-project/lotus/chain/actors/aerrors" "github.com/stretchr/testify/assert" @@ -31,5 +32,5 @@ func TestAbsorbeError(t *testing.T) { aw3 := Wrap(aw2, "creating miner in storage market") t.Logf("Verbose error: %+v", aw3) t.Logf("Normal error: %v", aw3) - assert.Equal(t, uint8(35), RetCode(aw3)) + assert.Equal(t, exitcode.ExitCode(35), RetCode(aw3)) } diff --git a/chain/actors/aerrors/wrap.go b/chain/actors/aerrors/wrap.go index da12cb7c6..0552829f9 100644 --- a/chain/actors/aerrors/wrap.go +++ b/chain/actors/aerrors/wrap.go @@ -4,12 +4,13 @@ import ( "errors" "fmt" - hamt "github.com/ipfs/go-hamt-ipld" + "github.com/filecoin-project/go-state-types/exitcode" + cbor "github.com/ipfs/go-ipld-cbor" "golang.org/x/xerrors" ) // New creates a new non-fatal error -func New(retCode uint8, message string) ActorError { +func New(retCode exitcode.ExitCode, message string) ActorError { if retCode == 0 { return &actorError{ fatal: true, @@ -29,7 +30,7 @@ func New(retCode uint8, message string) ActorError { } // Newf creates a new non-fatal error -func Newf(retCode uint8, format string, args ...interface{}) ActorError { +func Newf(retCode exitcode.ExitCode, format string, args ...interface{}) ActorError { if retCode == 0 { return &actorError{ fatal: true, @@ -48,6 +49,27 @@ func Newf(retCode uint8, format string, args ...interface{}) ActorError { } } +// todo: bit hacky + +func NewfSkip(skip int, retCode exitcode.ExitCode, format string, args ...interface{}) ActorError { + if retCode == 0 { + return &actorError{ + fatal: true, + retCode: 0, + + msg: "tried creating an error and setting RetCode to 0", + frame: xerrors.Caller(skip), + err: fmt.Errorf(format, args...), + } + } + return &actorError{ + retCode: retCode, + + msg: fmt.Sprintf(format, args...), + frame: xerrors.Caller(skip), + } +} + func Fatal(message string, args ...interface{}) ActorError { return &actorError{ fatal: true, @@ -95,7 +117,7 @@ func Wrapf(err ActorError, format string, args 
...interface{}) ActorError { } // Absorb takes and error and makes in not fatal ActorError -func Absorb(err error, retCode uint8, msg string) ActorError { +func Absorb(err error, retCode exitcode.ExitCode, msg string) ActorError { if err == nil { return nil } @@ -160,7 +182,7 @@ func HandleExternalError(err error, msg string) ActorError { } } - if xerrors.Is(err, &hamt.SerializationError{}) { + if xerrors.Is(err, &cbor.SerializationError{}) { return &actorError{ fatal: false, retCode: 253, @@ -171,7 +193,8 @@ func HandleExternalError(err error, msg string) ActorError { } return &actorError{ - fatal: true, + fatal: false, + retCode: 219, msg: msg, frame: xerrors.Caller(1), diff --git a/chain/actors/builtin/README.md b/chain/actors/builtin/README.md new file mode 100644 index 000000000..21b3fd38f --- /dev/null +++ b/chain/actors/builtin/README.md @@ -0,0 +1,29 @@ +# Actors + +This package contains shims for abstracting over different actor versions. + +## Design + +Shims in this package follow a few common design principles. + +### Structure Agnostic + +Shims interfaces defined in this package should (ideally) not change even if the +structure of the underlying data changes. For example: + +* All shims store an internal "store" object. That way, state can be moved into + a separate object without needing to add a store to the function signature. +* All functions must return an error, even if unused for now. + +### Minimal + +These interfaces should be expanded only as necessary to reduce maintenance burden. + +### Queries, not field assessors. + +When possible, functions should query the state instead of simply acting as +field assessors. These queries are more likely to remain stable across +specs-actor upgrades than specific state fields. + +Note: there is a trade-off here. Avoid implementing _complicated_ query logic +inside these shims, as it will need to be replicated in every shim. 
diff --git a/chain/actors/builtin/account/account.go b/chain/actors/builtin/account/account.go new file mode 100644 index 000000000..5b90580ec --- /dev/null +++ b/chain/actors/builtin/account/account.go @@ -0,0 +1,31 @@ +package account + +import ( + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/cbor" + builtin0 "github.com/filecoin-project/specs-actors/actors/builtin" + + "github.com/filecoin-project/lotus/chain/actors/adt" + "github.com/filecoin-project/lotus/chain/types" +) + +func Load(store adt.Store, act *types.Actor) (State, error) { + switch act.Code { + case builtin0.AccountActorCodeID: + out := state0{store: store} + err := store.Get(store.Context(), act.Head, &out) + if err != nil { + return nil, err + } + return &out, nil + } + return nil, xerrors.Errorf("unknown actor code %s", act.Code) +} + +type State interface { + cbor.Marshaler + + PubkeyAddress() (address.Address, error) +} diff --git a/chain/actors/builtin/account/v0.go b/chain/actors/builtin/account/v0.go new file mode 100644 index 000000000..30bafbfd3 --- /dev/null +++ b/chain/actors/builtin/account/v0.go @@ -0,0 +1,18 @@ +package account + +import ( + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/lotus/chain/actors/adt" + "github.com/filecoin-project/specs-actors/actors/builtin/account" +) + +var _ State = (*state0)(nil) + +type state0 struct { + account.State + store adt.Store +} + +func (s *state0) PubkeyAddress() (address.Address, error) { + return s.Address, nil +} diff --git a/chain/actors/builtin/builtin.go b/chain/actors/builtin/builtin.go new file mode 100644 index 000000000..a85b4da65 --- /dev/null +++ b/chain/actors/builtin/builtin.go @@ -0,0 +1,43 @@ +package builtin + +import ( + "fmt" + + "github.com/filecoin-project/go-state-types/abi" + miner0 "github.com/filecoin-project/specs-actors/actors/builtin/miner" + proof0 "github.com/filecoin-project/specs-actors/actors/runtime/proof" + + 
smoothing0 "github.com/filecoin-project/specs-actors/actors/util/smoothing" + + "github.com/filecoin-project/go-state-types/network" +) + +type Version int + +const ( + Version0 = iota +) + +// Converts a network version into a specs-actors version. +func VersionForNetwork(version network.Version) Version { + switch version { + case network.Version0, network.Version1, network.Version2, network.Version3: + return Version0 + default: + panic(fmt.Sprintf("unsupported network version %d", version)) + } +} + +// TODO: Why does actors have 2 different versions of this? +type SectorInfo = proof0.SectorInfo +type PoStProof = proof0.PoStProof +type FilterEstimate = smoothing0.FilterEstimate + +func FromV0FilterEstimate(v0 smoothing0.FilterEstimate) FilterEstimate { + return (FilterEstimate)(v0) +} + +// Doesn't change between actors v0 and v1 +func QAPowerForWeight(size abi.SectorSize, duration abi.ChainEpoch, dealWeight, verifiedWeight abi.DealWeight) abi.StoragePower { + return miner0.QAPowerForWeight(size, duration, dealWeight, verifiedWeight) +} diff --git a/chain/actors/builtin/init/init.go b/chain/actors/builtin/init/init.go new file mode 100644 index 000000000..f235450c2 --- /dev/null +++ b/chain/actors/builtin/init/init.go @@ -0,0 +1,44 @@ +package init + +import ( + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/cbor" + builtin0 "github.com/filecoin-project/specs-actors/actors/builtin" + + "github.com/filecoin-project/lotus/chain/actors/adt" + "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/node/modules/dtypes" +) + +var Address = builtin0.InitActorAddr + +func Load(store adt.Store, act *types.Actor) (State, error) { + switch act.Code { + case builtin0.InitActorCodeID: + out := state0{store: store} + err := store.Get(store.Context(), act.Head, &out) + if err != nil { + return nil, err + } + return &out, nil + } + 
return nil, xerrors.Errorf("unknown actor code %s", act.Code) +} + +type State interface { + cbor.Marshaler + + ResolveAddress(address address.Address) (address.Address, bool, error) + MapAddressToNewID(address address.Address) (address.Address, error) + NetworkName() (dtypes.NetworkName, error) + + ForEachActor(func(id abi.ActorID, address address.Address) error) error + + // Remove exists to support tooling that manipulates state for testing. + // It should not be used in production code, as init actor entries are + // immutable. + Remove(addrs ...address.Address) error +} diff --git a/chain/actors/builtin/init/v0.go b/chain/actors/builtin/init/v0.go new file mode 100644 index 000000000..425ba654c --- /dev/null +++ b/chain/actors/builtin/init/v0.go @@ -0,0 +1,67 @@ +package init + +import ( + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + cbg "github.com/whyrusleeping/cbor-gen" + "golang.org/x/xerrors" + + init_ "github.com/filecoin-project/specs-actors/actors/builtin/init" + + "github.com/filecoin-project/lotus/chain/actors/adt" + "github.com/filecoin-project/lotus/node/modules/dtypes" + + adt0 "github.com/filecoin-project/specs-actors/actors/util/adt" +) + +var _ State = (*state0)(nil) + +type state0 struct { + init_.State + store adt.Store +} + +func (s *state0) ResolveAddress(address address.Address) (address.Address, bool, error) { + return s.State.ResolveAddress(s.store, address) +} + +func (s *state0) MapAddressToNewID(address address.Address) (address.Address, error) { + return s.State.MapAddressToNewID(s.store, address) +} + +func (s *state0) ForEachActor(cb func(id abi.ActorID, address address.Address) error) error { + addrs, err := adt0.AsMap(s.store, s.State.AddressMap) + if err != nil { + return err + } + var actorID cbg.CborInt + return addrs.ForEach(&actorID, func(key string) error { + addr, err := address.NewFromBytes([]byte(key)) + if err != nil { + return err + } + return cb(abi.ActorID(actorID), 
addr) + }) +} + +func (s *state0) NetworkName() (dtypes.NetworkName, error) { + return dtypes.NetworkName(s.State.NetworkName), nil +} + +func (s *state0) Remove(addrs ...address.Address) (err error) { + m, err := adt0.AsMap(s.store, s.State.AddressMap) + if err != nil { + return err + } + for _, addr := range addrs { + if err = m.Delete(abi.AddrKey(addr)); err != nil { + return xerrors.Errorf("failed to delete entry for address: %s; err: %w", addr, err) + } + } + amr, err := m.Root() + if err != nil { + return xerrors.Errorf("failed to get address map root: %w", err) + } + s.State.AddressMap = amr + return nil +} diff --git a/chain/actors/builtin/market/diff.go b/chain/actors/builtin/market/diff.go new file mode 100644 index 000000000..d0b4a2fd3 --- /dev/null +++ b/chain/actors/builtin/market/diff.go @@ -0,0 +1,91 @@ +package market + +import ( + "fmt" + + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/lotus/chain/actors/adt" + cbg "github.com/whyrusleeping/cbor-gen" +) + +func DiffDealProposals(pre, cur DealProposals) (*DealProposalChanges, error) { + results := new(DealProposalChanges) + if err := adt.DiffAdtArray(pre.array(), cur.array(), &marketProposalsDiffer{results, pre, cur}); err != nil { + return nil, fmt.Errorf("diffing deal states: %w", err) + } + return results, nil +} + +type marketProposalsDiffer struct { + Results *DealProposalChanges + pre, cur DealProposals +} + +func (d *marketProposalsDiffer) Add(key uint64, val *cbg.Deferred) error { + dp, err := d.cur.decode(val) + if err != nil { + return err + } + d.Results.Added = append(d.Results.Added, ProposalIDState{abi.DealID(key), *dp}) + return nil +} + +func (d *marketProposalsDiffer) Modify(key uint64, from, to *cbg.Deferred) error { + // short circuit, DealProposals are static + return nil +} + +func (d *marketProposalsDiffer) Remove(key uint64, val *cbg.Deferred) error { + dp, err := d.pre.decode(val) + if err != nil { + return err + } + d.Results.Removed = 
append(d.Results.Removed, ProposalIDState{abi.DealID(key), *dp}) + return nil +} + +func DiffDealStates(pre, cur DealStates) (*DealStateChanges, error) { + results := new(DealStateChanges) + if err := adt.DiffAdtArray(pre.array(), cur.array(), &marketStatesDiffer{results, pre, cur}); err != nil { + return nil, fmt.Errorf("diffing deal states: %w", err) + } + return results, nil +} + +type marketStatesDiffer struct { + Results *DealStateChanges + pre, cur DealStates +} + +func (d *marketStatesDiffer) Add(key uint64, val *cbg.Deferred) error { + ds, err := d.cur.decode(val) + if err != nil { + return err + } + d.Results.Added = append(d.Results.Added, DealIDState{abi.DealID(key), *ds}) + return nil +} + +func (d *marketStatesDiffer) Modify(key uint64, from, to *cbg.Deferred) error { + dsFrom, err := d.pre.decode(from) + if err != nil { + return err + } + dsTo, err := d.cur.decode(to) + if err != nil { + return err + } + if *dsFrom != *dsTo { + d.Results.Modified = append(d.Results.Modified, DealStateChange{abi.DealID(key), dsFrom, dsTo}) + } + return nil +} + +func (d *marketStatesDiffer) Remove(key uint64, val *cbg.Deferred) error { + ds, err := d.pre.decode(val) + if err != nil { + return err + } + d.Results.Removed = append(d.Results.Removed, DealIDState{abi.DealID(key), *ds}) + return nil +} diff --git a/chain/actors/builtin/market/market.go b/chain/actors/builtin/market/market.go new file mode 100644 index 000000000..fef0c03f9 --- /dev/null +++ b/chain/actors/builtin/market/market.go @@ -0,0 +1,129 @@ +package market + +import ( + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/cbor" + builtin0 "github.com/filecoin-project/specs-actors/actors/builtin" + market0 "github.com/filecoin-project/specs-actors/actors/builtin/market" + "github.com/ipfs/go-cid" + cbg "github.com/whyrusleeping/cbor-gen" + + 
"github.com/filecoin-project/lotus/chain/actors/adt" + "github.com/filecoin-project/lotus/chain/types" +) + +var Address = builtin0.StorageMarketActorAddr + +func Load(store adt.Store, act *types.Actor) (st State, err error) { + switch act.Code { + case builtin0.StorageMarketActorCodeID: + out := state0{store: store} + err := store.Get(store.Context(), act.Head, &out) + if err != nil { + return nil, err + } + return &out, nil + } + return nil, xerrors.Errorf("unknown actor code %s", act.Code) +} + +type State interface { + cbor.Marshaler + BalancesChanged(State) (bool, error) + EscrowTable() (BalanceTable, error) + LockedTable() (BalanceTable, error) + TotalLocked() (abi.TokenAmount, error) + StatesChanged(State) (bool, error) + States() (DealStates, error) + ProposalsChanged(State) (bool, error) + Proposals() (DealProposals, error) + VerifyDealsForActivation( + minerAddr address.Address, deals []abi.DealID, currEpoch, sectorExpiry abi.ChainEpoch, + ) (weight, verifiedWeight abi.DealWeight, err error) +} + +type BalanceTable interface { + ForEach(cb func(address.Address, abi.TokenAmount) error) error + Get(key address.Address) (abi.TokenAmount, error) +} + +type DealStates interface { + ForEach(cb func(id abi.DealID, ds DealState) error) error + Get(id abi.DealID) (*DealState, bool, error) + + array() adt.Array + decode(*cbg.Deferred) (*DealState, error) +} + +type DealProposals interface { + ForEach(cb func(id abi.DealID, dp DealProposal) error) error + Get(id abi.DealID) (*DealProposal, bool, error) + + array() adt.Array + decode(*cbg.Deferred) (*DealProposal, error) +} + +type PublishStorageDealsParams = market0.PublishStorageDealsParams +type PublishStorageDealsReturn = market0.PublishStorageDealsReturn +type VerifyDealsForActivationParams = market0.VerifyDealsForActivationParams + +type ClientDealProposal = market0.ClientDealProposal + +type DealState struct { + SectorStartEpoch abi.ChainEpoch // -1 if not yet included in proven sector + LastUpdatedEpoch 
abi.ChainEpoch // -1 if deal state never updated + SlashEpoch abi.ChainEpoch // -1 if deal never slashed +} + +type DealProposal struct { + PieceCID cid.Cid + PieceSize abi.PaddedPieceSize + VerifiedDeal bool + Client address.Address + Provider address.Address + Label string + StartEpoch abi.ChainEpoch + EndEpoch abi.ChainEpoch + StoragePricePerEpoch abi.TokenAmount + ProviderCollateral abi.TokenAmount + ClientCollateral abi.TokenAmount +} + +type DealStateChanges struct { + Added []DealIDState + Modified []DealStateChange + Removed []DealIDState +} + +type DealIDState struct { + ID abi.DealID + Deal DealState +} + +// DealStateChange is a change in deal state from -> to +type DealStateChange struct { + ID abi.DealID + From *DealState + To *DealState +} + +type DealProposalChanges struct { + Added []ProposalIDState + Removed []ProposalIDState +} + +type ProposalIDState struct { + ID abi.DealID + Proposal DealProposal +} + +func EmptyDealState() *DealState { + return &DealState{ + SectorStartEpoch: -1, + SlashEpoch: -1, + LastUpdatedEpoch: -1, + } +} diff --git a/chain/actors/builtin/market/v0.go b/chain/actors/builtin/market/v0.go new file mode 100644 index 000000000..2727f513d --- /dev/null +++ b/chain/actors/builtin/market/v0.go @@ -0,0 +1,192 @@ +package market + +import ( + "bytes" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/lotus/chain/actors/adt" + "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/specs-actors/actors/builtin/market" + adt0 "github.com/filecoin-project/specs-actors/actors/util/adt" + cbg "github.com/whyrusleeping/cbor-gen" +) + +var _ State = (*state0)(nil) + +type state0 struct { + market.State + store adt.Store +} + +func (s *state0) TotalLocked() (abi.TokenAmount, error) { + fml := types.BigAdd(s.TotalClientLockedCollateral, s.TotalProviderLockedCollateral) + fml = types.BigAdd(fml, s.TotalClientStorageFee) + return fml, nil +} + 
+func (s *state0) BalancesChanged(otherState State) (bool, error) { + otherState0, ok := otherState.(*state0) + if !ok { + // there's no way to compare different versions of the state, so let's + // just say that means the state of balances has changed + return true, nil + } + return !s.State.EscrowTable.Equals(otherState0.State.EscrowTable) || !s.State.LockedTable.Equals(otherState0.State.LockedTable), nil +} + +func (s *state0) StatesChanged(otherState State) (bool, error) { + otherState0, ok := otherState.(*state0) + if !ok { + // there's no way to compare different versions of the state, so let's + // just say that means the state of balances has changed + return true, nil + } + return !s.State.States.Equals(otherState0.State.States), nil +} + +func (s *state0) States() (DealStates, error) { + stateArray, err := adt0.AsArray(s.store, s.State.States) + if err != nil { + return nil, err + } + return &dealStates0{stateArray}, nil +} + +func (s *state0) ProposalsChanged(otherState State) (bool, error) { + otherState0, ok := otherState.(*state0) + if !ok { + // there's no way to compare different versions of the state, so let's + // just say that means the state of balances has changed + return true, nil + } + return !s.State.Proposals.Equals(otherState0.State.Proposals), nil +} + +func (s *state0) Proposals() (DealProposals, error) { + proposalArray, err := adt0.AsArray(s.store, s.State.Proposals) + if err != nil { + return nil, err + } + return &dealProposals0{proposalArray}, nil +} + +func (s *state0) EscrowTable() (BalanceTable, error) { + bt, err := adt0.AsBalanceTable(s.store, s.State.EscrowTable) + if err != nil { + return nil, err + } + return &balanceTable0{bt}, nil +} + +func (s *state0) LockedTable() (BalanceTable, error) { + bt, err := adt0.AsBalanceTable(s.store, s.State.LockedTable) + if err != nil { + return nil, err + } + return &balanceTable0{bt}, nil +} + +func (s *state0) VerifyDealsForActivation( + minerAddr address.Address, deals []abi.DealID, 
currEpoch, sectorExpiry abi.ChainEpoch, +) (weight, verifiedWeight abi.DealWeight, err error) { + return market.ValidateDealsForActivation(&s.State, s.store, deals, minerAddr, sectorExpiry, currEpoch) +} + +type balanceTable0 struct { + *adt0.BalanceTable +} + +func (bt *balanceTable0) ForEach(cb func(address.Address, abi.TokenAmount) error) error { + asMap := (*adt0.Map)(bt.BalanceTable) + var ta abi.TokenAmount + return asMap.ForEach(&ta, func(key string) error { + a, err := address.NewFromBytes([]byte(key)) + if err != nil { + return err + } + return cb(a, ta) + }) +} + +type dealStates0 struct { + adt.Array +} + +func (s *dealStates0) Get(dealID abi.DealID) (*DealState, bool, error) { + var deal0 market.DealState + found, err := s.Array.Get(uint64(dealID), &deal0) + if err != nil { + return nil, false, err + } + if !found { + return nil, false, nil + } + deal := fromV0DealState(deal0) + return &deal, true, nil +} + +func (s *dealStates0) ForEach(cb func(dealID abi.DealID, ds DealState) error) error { + var ds0 market.DealState + return s.Array.ForEach(&ds0, func(idx int64) error { + return cb(abi.DealID(idx), fromV0DealState(ds0)) + }) +} + +func (s *dealStates0) decode(val *cbg.Deferred) (*DealState, error) { + var ds0 market.DealState + if err := ds0.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil { + return nil, err + } + ds := fromV0DealState(ds0) + return &ds, nil +} + +func (s *dealStates0) array() adt.Array { + return s.Array +} + +func fromV0DealState(v0 market.DealState) DealState { + return (DealState)(v0) +} + +type dealProposals0 struct { + adt.Array +} + +func (s *dealProposals0) Get(dealID abi.DealID) (*DealProposal, bool, error) { + var proposal0 market.DealProposal + found, err := s.Array.Get(uint64(dealID), &proposal0) + if err != nil { + return nil, false, err + } + if !found { + return nil, false, nil + } + proposal := fromV0DealProposal(proposal0) + return &proposal, true, nil +} + +func (s *dealProposals0) ForEach(cb func(dealID 
abi.DealID, dp DealProposal) error) error { + var dp0 market.DealProposal + return s.Array.ForEach(&dp0, func(idx int64) error { + return cb(abi.DealID(idx), fromV0DealProposal(dp0)) + }) +} + +func (s *dealProposals0) decode(val *cbg.Deferred) (*DealProposal, error) { + var dp0 market.DealProposal + if err := dp0.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil { + return nil, err + } + dp := fromV0DealProposal(dp0) + return &dp, nil +} + +func (s *dealProposals0) array() adt.Array { + return s.Array +} + +func fromV0DealProposal(v0 market.DealProposal) DealProposal { + return (DealProposal)(v0) +} diff --git a/chain/actors/builtin/miner/diff.go b/chain/actors/builtin/miner/diff.go new file mode 100644 index 000000000..dde4db890 --- /dev/null +++ b/chain/actors/builtin/miner/diff.go @@ -0,0 +1,127 @@ +package miner + +import ( + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/lotus/chain/actors/adt" + cbg "github.com/whyrusleeping/cbor-gen" +) + +func DiffPreCommits(pre, cur State) (*PreCommitChanges, error) { + results := new(PreCommitChanges) + + prep, err := pre.precommits() + if err != nil { + return nil, err + } + + curp, err := cur.precommits() + if err != nil { + return nil, err + } + + err = adt.DiffAdtMap(prep, curp, &preCommitDiffer{results, pre, cur}) + if err != nil { + return nil, err + } + + return results, nil +} + +type preCommitDiffer struct { + Results *PreCommitChanges + pre, after State +} + +func (m *preCommitDiffer) AsKey(key string) (abi.Keyer, error) { + sector, err := abi.ParseUIntKey(key) + if err != nil { + return nil, err + } + return abi.UIntKey(sector), nil +} + +func (m *preCommitDiffer) Add(key string, val *cbg.Deferred) error { + sp, err := m.after.decodeSectorPreCommitOnChainInfo(val) + if err != nil { + return err + } + m.Results.Added = append(m.Results.Added, sp) + return nil +} + +func (m *preCommitDiffer) Modify(key string, from, to *cbg.Deferred) error { + return nil +} + +func (m 
*preCommitDiffer) Remove(key string, val *cbg.Deferred) error { + sp, err := m.pre.decodeSectorPreCommitOnChainInfo(val) + if err != nil { + return err + } + m.Results.Removed = append(m.Results.Removed, sp) + return nil +} + +func DiffSectors(pre, cur State) (*SectorChanges, error) { + results := new(SectorChanges) + + pres, err := pre.sectors() + if err != nil { + return nil, err + } + + curs, err := cur.sectors() + if err != nil { + return nil, err + } + + err = adt.DiffAdtArray(pres, curs, §orDiffer{results, pre, cur}) + if err != nil { + return nil, err + } + + return results, nil +} + +type sectorDiffer struct { + Results *SectorChanges + pre, after State +} + +func (m *sectorDiffer) Add(key uint64, val *cbg.Deferred) error { + si, err := m.after.decodeSectorOnChainInfo(val) + if err != nil { + return err + } + m.Results.Added = append(m.Results.Added, si) + return nil +} + +func (m *sectorDiffer) Modify(key uint64, from, to *cbg.Deferred) error { + siFrom, err := m.pre.decodeSectorOnChainInfo(from) + if err != nil { + return err + } + + siTo, err := m.after.decodeSectorOnChainInfo(to) + if err != nil { + return err + } + + if siFrom.Expiration != siTo.Expiration { + m.Results.Extended = append(m.Results.Extended, SectorExtensions{ + From: siFrom, + To: siTo, + }) + } + return nil +} + +func (m *sectorDiffer) Remove(key uint64, val *cbg.Deferred) error { + si, err := m.pre.decodeSectorOnChainInfo(val) + if err != nil { + return err + } + m.Results.Removed = append(m.Results.Removed, si) + return nil +} diff --git a/chain/actors/builtin/miner/miner.go b/chain/actors/builtin/miner/miner.go new file mode 100644 index 000000000..1a4c466b9 --- /dev/null +++ b/chain/actors/builtin/miner/miner.go @@ -0,0 +1,169 @@ +package miner + +import ( + "github.com/ipfs/go-cid" + "github.com/libp2p/go-libp2p-core/peer" + cbg "github.com/whyrusleeping/cbor-gen" + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-bitfield" + 
"github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/cbor" + "github.com/filecoin-project/go-state-types/dline" + builtin0 "github.com/filecoin-project/specs-actors/actors/builtin" + miner0 "github.com/filecoin-project/specs-actors/actors/builtin/miner" + + "github.com/filecoin-project/lotus/chain/actors/adt" + "github.com/filecoin-project/lotus/chain/types" +) + +// Unchanged between v0 and v1 actors +var WPoStProvingPeriod = miner0.WPoStProvingPeriod + +const MinSectorExpiration = miner0.MinSectorExpiration + +func Load(store adt.Store, act *types.Actor) (st State, err error) { + switch act.Code { + case builtin0.StorageMinerActorCodeID: + out := state0{store: store} + err := store.Get(store.Context(), act.Head, &out) + if err != nil { + return nil, err + } + return &out, nil + } + return nil, xerrors.Errorf("unknown actor code %s", act.Code) +} + +type State interface { + cbor.Marshaler + + // Total available balance to spend. + AvailableBalance(abi.TokenAmount) (abi.TokenAmount, error) + // Funds that will vest by the given epoch. + VestedFunds(abi.ChainEpoch) (abi.TokenAmount, error) + // Funds locked for various reasons. + LockedFunds() (LockedFunds, error) + + GetSector(abi.SectorNumber) (*SectorOnChainInfo, error) + FindSector(abi.SectorNumber) (*SectorLocation, error) + GetSectorExpiration(abi.SectorNumber) (*SectorExpiration, error) + GetPrecommittedSector(abi.SectorNumber) (*SectorPreCommitOnChainInfo, error) + LoadSectors(sectorNos *bitfield.BitField) ([]*SectorOnChainInfo, error) + NumLiveSectors() (uint64, error) + IsAllocated(abi.SectorNumber) (bool, error) + + LoadDeadline(idx uint64) (Deadline, error) + ForEachDeadline(cb func(idx uint64, dl Deadline) error) error + NumDeadlines() (uint64, error) + DeadlinesChanged(State) (bool, error) + + Info() (MinerInfo, error) + + DeadlineInfo(epoch abi.ChainEpoch) (*dline.Info, error) + + // Diff helpers. Used by Diff* functions internally. 
+ sectors() (adt.Array, error) + decodeSectorOnChainInfo(*cbg.Deferred) (SectorOnChainInfo, error) + precommits() (adt.Map, error) + decodeSectorPreCommitOnChainInfo(*cbg.Deferred) (SectorPreCommitOnChainInfo, error) +} + +type Deadline interface { + LoadPartition(idx uint64) (Partition, error) + ForEachPartition(cb func(idx uint64, part Partition) error) error + PostSubmissions() (bitfield.BitField, error) + + PartitionsChanged(Deadline) (bool, error) +} + +type Partition interface { + AllSectors() (bitfield.BitField, error) + FaultySectors() (bitfield.BitField, error) + RecoveringSectors() (bitfield.BitField, error) + LiveSectors() (bitfield.BitField, error) + ActiveSectors() (bitfield.BitField, error) +} + +type SectorOnChainInfo struct { + SectorNumber abi.SectorNumber + SealProof abi.RegisteredSealProof + SealedCID cid.Cid + DealIDs []abi.DealID + Activation abi.ChainEpoch + Expiration abi.ChainEpoch + DealWeight abi.DealWeight + VerifiedDealWeight abi.DealWeight + InitialPledge abi.TokenAmount + ExpectedDayReward abi.TokenAmount + ExpectedStoragePledge abi.TokenAmount +} + +type SectorPreCommitInfo = miner0.SectorPreCommitInfo + +type SectorPreCommitOnChainInfo struct { + Info SectorPreCommitInfo + PreCommitDeposit abi.TokenAmount + PreCommitEpoch abi.ChainEpoch + DealWeight abi.DealWeight + VerifiedDealWeight abi.DealWeight +} + +type PoStPartition = miner0.PoStPartition +type RecoveryDeclaration = miner0.RecoveryDeclaration +type FaultDeclaration = miner0.FaultDeclaration + +// Params +type DeclareFaultsParams = miner0.DeclareFaultsParams +type DeclareFaultsRecoveredParams = miner0.DeclareFaultsRecoveredParams +type SubmitWindowedPoStParams = miner0.SubmitWindowedPoStParams +type ProveCommitSectorParams = miner0.ProveCommitSectorParams + +type MinerInfo struct { + Owner address.Address // Must be an ID-address. + Worker address.Address // Must be an ID-address. + NewWorker address.Address // Must be an ID-address. 
+ ControlAddresses []address.Address // Must be an ID-addresses. + WorkerChangeEpoch abi.ChainEpoch + PeerId *peer.ID + Multiaddrs []abi.Multiaddrs + SealProofType abi.RegisteredSealProof + SectorSize abi.SectorSize + WindowPoStPartitionSectors uint64 +} + +type SectorExpiration struct { + OnTime abi.ChainEpoch + + // non-zero if sector is faulty, epoch at which it will be permanently + // removed if it doesn't recover + Early abi.ChainEpoch +} + +type SectorLocation struct { + Deadline uint64 + Partition uint64 +} + +type SectorChanges struct { + Added []SectorOnChainInfo + Extended []SectorExtensions + Removed []SectorOnChainInfo +} + +type SectorExtensions struct { + From SectorOnChainInfo + To SectorOnChainInfo +} + +type PreCommitChanges struct { + Added []SectorPreCommitOnChainInfo + Removed []SectorPreCommitOnChainInfo +} + +type LockedFunds struct { + VestingFunds abi.TokenAmount + InitialPledgeRequirement abi.TokenAmount + PreCommitDeposits abi.TokenAmount +} diff --git a/chain/actors/builtin/miner/utils.go b/chain/actors/builtin/miner/utils.go new file mode 100644 index 000000000..f9c6b3da3 --- /dev/null +++ b/chain/actors/builtin/miner/utils.go @@ -0,0 +1,28 @@ +package miner + +import ( + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-bitfield" +) + +func AllPartSectors(mas State, sget func(Partition) (bitfield.BitField, error)) (bitfield.BitField, error) { + var parts []bitfield.BitField + + err := mas.ForEachDeadline(func(dlidx uint64, dl Deadline) error { + return dl.ForEachPartition(func(partidx uint64, part Partition) error { + s, err := sget(part) + if err != nil { + return xerrors.Errorf("getting sector list (dl: %d, part %d): %w", dlidx, partidx, err) + } + + parts = append(parts, s) + return nil + }) + }) + if err != nil { + return bitfield.BitField{}, err + } + + return bitfield.MultiMerge(parts...) 
+} diff --git a/chain/actors/builtin/miner/v0.go b/chain/actors/builtin/miner/v0.go new file mode 100644 index 000000000..f5aa7849d --- /dev/null +++ b/chain/actors/builtin/miner/v0.go @@ -0,0 +1,373 @@ +package miner + +import ( + "bytes" + "errors" + + "github.com/libp2p/go-libp2p-core/peer" + cbg "github.com/whyrusleeping/cbor-gen" + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-bitfield" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/dline" + miner0 "github.com/filecoin-project/specs-actors/actors/builtin/miner" + adt0 "github.com/filecoin-project/specs-actors/actors/util/adt" + + "github.com/filecoin-project/lotus/chain/actors/adt" +) + +var _ State = (*state0)(nil) + +type state0 struct { + miner0.State + store adt.Store +} + +type deadline0 struct { + miner0.Deadline + store adt.Store +} + +type partition0 struct { + miner0.Partition + store adt.Store +} + +func (s *state0) AvailableBalance(bal abi.TokenAmount) (abi.TokenAmount, error) { + return s.GetAvailableBalance(bal), nil +} + +func (s *state0) VestedFunds(epoch abi.ChainEpoch) (abi.TokenAmount, error) { + return s.CheckVestedFunds(s.store, epoch) +} + +func (s *state0) LockedFunds() (LockedFunds, error) { + return LockedFunds{ + VestingFunds: s.State.LockedFunds, + InitialPledgeRequirement: s.State.InitialPledgeRequirement, + PreCommitDeposits: s.State.PreCommitDeposits, + }, nil +} + +func (s *state0) InitialPledge() (abi.TokenAmount, error) { + return s.State.InitialPledgeRequirement, nil +} + +func (s *state0) PreCommitDeposits() (abi.TokenAmount, error) { + return s.State.PreCommitDeposits, nil +} + +func (s *state0) GetSector(num abi.SectorNumber) (*SectorOnChainInfo, error) { + info, ok, err := s.State.GetSector(s.store, num) + if !ok || err != nil { + return nil, err + } + + ret := fromV0SectorOnChainInfo(*info) + return &ret, nil +} + +func (s *state0) FindSector(num abi.SectorNumber) 
(*SectorLocation, error) { + dlIdx, partIdx, err := s.State.FindSector(s.store, num) + if err != nil { + return nil, err + } + return &SectorLocation{ + Deadline: dlIdx, + Partition: partIdx, + }, nil +} + +func (s *state0) NumLiveSectors() (uint64, error) { + dls, err := s.State.LoadDeadlines(s.store) + if err != nil { + return 0, err + } + var total uint64 + if err := dls.ForEach(s.store, func(dlIdx uint64, dl *miner0.Deadline) error { + total += dl.LiveSectors + return nil + }); err != nil { + return 0, err + } + return total, nil +} + +// GetSectorExpiration returns the effective expiration of the given sector. +// +// If the sector does not expire early, the Early expiration field is 0. +func (s *state0) GetSectorExpiration(num abi.SectorNumber) (*SectorExpiration, error) { + dls, err := s.State.LoadDeadlines(s.store) + if err != nil { + return nil, err + } + // NOTE: this can be optimized significantly. + // 1. If the sector is non-faulty, it will either expire on-time (can be + // learned from the sector info), or in the next quantized expiration + // epoch (i.e., the first element in the partition's expiration queue. + // 2. If it's faulty, it will expire early within the first 14 entries + // of the expiration queue. 
+ stopErr := errors.New("stop") + out := SectorExpiration{} + err = dls.ForEach(s.store, func(dlIdx uint64, dl *miner0.Deadline) error { + partitions, err := dl.PartitionsArray(s.store) + if err != nil { + return err + } + quant := s.State.QuantSpecForDeadline(dlIdx) + var part miner0.Partition + return partitions.ForEach(&part, func(partIdx int64) error { + if found, err := part.Sectors.IsSet(uint64(num)); err != nil { + return err + } else if !found { + return nil + } + if found, err := part.Terminated.IsSet(uint64(num)); err != nil { + return err + } else if found { + // already terminated + return stopErr + } + + q, err := miner0.LoadExpirationQueue(s.store, part.ExpirationsEpochs, quant) + if err != nil { + return err + } + var exp miner0.ExpirationSet + return q.ForEach(&exp, func(epoch int64) error { + if early, err := exp.EarlySectors.IsSet(uint64(num)); err != nil { + return err + } else if early { + out.Early = abi.ChainEpoch(epoch) + return nil + } + if onTime, err := exp.OnTimeSectors.IsSet(uint64(num)); err != nil { + return err + } else if onTime { + out.OnTime = abi.ChainEpoch(epoch) + return stopErr + } + return nil + }) + }) + }) + if err == stopErr { + err = nil + } + if err != nil { + return nil, err + } + if out.Early == 0 && out.OnTime == 0 { + return nil, xerrors.Errorf("failed to find sector %d", num) + } + return &out, nil +} + +func (s *state0) GetPrecommittedSector(num abi.SectorNumber) (*SectorPreCommitOnChainInfo, error) { + info, ok, err := s.State.GetPrecommittedSector(s.store, num) + if !ok || err != nil { + return nil, err + } + + ret := fromV0SectorPreCommitOnChainInfo(*info) + return &ret, nil +} + +func (s *state0) LoadSectors(snos *bitfield.BitField) ([]*SectorOnChainInfo, error) { + sectors, err := miner0.LoadSectors(s.store, s.State.Sectors) + if err != nil { + return nil, err + } + + // If no sector numbers are specified, load all. 
+	if snos == nil {
+		infos := make([]*SectorOnChainInfo, 0, sectors.Length())
+		var info0 miner0.SectorOnChainInfo
+		if err := sectors.ForEach(&info0, func(_ int64) error {
+			info := fromV0SectorOnChainInfo(info0)
+			infos = append(infos, &info)
+			return nil
+		}); err != nil {
+			return nil, err
+		}
+		return infos, nil
+	}
+
+	// Otherwise, load selected.
+	infos0, err := sectors.Load(*snos)
+	if err != nil {
+		return nil, err
+	}
+	infos := make([]*SectorOnChainInfo, len(infos0))
+	for i, info0 := range infos0 {
+		info := fromV0SectorOnChainInfo(*info0)
+		infos[i] = &info
+	}
+	return infos, nil
+}
+
+func (s *state0) IsAllocated(num abi.SectorNumber) (bool, error) {
+	var allocatedSectors bitfield.BitField
+	if err := s.store.Get(s.store.Context(), s.State.AllocatedSectors, &allocatedSectors); err != nil {
+		return false, err
+	}
+
+	return allocatedSectors.IsSet(uint64(num))
+}
+
+func (s *state0) LoadDeadline(idx uint64) (Deadline, error) {
+	dls, err := s.State.LoadDeadlines(s.store)
+	if err != nil {
+		return nil, err
+	}
+	dl, err := dls.LoadDeadline(s.store, idx)
+	if err != nil {
+		return nil, err
+	}
+	return &deadline0{*dl, s.store}, nil
+}
+
+func (s *state0) ForEachDeadline(cb func(uint64, Deadline) error) error {
+	dls, err := s.State.LoadDeadlines(s.store)
+	if err != nil {
+		return err
+	}
+	return dls.ForEach(s.store, func(i uint64, dl *miner0.Deadline) error {
+		return cb(i, &deadline0{*dl, s.store})
+	})
+}
+
+func (s *state0) NumDeadlines() (uint64, error) {
+	return miner0.WPoStPeriodDeadlines, nil
+}
+
+func (s *state0) DeadlinesChanged(other State) (bool, error) {
+	other0, ok := other.(*state0)
+	if !ok {
+		// treat an upgrade as a change, always
+		return true, nil
+	}
+
+	return !s.State.Deadlines.Equals(other0.Deadlines), nil
+}
+
+func (s *state0) Info() (MinerInfo, error) {
+	info, err := s.State.GetInfo(s.store)
+	if err != nil {
+		return MinerInfo{}, err
+	}
+
+	var pid *peer.ID
+	if peerID, err := peer.IDFromBytes(info.PeerId); err ==
nil { + pid = &peerID + } + + mi := MinerInfo{ + Owner: info.Owner, + Worker: info.Worker, + ControlAddresses: info.ControlAddresses, + + NewWorker: address.Undef, + WorkerChangeEpoch: -1, + + PeerId: pid, + Multiaddrs: info.Multiaddrs, + SealProofType: info.SealProofType, + SectorSize: info.SectorSize, + WindowPoStPartitionSectors: info.WindowPoStPartitionSectors, + } + + if info.PendingWorkerKey != nil { + mi.NewWorker = info.PendingWorkerKey.NewWorker + mi.WorkerChangeEpoch = info.PendingWorkerKey.EffectiveAt + } + + return mi, nil +} + +func (s *state0) DeadlineInfo(epoch abi.ChainEpoch) (*dline.Info, error) { + return s.State.DeadlineInfo(epoch), nil +} + +func (s *state0) sectors() (adt.Array, error) { + return adt0.AsArray(s.store, s.Sectors) +} + +func (s *state0) decodeSectorOnChainInfo(val *cbg.Deferred) (SectorOnChainInfo, error) { + var si miner0.SectorOnChainInfo + err := si.UnmarshalCBOR(bytes.NewReader(val.Raw)) + if err != nil { + return SectorOnChainInfo{}, err + } + + return fromV0SectorOnChainInfo(si), nil +} + +func (s *state0) precommits() (adt.Map, error) { + return adt0.AsMap(s.store, s.PreCommittedSectors) +} + +func (s *state0) decodeSectorPreCommitOnChainInfo(val *cbg.Deferred) (SectorPreCommitOnChainInfo, error) { + var sp miner0.SectorPreCommitOnChainInfo + err := sp.UnmarshalCBOR(bytes.NewReader(val.Raw)) + if err != nil { + return SectorPreCommitOnChainInfo{}, err + } + + return fromV0SectorPreCommitOnChainInfo(sp), nil +} + +func (d *deadline0) LoadPartition(idx uint64) (Partition, error) { + p, err := d.Deadline.LoadPartition(d.store, idx) + if err != nil { + return nil, err + } + return &partition0{*p, d.store}, nil +} + +func (d *deadline0) ForEachPartition(cb func(uint64, Partition) error) error { + ps, err := d.Deadline.PartitionsArray(d.store) + if err != nil { + return err + } + var part miner0.Partition + return ps.ForEach(&part, func(i int64) error { + return cb(uint64(i), &partition0{part, d.store}) + }) +} + +func (d 
*deadline0) PartitionsChanged(other Deadline) (bool, error) {
+	other0, ok := other.(*deadline0)
+	if !ok {
+		// treat an upgrade as a change, always
+		return true, nil
+	}
+
+	return !d.Deadline.Partitions.Equals(other0.Deadline.Partitions), nil
+}
+
+func (d *deadline0) PostSubmissions() (bitfield.BitField, error) {
+	return d.Deadline.PostSubmissions, nil
+}
+
+func (p *partition0) AllSectors() (bitfield.BitField, error) {
+	return p.Partition.Sectors, nil
+}
+
+func (p *partition0) FaultySectors() (bitfield.BitField, error) {
+	return p.Partition.Faults, nil
+}
+
+func (p *partition0) RecoveringSectors() (bitfield.BitField, error) {
+	return p.Partition.Recoveries, nil
+}
+
+func fromV0SectorOnChainInfo(v0 miner0.SectorOnChainInfo) SectorOnChainInfo {
+	return (SectorOnChainInfo)(v0)
+}
+
+func fromV0SectorPreCommitOnChainInfo(v0 miner0.SectorPreCommitOnChainInfo) SectorPreCommitOnChainInfo {
+	return (SectorPreCommitOnChainInfo)(v0)
+}
diff --git a/chain/actors/builtin/multisig/multisig.go b/chain/actors/builtin/multisig/multisig.go
new file mode 100644
index 000000000..884b6f493
--- /dev/null
+++ b/chain/actors/builtin/multisig/multisig.go
@@ -0,0 +1,43 @@
+package multisig
+
+import (
+	"golang.org/x/xerrors"
+
+	"github.com/filecoin-project/go-address"
+	"github.com/filecoin-project/go-state-types/abi"
+	"github.com/filecoin-project/go-state-types/cbor"
+
+	builtin0 "github.com/filecoin-project/specs-actors/actors/builtin"
+	msig0 "github.com/filecoin-project/specs-actors/actors/builtin/multisig"
+
+	"github.com/filecoin-project/lotus/chain/actors/adt"
+	"github.com/filecoin-project/lotus/chain/types"
+)
+
+func Load(store adt.Store, act *types.Actor) (State, error) {
+	switch act.Code {
+	case builtin0.MultisigActorCodeID:
+		out := state0{store: store}
+		err := store.Get(store.Context(), act.Head, &out)
+		if err != nil {
+			return nil, err
+		}
+		return &out, nil
+	}
+	return nil, xerrors.Errorf("unknown actor code %s", act.Code)
+}
+
+type State interface {
+ cbor.Marshaler + + LockedBalance(epoch abi.ChainEpoch) (abi.TokenAmount, error) + StartEpoch() (abi.ChainEpoch, error) + UnlockDuration() (abi.ChainEpoch, error) + InitialBalance() (abi.TokenAmount, error) + Threshold() (uint64, error) + Signers() ([]address.Address, error) + + ForEachPendingTxn(func(id int64, txn Transaction) error) error +} + +type Transaction = msig0.Transaction diff --git a/chain/actors/builtin/multisig/v0.go b/chain/actors/builtin/multisig/v0.go new file mode 100644 index 000000000..ae0a7ac0e --- /dev/null +++ b/chain/actors/builtin/multisig/v0.go @@ -0,0 +1,59 @@ +package multisig + +import ( + "encoding/binary" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/lotus/chain/actors/adt" + "golang.org/x/xerrors" + + msig0 "github.com/filecoin-project/specs-actors/actors/builtin/multisig" + adt0 "github.com/filecoin-project/specs-actors/actors/util/adt" +) + +var _ State = (*state0)(nil) + +type state0 struct { + msig0.State + store adt.Store +} + +func (s *state0) LockedBalance(currEpoch abi.ChainEpoch) (abi.TokenAmount, error) { + return s.State.AmountLocked(currEpoch - s.State.StartEpoch), nil +} + +func (s *state0) StartEpoch() (abi.ChainEpoch, error) { + return s.State.StartEpoch, nil +} + +func (s *state0) UnlockDuration() (abi.ChainEpoch, error) { + return s.State.UnlockDuration, nil +} + +func (s *state0) InitialBalance() (abi.TokenAmount, error) { + return s.State.InitialBalance, nil +} + +func (s *state0) Threshold() (uint64, error) { + return s.State.NumApprovalsThreshold, nil +} + +func (s *state0) Signers() ([]address.Address, error) { + return s.State.Signers, nil +} + +func (s *state0) ForEachPendingTxn(cb func(id int64, txn Transaction) error) error { + arr, err := adt0.AsMap(s.store, s.State.PendingTxns) + if err != nil { + return err + } + var out msig0.Transaction + return arr.ForEach(&out, func(key string) error { + txid, n := 
binary.Varint([]byte(key)) + if n <= 0 { + return xerrors.Errorf("invalid pending transaction key: %v", key) + } + return cb(txid, (Transaction)(out)) + }) +} diff --git a/chain/actors/builtin/paych/mock/mock.go b/chain/actors/builtin/paych/mock/mock.go new file mode 100644 index 000000000..c4903f3ac --- /dev/null +++ b/chain/actors/builtin/paych/mock/mock.go @@ -0,0 +1,89 @@ +package mock + +import ( + "io" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/lotus/chain/actors/builtin/paych" +) + +type mockState struct { + from address.Address + to address.Address + settlingAt abi.ChainEpoch + toSend abi.TokenAmount + lanes map[uint64]paych.LaneState +} + +type mockLaneState struct { + redeemed big.Int + nonce uint64 +} + +// NewMockPayChState constructs a state for a payment channel with the set fixed values +// that satisfies the paych.State interface. +func NewMockPayChState(from address.Address, + to address.Address, + settlingAt abi.ChainEpoch, + toSend abi.TokenAmount, + lanes map[uint64]paych.LaneState, +) paych.State { + return &mockState{from, to, settlingAt, toSend, lanes} +} + +// NewMockLaneState constructs a state for a payment channel lane with the set fixed values +// that satisfies the paych.LaneState interface. 
Useful for populating lanes when +// calling NewMockPayChState +func NewMockLaneState(redeemed big.Int, nonce uint64) paych.LaneState { + return &mockLaneState{redeemed, nonce} +} + +func (ms *mockState) MarshalCBOR(io.Writer) error { + panic("not implemented") +} + +// Channel owner, who has funded the actor +func (ms *mockState) From() (address.Address, error) { + return ms.from, nil +} + +// Recipient of payouts from channel +func (ms *mockState) To() (address.Address, error) { + return ms.to, nil +} + +// Height at which the channel can be `Collected` +func (ms *mockState) SettlingAt() (abi.ChainEpoch, error) { + return ms.settlingAt, nil +} + +// Amount successfully redeemed through the payment channel, paid out on `Collect()` +func (ms *mockState) ToSend() (abi.TokenAmount, error) { + return ms.toSend, nil +} + +// Get total number of lanes +func (ms *mockState) LaneCount() (uint64, error) { + return uint64(len(ms.lanes)), nil +} + +// Iterate lane states +func (ms *mockState) ForEachLaneState(cb func(idx uint64, dl paych.LaneState) error) error { + var lastErr error + for lane, state := range ms.lanes { + if err := cb(lane, state); err != nil { + lastErr = err + } + } + return lastErr +} + +func (mls *mockLaneState) Redeemed() (big.Int, error) { + return mls.redeemed, nil +} + +func (mls *mockLaneState) Nonce() (uint64, error) { + return mls.nonce, nil +} diff --git a/chain/actors/builtin/paych/paych.go b/chain/actors/builtin/paych/paych.go new file mode 100644 index 000000000..dad54163f --- /dev/null +++ b/chain/actors/builtin/paych/paych.go @@ -0,0 +1,60 @@ +package paych + +import ( + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + big "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/go-state-types/cbor" + builtin0 "github.com/filecoin-project/specs-actors/actors/builtin" + paych0 "github.com/filecoin-project/specs-actors/actors/builtin/paych" + + 
"github.com/filecoin-project/lotus/chain/actors/adt" + "github.com/filecoin-project/lotus/chain/types" +) + +// Load returns an abstract copy of payment channel state, irregardless of actor version +func Load(store adt.Store, act *types.Actor) (State, error) { + switch act.Code { + case builtin0.PaymentChannelActorCodeID: + out := state0{store: store} + err := store.Get(store.Context(), act.Head, &out) + if err != nil { + return nil, err + } + return &out, nil + } + return nil, xerrors.Errorf("unknown actor code %s", act.Code) +} + +// State is an abstract version of payment channel state that works across +// versions +type State interface { + cbor.Marshaler + // Channel owner, who has funded the actor + From() (address.Address, error) + // Recipient of payouts from channel + To() (address.Address, error) + + // Height at which the channel can be `Collected` + SettlingAt() (abi.ChainEpoch, error) + + // Amount successfully redeemed through the payment channel, paid out on `Collect()` + ToSend() (abi.TokenAmount, error) + + // Get total number of lanes + LaneCount() (uint64, error) + + // Iterate lane states + ForEachLaneState(cb func(idx uint64, dl LaneState) error) error +} + +// LaneState is an abstract copy of the state of a single lane +type LaneState interface { + Redeemed() (big.Int, error) + Nonce() (uint64, error) +} + +type SignedVoucher = paych0.SignedVoucher +type ModVerifyParams = paych0.ModVerifyParams diff --git a/chain/actors/builtin/paych/v0.go b/chain/actors/builtin/paych/v0.go new file mode 100644 index 000000000..c0eea1000 --- /dev/null +++ b/chain/actors/builtin/paych/v0.go @@ -0,0 +1,91 @@ +package paych + +import ( + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + big "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/lotus/chain/actors/adt" + "github.com/filecoin-project/specs-actors/actors/builtin/paych" + adt0 
"github.com/filecoin-project/specs-actors/actors/util/adt" +) + +var _ State = (*state0)(nil) + +type state0 struct { + paych.State + store adt.Store + lsAmt *adt0.Array +} + +// Channel owner, who has funded the actor +func (s *state0) From() (address.Address, error) { + return s.State.From, nil +} + +// Recipient of payouts from channel +func (s *state0) To() (address.Address, error) { + return s.State.To, nil +} + +// Height at which the channel can be `Collected` +func (s *state0) SettlingAt() (abi.ChainEpoch, error) { + return s.State.SettlingAt, nil +} + +// Amount successfully redeemed through the payment channel, paid out on `Collect()` +func (s *state0) ToSend() (abi.TokenAmount, error) { + return s.State.ToSend, nil +} + +func (s *state0) getOrLoadLsAmt() (*adt0.Array, error) { + if s.lsAmt != nil { + return s.lsAmt, nil + } + + // Get the lane state from the chain + lsamt, err := adt0.AsArray(s.store, s.State.LaneStates) + if err != nil { + return nil, err + } + + s.lsAmt = lsamt + return lsamt, nil +} + +// Get total number of lanes +func (s *state0) LaneCount() (uint64, error) { + lsamt, err := s.getOrLoadLsAmt() + if err != nil { + return 0, err + } + return lsamt.Length(), nil +} + +// Iterate lane states +func (s *state0) ForEachLaneState(cb func(idx uint64, dl LaneState) error) error { + // Get the lane state from the chain + lsamt, err := s.getOrLoadLsAmt() + if err != nil { + return err + } + + // Note: we use a map instead of an array to store laneStates because the + // client sets the lane ID (the index) and potentially they could use a + // very large index. 
+ var ls paych.LaneState + return lsamt.ForEach(&ls, func(i int64) error { + return cb(uint64(i), &laneState0{ls}) + }) +} + +type laneState0 struct { + paych.LaneState +} + +func (ls *laneState0) Redeemed() (big.Int, error) { + return ls.LaneState.Redeemed, nil +} + +func (ls *laneState0) Nonce() (uint64, error) { + return ls.LaneState.Nonce, nil +} diff --git a/chain/actors/builtin/power/power.go b/chain/actors/builtin/power/power.go new file mode 100644 index 000000000..e4bb52d44 --- /dev/null +++ b/chain/actors/builtin/power/power.go @@ -0,0 +1,53 @@ +package power + +import ( + "github.com/filecoin-project/go-address" + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/cbor" + builtin0 "github.com/filecoin-project/specs-actors/actors/builtin" + + "github.com/filecoin-project/lotus/chain/actors/adt" + "github.com/filecoin-project/lotus/chain/actors/builtin" + "github.com/filecoin-project/lotus/chain/types" +) + +var Address = builtin0.StoragePowerActorAddr + +func Load(store adt.Store, act *types.Actor) (st State, err error) { + switch act.Code { + case builtin0.StoragePowerActorCodeID: + out := state0{store: store} + err := store.Get(store.Context(), act.Head, &out) + if err != nil { + return nil, err + } + return &out, nil + } + return nil, xerrors.Errorf("unknown actor code %s", act.Code) +} + +type State interface { + cbor.Marshaler + + TotalLocked() (abi.TokenAmount, error) + TotalPower() (Claim, error) + TotalCommitted() (Claim, error) + TotalPowerSmoothed() (builtin.FilterEstimate, error) + + // MinerCounts returns the number of miners. Participating is the number + // with power above the minimum miner threshold. 
+ MinerCounts() (participating, total uint64, err error) + MinerPower(address.Address) (Claim, bool, error) + MinerNominalPowerMeetsConsensusMinimum(address.Address) (bool, error) + ListAllMiners() ([]address.Address, error) +} + +type Claim struct { + // Sum of raw byte power for a miner's sectors. + RawBytePower abi.StoragePower + + // Sum of quality adjusted power for a miner's sectors. + QualityAdjPower abi.StoragePower +} diff --git a/chain/actors/builtin/power/v0.go b/chain/actors/builtin/power/v0.go new file mode 100644 index 000000000..f2fe96dad --- /dev/null +++ b/chain/actors/builtin/power/v0.go @@ -0,0 +1,85 @@ +package power + +import ( + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/lotus/chain/actors/builtin" + power0 "github.com/filecoin-project/specs-actors/actors/builtin/power" + "github.com/filecoin-project/specs-actors/actors/util/adt" +) + +var _ State = (*state0)(nil) + +type state0 struct { + power0.State + store adt.Store +} + +func (s *state0) TotalLocked() (abi.TokenAmount, error) { + return s.TotalPledgeCollateral, nil +} + +func (s *state0) TotalPower() (Claim, error) { + return Claim{ + RawBytePower: s.TotalRawBytePower, + QualityAdjPower: s.TotalQualityAdjPower, + }, nil +} + +// Committed power to the network. Includes miners below the minimum threshold. 
+func (s *state0) TotalCommitted() (Claim, error) { + return Claim{ + RawBytePower: s.TotalBytesCommitted, + QualityAdjPower: s.TotalQABytesCommitted, + }, nil +} + +func (s *state0) MinerPower(addr address.Address) (Claim, bool, error) { + claims, err := adt.AsMap(s.store, s.Claims) + if err != nil { + return Claim{}, false, err + } + var claim power0.Claim + ok, err := claims.Get(abi.AddrKey(addr), &claim) + if err != nil { + return Claim{}, false, err + } + return Claim{ + RawBytePower: claim.RawBytePower, + QualityAdjPower: claim.QualityAdjPower, + }, ok, nil +} + +func (s *state0) MinerNominalPowerMeetsConsensusMinimum(a address.Address) (bool, error) { + return s.State.MinerNominalPowerMeetsConsensusMinimum(s.store, a) +} + +func (s *state0) TotalPowerSmoothed() (builtin.FilterEstimate, error) { + return builtin.FromV0FilterEstimate(*s.State.ThisEpochQAPowerSmoothed), nil +} + +func (s *state0) MinerCounts() (uint64, uint64, error) { + return uint64(s.State.MinerAboveMinPowerCount), uint64(s.State.MinerCount), nil +} + +func (s *state0) ListAllMiners() ([]address.Address, error) { + claims, err := adt.AsMap(s.store, s.Claims) + if err != nil { + return nil, err + } + + var miners []address.Address + err = claims.ForEach(nil, func(k string) error { + a, err := address.NewFromBytes([]byte(k)) + if err != nil { + return err + } + miners = append(miners, a) + return nil + }) + if err != nil { + return nil, err + } + + return miners, nil +} diff --git a/chain/actors/builtin/reward/reward.go b/chain/actors/builtin/reward/reward.go new file mode 100644 index 000000000..cfa82c774 --- /dev/null +++ b/chain/actors/builtin/reward/reward.go @@ -0,0 +1,50 @@ +package reward + +import ( + "github.com/filecoin-project/go-state-types/abi" + reward0 "github.com/filecoin-project/specs-actors/actors/builtin/reward" + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-state-types/cbor" + builtin0 "github.com/filecoin-project/specs-actors/actors/builtin" + + 
"github.com/filecoin-project/lotus/chain/actors/adt" + "github.com/filecoin-project/lotus/chain/actors/builtin" + "github.com/filecoin-project/lotus/chain/types" +) + +var Address = builtin0.RewardActorAddr + +func Load(store adt.Store, act *types.Actor) (st State, err error) { + switch act.Code { + case builtin0.RewardActorCodeID: + out := state0{store: store} + err := store.Get(store.Context(), act.Head, &out) + if err != nil { + return nil, err + } + return &out, nil + } + return nil, xerrors.Errorf("unknown actor code %s", act.Code) +} + +type State interface { + cbor.Marshaler + + ThisEpochBaselinePower() (abi.StoragePower, error) + ThisEpochReward() (abi.StoragePower, error) + ThisEpochRewardSmoothed() (builtin.FilterEstimate, error) + + EffectiveBaselinePower() (abi.StoragePower, error) + EffectiveNetworkTime() (abi.ChainEpoch, error) + + TotalStoragePowerReward() (abi.TokenAmount, error) + + CumsumBaseline() (abi.StoragePower, error) + CumsumRealized() (abi.StoragePower, error) + + InitialPledgeForPower(abi.StoragePower, abi.TokenAmount, *builtin.FilterEstimate, abi.TokenAmount) (abi.TokenAmount, error) + PreCommitDepositForPower(builtin.FilterEstimate, abi.StoragePower) (abi.TokenAmount, error) +} + +type AwardBlockRewardParams = reward0.AwardBlockRewardParams diff --git a/chain/actors/builtin/reward/v0.go b/chain/actors/builtin/reward/v0.go new file mode 100644 index 000000000..df7117b67 --- /dev/null +++ b/chain/actors/builtin/reward/v0.go @@ -0,0 +1,71 @@ +package reward + +import ( + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/lotus/chain/actors/builtin" + miner0 "github.com/filecoin-project/specs-actors/actors/builtin/miner" + "github.com/filecoin-project/specs-actors/actors/builtin/reward" + "github.com/filecoin-project/specs-actors/actors/util/adt" + "github.com/filecoin-project/specs-actors/actors/util/smoothing" +) + +var _ State = (*state0)(nil) + +type state0 struct { + reward.State + store adt.Store +} + 
+func (s *state0) ThisEpochReward() (abi.StoragePower, error) { + return s.State.ThisEpochReward, nil +} + +func (s *state0) ThisEpochRewardSmoothed() (builtin.FilterEstimate, error) { + return builtin.FromV0FilterEstimate(*s.State.ThisEpochRewardSmoothed), nil +} + +func (s *state0) ThisEpochBaselinePower() (abi.StoragePower, error) { + return s.State.ThisEpochBaselinePower, nil +} + +func (s *state0) TotalStoragePowerReward() (abi.TokenAmount, error) { + return s.State.TotalMined, nil +} + +func (s *state0) EffectiveBaselinePower() (abi.StoragePower, error) { + return s.State.EffectiveBaselinePower, nil +} + +func (s *state0) EffectiveNetworkTime() (abi.ChainEpoch, error) { + return s.State.EffectiveNetworkTime, nil +} + +func (s *state0) CumsumBaseline() (abi.StoragePower, error) { + return s.State.CumsumBaseline, nil +} + +func (s *state0) CumsumRealized() (abi.StoragePower, error) { + return s.State.CumsumBaseline, nil +} + +func (s *state0) InitialPledgeForPower(sectorWeight abi.StoragePower, networkTotalPledge abi.TokenAmount, networkQAPower *builtin.FilterEstimate, circSupply abi.TokenAmount) (abi.TokenAmount, error) { + return miner0.InitialPledgeForPower( + sectorWeight, + s.State.ThisEpochBaselinePower, + networkTotalPledge, + s.State.ThisEpochRewardSmoothed, + &smoothing.FilterEstimate{ + PositionEstimate: networkQAPower.PositionEstimate, + VelocityEstimate: networkQAPower.VelocityEstimate, + }, + circSupply), nil +} + +func (s *state0) PreCommitDepositForPower(networkQAPower builtin.FilterEstimate, sectorWeight abi.StoragePower) (abi.TokenAmount, error) { + return miner0.PreCommitDepositForPower(s.State.ThisEpochRewardSmoothed, + &smoothing.FilterEstimate{ + PositionEstimate: networkQAPower.PositionEstimate, + VelocityEstimate: networkQAPower.VelocityEstimate, + }, + sectorWeight), nil +} diff --git a/chain/actors/builtin/verifreg/v0.go b/chain/actors/builtin/verifreg/v0.go new file mode 100644 index 000000000..c59a58811 --- /dev/null +++ 
b/chain/actors/builtin/verifreg/v0.go @@ -0,0 +1,71 @@ +package verifreg + +import ( + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + verifreg0 "github.com/filecoin-project/specs-actors/actors/builtin/verifreg" + adt0 "github.com/filecoin-project/specs-actors/actors/util/adt" + "github.com/ipfs/go-cid" + "golang.org/x/xerrors" + + "github.com/filecoin-project/lotus/chain/actors/adt" +) + +var _ State = (*state0)(nil) + +type state0 struct { + verifreg0.State + store adt.Store +} + +func getDataCap(store adt.Store, root cid.Cid, addr address.Address) (bool, abi.StoragePower, error) { + if addr.Protocol() != address.ID { + return false, big.Zero(), xerrors.Errorf("can only look up ID addresses") + } + + vh, err := adt0.AsMap(store, root) + if err != nil { + return false, big.Zero(), xerrors.Errorf("loading verifreg: %w", err) + } + + var dcap abi.StoragePower + if found, err := vh.Get(abi.AddrKey(addr), &dcap); err != nil { + return false, big.Zero(), xerrors.Errorf("looking up addr: %w", err) + } else if !found { + return false, big.Zero(), nil + } + + return true, dcap, nil +} + +func (s *state0) VerifiedClientDataCap(addr address.Address) (bool, abi.StoragePower, error) { + return getDataCap(s.store, s.State.VerifiedClients, addr) +} + +func (s *state0) VerifierDataCap(addr address.Address) (bool, abi.StoragePower, error) { + return getDataCap(s.store, s.State.Verifiers, addr) +} + +func forEachCap(store adt.Store, root cid.Cid, cb func(addr address.Address, dcap abi.StoragePower) error) error { + vh, err := adt0.AsMap(store, root) + if err != nil { + return xerrors.Errorf("loading verified clients: %w", err) + } + var dcap abi.StoragePower + return vh.ForEach(&dcap, func(key string) error { + a, err := address.NewFromBytes([]byte(key)) + if err != nil { + return err + } + return cb(a, dcap) + }) +} + +func (s *state0) ForEachVerifier(cb func(addr address.Address, 
dcap abi.StoragePower) error) error { + return forEachCap(s.store, s.State.Verifiers, cb) +} + +func (s *state0) ForEachClient(cb func(addr address.Address, dcap abi.StoragePower) error) error { + return forEachCap(s.store, s.State.VerifiedClients, cb) +} diff --git a/chain/actors/builtin/verifreg/verifreg.go b/chain/actors/builtin/verifreg/verifreg.go new file mode 100644 index 000000000..c861f862f --- /dev/null +++ b/chain/actors/builtin/verifreg/verifreg.go @@ -0,0 +1,37 @@ +package verifreg + +import ( + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-state-types/cbor" + builtin0 "github.com/filecoin-project/specs-actors/actors/builtin" + + "github.com/filecoin-project/lotus/chain/actors/adt" + "github.com/filecoin-project/lotus/chain/types" +) + +var Address = builtin0.VerifiedRegistryActorAddr + +func Load(store adt.Store, act *types.Actor) (State, error) { + switch act.Code { + case builtin0.VerifiedRegistryActorCodeID: + out := state0{store: store} + err := store.Get(store.Context(), act.Head, &out) + if err != nil { + return nil, err + } + return &out, nil + } + return nil, xerrors.Errorf("unknown actor code %s", act.Code) +} + +type State interface { + cbor.Marshaler + + VerifiedClientDataCap(address.Address) (bool, abi.StoragePower, error) + VerifierDataCap(address.Address) (bool, abi.StoragePower, error) + ForEachVerifier(func(addr address.Address, dcap abi.StoragePower) error) error + ForEachClient(func(addr address.Address, dcap abi.StoragePower) error) error +} diff --git a/chain/actors/cbor_gen.go b/chain/actors/cbor_gen.go deleted file mode 100644 index 73a3512a0..000000000 --- a/chain/actors/cbor_gen.go +++ /dev/null @@ -1,4232 +0,0 @@ -package actors - -import ( - "fmt" - "io" - "sort" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/lotus/chain/types" - "github.com/libp2p/go-libp2p-core/peer" - cbg 
"github.com/whyrusleeping/cbor-gen" - xerrors "golang.org/x/xerrors" -) - -// Code generated by github.com/whyrusleeping/cbor-gen. DO NOT EDIT. - -var _ = xerrors.Errorf - -func (t *InitActorState) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write([]byte{130}); err != nil { - return err - } - - // t.AddressMap (cid.Cid) (struct) - - if err := cbg.WriteCid(w, t.AddressMap); err != nil { - return xerrors.Errorf("failed to write cid field t.AddressMap: %w", err) - } - - // t.NextID (uint64) (uint64) - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajUnsignedInt, uint64(t.NextID))); err != nil { - return err - } - return nil -} - -func (t *InitActorState) UnmarshalCBOR(r io.Reader) error { - br := cbg.GetPeeker(r) - - maj, extra, err := cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 2 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.AddressMap (cid.Cid) (struct) - - { - - c, err := cbg.ReadCid(br) - if err != nil { - return xerrors.Errorf("failed to read cid field t.AddressMap: %w", err) - } - - t.AddressMap = c - - } - // t.NextID (uint64) (uint64) - - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.NextID = uint64(extra) - return nil -} - -func (t *ExecParams) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write([]byte{130}); err != nil { - return err - } - - // t.Code (cid.Cid) (struct) - - if err := cbg.WriteCid(w, t.Code); err != nil { - return xerrors.Errorf("failed to write cid field t.Code: %w", err) - } - - // t.Params ([]uint8) (slice) - if len(t.Params) > cbg.ByteArrayMaxLen { - return xerrors.Errorf("Byte array in field t.Params was too long") - } - - if 
_, err := w.Write(cbg.CborEncodeMajorType(cbg.MajByteString, uint64(len(t.Params)))); err != nil { - return err - } - if _, err := w.Write(t.Params); err != nil { - return err - } - return nil -} - -func (t *ExecParams) UnmarshalCBOR(r io.Reader) error { - br := cbg.GetPeeker(r) - - maj, extra, err := cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 2 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.Code (cid.Cid) (struct) - - { - - c, err := cbg.ReadCid(br) - if err != nil { - return xerrors.Errorf("failed to read cid field t.Code: %w", err) - } - - t.Code = c - - } - // t.Params ([]uint8) (slice) - - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err - } - - if extra > cbg.ByteArrayMaxLen { - return fmt.Errorf("t.Params: byte array too large (%d)", extra) - } - if maj != cbg.MajByteString { - return fmt.Errorf("expected byte array") - } - t.Params = make([]byte, extra) - if _, err := io.ReadFull(br, t.Params); err != nil { - return err - } - return nil -} - -func (t *AccountActorState) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write([]byte{129}); err != nil { - return err - } - - // t.Address (address.Address) (struct) - if err := t.Address.MarshalCBOR(w); err != nil { - return err - } - return nil -} - -func (t *AccountActorState) UnmarshalCBOR(r io.Reader) error { - br := cbg.GetPeeker(r) - - maj, extra, err := cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 1 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.Address (address.Address) (struct) - - { - - if err := t.Address.UnmarshalCBOR(br); err != nil { - return err - } - - } - return nil -} - -func (t *StorageMinerActorState) 
MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write([]byte{138}); err != nil { - return err - } - - // t.PreCommittedSectors (map[string]*actors.PreCommittedSector) (map) - { - if len(t.PreCommittedSectors) > 4096 { - return xerrors.Errorf("cannot marshal t.PreCommittedSectors map too large") - } - - if err := cbg.CborWriteHeader(w, cbg.MajMap, uint64(len(t.PreCommittedSectors))); err != nil { - return err - } - - keys := make([]string, 0, len(t.PreCommittedSectors)) - for k := range t.PreCommittedSectors { - keys = append(keys, k) - } - sort.Strings(keys) - for _, k := range keys { - v := t.PreCommittedSectors[k] - - if len(k) > cbg.MaxLength { - return xerrors.Errorf("Value in field k was too long") - } - - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajTextString, uint64(len(k)))); err != nil { - return err - } - if _, err := w.Write([]byte(k)); err != nil { - return err - } - - if err := v.MarshalCBOR(w); err != nil { - return err - } - - } - } - - // t.Sectors (cid.Cid) (struct) - - if err := cbg.WriteCid(w, t.Sectors); err != nil { - return xerrors.Errorf("failed to write cid field t.Sectors: %w", err) - } - - // t.ProvingSet (cid.Cid) (struct) - - if err := cbg.WriteCid(w, t.ProvingSet); err != nil { - return xerrors.Errorf("failed to write cid field t.ProvingSet: %w", err) - } - - // t.Info (cid.Cid) (struct) - - if err := cbg.WriteCid(w, t.Info); err != nil { - return xerrors.Errorf("failed to write cid field t.Info: %w", err) - } - - // t.FaultSet (types.BitField) (struct) - if err := t.FaultSet.MarshalCBOR(w); err != nil { - return err - } - - // t.LastFaultSubmission (uint64) (uint64) - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajUnsignedInt, uint64(t.LastFaultSubmission))); err != nil { - return err - } - - // t.Power (types.BigInt) (struct) - if err := t.Power.MarshalCBOR(w); err != nil { - return err - } - - // t.Active (bool) (bool) - if err := cbg.WriteBool(w, 
t.Active); err != nil { - return err - } - - // t.SlashedAt (uint64) (uint64) - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajUnsignedInt, uint64(t.SlashedAt))); err != nil { - return err - } - - // t.ElectionPeriodStart (uint64) (uint64) - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajUnsignedInt, uint64(t.ElectionPeriodStart))); err != nil { - return err - } - return nil -} - -func (t *StorageMinerActorState) UnmarshalCBOR(r io.Reader) error { - br := cbg.GetPeeker(r) - - maj, extra, err := cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 10 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.PreCommittedSectors (map[string]*actors.PreCommittedSector) (map) - - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajMap { - return fmt.Errorf("expected a map (major type 5)") - } - if extra > 4096 { - return fmt.Errorf("t.PreCommittedSectors: map too large") - } - - t.PreCommittedSectors = make(map[string]*PreCommittedSector, extra) - - for i, l := 0, int(extra); i < l; i++ { - - var k string - - { - sval, err := cbg.ReadString(br) - if err != nil { - return err - } - - k = string(sval) - } - - var v *PreCommittedSector - - { - - pb, err := br.PeekByte() - if err != nil { - return err - } - if pb == cbg.CborNull[0] { - var nbuf [1]byte - if _, err := br.Read(nbuf[:]); err != nil { - return err - } - } else { - v = new(PreCommittedSector) - if err := v.UnmarshalCBOR(br); err != nil { - return err - } - } - - } - - t.PreCommittedSectors[k] = v - - } - // t.Sectors (cid.Cid) (struct) - - { - - c, err := cbg.ReadCid(br) - if err != nil { - return xerrors.Errorf("failed to read cid field t.Sectors: %w", err) - } - - t.Sectors = c - - } - // t.ProvingSet (cid.Cid) (struct) - - { - - c, err := cbg.ReadCid(br) - if err != nil { - return xerrors.Errorf("failed to read cid field 
t.ProvingSet: %w", err) - } - - t.ProvingSet = c - - } - // t.Info (cid.Cid) (struct) - - { - - c, err := cbg.ReadCid(br) - if err != nil { - return xerrors.Errorf("failed to read cid field t.Info: %w", err) - } - - t.Info = c - - } - // t.FaultSet (types.BitField) (struct) - - { - - if err := t.FaultSet.UnmarshalCBOR(br); err != nil { - return err - } - - } - // t.LastFaultSubmission (uint64) (uint64) - - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.LastFaultSubmission = uint64(extra) - // t.Power (types.BigInt) (struct) - - { - - if err := t.Power.UnmarshalCBOR(br); err != nil { - return err - } - - } - // t.Active (bool) (bool) - - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajOther { - return fmt.Errorf("booleans must be major type 7") - } - switch extra { - case 20: - t.Active = false - case 21: - t.Active = true - default: - return fmt.Errorf("booleans are either major type 7, value 20 or 21 (got %d)", extra) - } - // t.SlashedAt (uint64) (uint64) - - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.SlashedAt = uint64(extra) - // t.ElectionPeriodStart (uint64) (uint64) - - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.ElectionPeriodStart = uint64(extra) - return nil -} - -func (t *StorageMinerConstructorParams) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write([]byte{132}); err != nil { - return err - } - - // t.Owner (address.Address) (struct) - if err := t.Owner.MarshalCBOR(w); err != nil { - return err - } - - // t.Worker (address.Address) (struct) - if err := 
t.Worker.MarshalCBOR(w); err != nil { - return err - } - - // t.SectorSize (uint64) (uint64) - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajUnsignedInt, uint64(t.SectorSize))); err != nil { - return err - } - - // t.PeerID (peer.ID) (string) - if len(t.PeerID) > cbg.MaxLength { - return xerrors.Errorf("Value in field t.PeerID was too long") - } - - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajTextString, uint64(len(t.PeerID)))); err != nil { - return err - } - if _, err := w.Write([]byte(t.PeerID)); err != nil { - return err - } - return nil -} - -func (t *StorageMinerConstructorParams) UnmarshalCBOR(r io.Reader) error { - br := cbg.GetPeeker(r) - - maj, extra, err := cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 4 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.Owner (address.Address) (struct) - - { - - if err := t.Owner.UnmarshalCBOR(br); err != nil { - return err - } - - } - // t.Worker (address.Address) (struct) - - { - - if err := t.Worker.UnmarshalCBOR(br); err != nil { - return err - } - - } - // t.SectorSize (uint64) (uint64) - - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.SectorSize = uint64(extra) - // t.PeerID (peer.ID) (string) - - { - sval, err := cbg.ReadString(br) - if err != nil { - return err - } - - t.PeerID = peer.ID(sval) - } - return nil -} - -func (t *SectorPreCommitInfo) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write([]byte{132}); err != nil { - return err - } - - // t.SectorNumber (uint64) (uint64) - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajUnsignedInt, uint64(t.SectorNumber))); err != nil { - return err - } - - // t.CommR ([]uint8) (slice) - if len(t.CommR) > 
cbg.ByteArrayMaxLen { - return xerrors.Errorf("Byte array in field t.CommR was too long") - } - - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajByteString, uint64(len(t.CommR)))); err != nil { - return err - } - if _, err := w.Write(t.CommR); err != nil { - return err - } - - // t.SealEpoch (uint64) (uint64) - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajUnsignedInt, uint64(t.SealEpoch))); err != nil { - return err - } - - // t.DealIDs ([]uint64) (slice) - if len(t.DealIDs) > cbg.MaxLength { - return xerrors.Errorf("Slice value in field t.DealIDs was too long") - } - - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajArray, uint64(len(t.DealIDs)))); err != nil { - return err - } - for _, v := range t.DealIDs { - if err := cbg.CborWriteHeader(w, cbg.MajUnsignedInt, v); err != nil { - return err - } - } - return nil -} - -func (t *SectorPreCommitInfo) UnmarshalCBOR(r io.Reader) error { - br := cbg.GetPeeker(r) - - maj, extra, err := cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 4 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.SectorNumber (uint64) (uint64) - - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.SectorNumber = uint64(extra) - // t.CommR ([]uint8) (slice) - - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err - } - - if extra > cbg.ByteArrayMaxLen { - return fmt.Errorf("t.CommR: byte array too large (%d)", extra) - } - if maj != cbg.MajByteString { - return fmt.Errorf("expected byte array") - } - t.CommR = make([]byte, extra) - if _, err := io.ReadFull(br, t.CommR); err != nil { - return err - } - // t.SealEpoch (uint64) (uint64) - - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return 
fmt.Errorf("wrong type for uint64 field") - } - t.SealEpoch = uint64(extra) - // t.DealIDs ([]uint64) (slice) - - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err - } - - if extra > cbg.MaxLength { - return fmt.Errorf("t.DealIDs: array too large (%d)", extra) - } - - if maj != cbg.MajArray { - return fmt.Errorf("expected cbor array") - } - if extra > 0 { - t.DealIDs = make([]uint64, extra) - } - for i := 0; i < int(extra); i++ { - - maj, val, err := cbg.CborReadHeader(br) - if err != nil { - return xerrors.Errorf("failed to read uint64 for t.DealIDs slice: %w", err) - } - - if maj != cbg.MajUnsignedInt { - return xerrors.Errorf("value read for array t.DealIDs was not a uint, instead got %d", maj) - } - - t.DealIDs[i] = val - } - - return nil -} - -func (t *PreCommittedSector) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write([]byte{130}); err != nil { - return err - } - - // t.Info (actors.SectorPreCommitInfo) (struct) - if err := t.Info.MarshalCBOR(w); err != nil { - return err - } - - // t.ReceivedEpoch (uint64) (uint64) - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajUnsignedInt, uint64(t.ReceivedEpoch))); err != nil { - return err - } - return nil -} - -func (t *PreCommittedSector) UnmarshalCBOR(r io.Reader) error { - br := cbg.GetPeeker(r) - - maj, extra, err := cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 2 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.Info (actors.SectorPreCommitInfo) (struct) - - { - - if err := t.Info.UnmarshalCBOR(br); err != nil { - return err - } - - } - // t.ReceivedEpoch (uint64) (uint64) - - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.ReceivedEpoch = uint64(extra) - 
return nil -} - -func (t *MinerInfo) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write([]byte{132}); err != nil { - return err - } - - // t.Owner (address.Address) (struct) - if err := t.Owner.MarshalCBOR(w); err != nil { - return err - } - - // t.Worker (address.Address) (struct) - if err := t.Worker.MarshalCBOR(w); err != nil { - return err - } - - // t.PeerID (peer.ID) (string) - if len(t.PeerID) > cbg.MaxLength { - return xerrors.Errorf("Value in field t.PeerID was too long") - } - - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajTextString, uint64(len(t.PeerID)))); err != nil { - return err - } - if _, err := w.Write([]byte(t.PeerID)); err != nil { - return err - } - - // t.SectorSize (uint64) (uint64) - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajUnsignedInt, uint64(t.SectorSize))); err != nil { - return err - } - return nil -} - -func (t *MinerInfo) UnmarshalCBOR(r io.Reader) error { - br := cbg.GetPeeker(r) - - maj, extra, err := cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 4 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.Owner (address.Address) (struct) - - { - - if err := t.Owner.UnmarshalCBOR(br); err != nil { - return err - } - - } - // t.Worker (address.Address) (struct) - - { - - if err := t.Worker.UnmarshalCBOR(br); err != nil { - return err - } - - } - // t.PeerID (peer.ID) (string) - - { - sval, err := cbg.ReadString(br) - if err != nil { - return err - } - - t.PeerID = peer.ID(sval) - } - // t.SectorSize (uint64) (uint64) - - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.SectorSize = uint64(extra) - return nil -} - -func (t *SubmitFallbackPoStParams) MarshalCBOR(w io.Writer) error { - if t == nil { 
- _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write([]byte{130}); err != nil { - return err - } - - // t.Proof ([]uint8) (slice) - if len(t.Proof) > cbg.ByteArrayMaxLen { - return xerrors.Errorf("Byte array in field t.Proof was too long") - } - - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajByteString, uint64(len(t.Proof)))); err != nil { - return err - } - if _, err := w.Write(t.Proof); err != nil { - return err - } - - // t.Candidates ([]types.EPostTicket) (slice) - if len(t.Candidates) > cbg.MaxLength { - return xerrors.Errorf("Slice value in field t.Candidates was too long") - } - - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajArray, uint64(len(t.Candidates)))); err != nil { - return err - } - for _, v := range t.Candidates { - if err := v.MarshalCBOR(w); err != nil { - return err - } - } - return nil -} - -func (t *SubmitFallbackPoStParams) UnmarshalCBOR(r io.Reader) error { - br := cbg.GetPeeker(r) - - maj, extra, err := cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 2 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.Proof ([]uint8) (slice) - - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err - } - - if extra > cbg.ByteArrayMaxLen { - return fmt.Errorf("t.Proof: byte array too large (%d)", extra) - } - if maj != cbg.MajByteString { - return fmt.Errorf("expected byte array") - } - t.Proof = make([]byte, extra) - if _, err := io.ReadFull(br, t.Proof); err != nil { - return err - } - // t.Candidates ([]types.EPostTicket) (slice) - - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err - } - - if extra > cbg.MaxLength { - return fmt.Errorf("t.Candidates: array too large (%d)", extra) - } - - if maj != cbg.MajArray { - return fmt.Errorf("expected cbor array") - } - if extra > 0 { - t.Candidates = make([]types.EPostTicket, extra) - } - for i := 0; 
i < int(extra); i++ { - - var v types.EPostTicket - if err := v.UnmarshalCBOR(br); err != nil { - return err - } - - t.Candidates[i] = v - } - - return nil -} - -func (t *PaymentVerifyParams) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write([]byte{130}); err != nil { - return err - } - - // t.Extra ([]uint8) (slice) - if len(t.Extra) > cbg.ByteArrayMaxLen { - return xerrors.Errorf("Byte array in field t.Extra was too long") - } - - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajByteString, uint64(len(t.Extra)))); err != nil { - return err - } - if _, err := w.Write(t.Extra); err != nil { - return err - } - - // t.Proof ([]uint8) (slice) - if len(t.Proof) > cbg.ByteArrayMaxLen { - return xerrors.Errorf("Byte array in field t.Proof was too long") - } - - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajByteString, uint64(len(t.Proof)))); err != nil { - return err - } - if _, err := w.Write(t.Proof); err != nil { - return err - } - return nil -} - -func (t *PaymentVerifyParams) UnmarshalCBOR(r io.Reader) error { - br := cbg.GetPeeker(r) - - maj, extra, err := cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 2 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.Extra ([]uint8) (slice) - - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err - } - - if extra > cbg.ByteArrayMaxLen { - return fmt.Errorf("t.Extra: byte array too large (%d)", extra) - } - if maj != cbg.MajByteString { - return fmt.Errorf("expected byte array") - } - t.Extra = make([]byte, extra) - if _, err := io.ReadFull(br, t.Extra); err != nil { - return err - } - // t.Proof ([]uint8) (slice) - - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err - } - - if extra > cbg.ByteArrayMaxLen { - return fmt.Errorf("t.Proof: byte array too large (%d)", 
extra) - } - if maj != cbg.MajByteString { - return fmt.Errorf("expected byte array") - } - t.Proof = make([]byte, extra) - if _, err := io.ReadFull(br, t.Proof); err != nil { - return err - } - return nil -} - -func (t *UpdatePeerIDParams) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write([]byte{129}); err != nil { - return err - } - - // t.PeerID (peer.ID) (string) - if len(t.PeerID) > cbg.MaxLength { - return xerrors.Errorf("Value in field t.PeerID was too long") - } - - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajTextString, uint64(len(t.PeerID)))); err != nil { - return err - } - if _, err := w.Write([]byte(t.PeerID)); err != nil { - return err - } - return nil -} - -func (t *UpdatePeerIDParams) UnmarshalCBOR(r io.Reader) error { - br := cbg.GetPeeker(r) - - maj, extra, err := cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 1 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.PeerID (peer.ID) (string) - - { - sval, err := cbg.ReadString(br) - if err != nil { - return err - } - - t.PeerID = peer.ID(sval) - } - return nil -} - -func (t *DeclareFaultsParams) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write([]byte{129}); err != nil { - return err - } - - // t.Faults (types.BitField) (struct) - if err := t.Faults.MarshalCBOR(w); err != nil { - return err - } - return nil -} - -func (t *DeclareFaultsParams) UnmarshalCBOR(r io.Reader) error { - br := cbg.GetPeeker(r) - - maj, extra, err := cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 1 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.Faults (types.BitField) (struct) - - { - - if err := 
t.Faults.UnmarshalCBOR(br); err != nil { - return err - } - - } - return nil -} - -func (t *MultiSigActorState) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write([]byte{135}); err != nil { - return err - } - - // t.Signers ([]address.Address) (slice) - if len(t.Signers) > cbg.MaxLength { - return xerrors.Errorf("Slice value in field t.Signers was too long") - } - - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajArray, uint64(len(t.Signers)))); err != nil { - return err - } - for _, v := range t.Signers { - if err := v.MarshalCBOR(w); err != nil { - return err - } - } - - // t.Required (uint64) (uint64) - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajUnsignedInt, uint64(t.Required))); err != nil { - return err - } - - // t.NextTxID (uint64) (uint64) - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajUnsignedInt, uint64(t.NextTxID))); err != nil { - return err - } - - // t.InitialBalance (types.BigInt) (struct) - if err := t.InitialBalance.MarshalCBOR(w); err != nil { - return err - } - - // t.StartingBlock (uint64) (uint64) - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajUnsignedInt, uint64(t.StartingBlock))); err != nil { - return err - } - - // t.UnlockDuration (uint64) (uint64) - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajUnsignedInt, uint64(t.UnlockDuration))); err != nil { - return err - } - - // t.Transactions ([]actors.MTransaction) (slice) - if len(t.Transactions) > cbg.MaxLength { - return xerrors.Errorf("Slice value in field t.Transactions was too long") - } - - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajArray, uint64(len(t.Transactions)))); err != nil { - return err - } - for _, v := range t.Transactions { - if err := v.MarshalCBOR(w); err != nil { - return err - } - } - return nil -} - -func (t *MultiSigActorState) UnmarshalCBOR(r io.Reader) error { - br := cbg.GetPeeker(r) - - maj, extra, err := cbg.CborReadHeader(br) - if err != nil { - return 
err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 7 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.Signers ([]address.Address) (slice) - - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err - } - - if extra > cbg.MaxLength { - return fmt.Errorf("t.Signers: array too large (%d)", extra) - } - - if maj != cbg.MajArray { - return fmt.Errorf("expected cbor array") - } - if extra > 0 { - t.Signers = make([]address.Address, extra) - } - for i := 0; i < int(extra); i++ { - - var v address.Address - if err := v.UnmarshalCBOR(br); err != nil { - return err - } - - t.Signers[i] = v - } - - // t.Required (uint64) (uint64) - - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.Required = uint64(extra) - // t.NextTxID (uint64) (uint64) - - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.NextTxID = uint64(extra) - // t.InitialBalance (types.BigInt) (struct) - - { - - if err := t.InitialBalance.UnmarshalCBOR(br); err != nil { - return err - } - - } - // t.StartingBlock (uint64) (uint64) - - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.StartingBlock = uint64(extra) - // t.UnlockDuration (uint64) (uint64) - - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.UnlockDuration = uint64(extra) - // t.Transactions ([]actors.MTransaction) (slice) - - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err - } - - if extra > cbg.MaxLength { - return fmt.Errorf("t.Transactions: array too large 
(%d)", extra) - } - - if maj != cbg.MajArray { - return fmt.Errorf("expected cbor array") - } - if extra > 0 { - t.Transactions = make([]MTransaction, extra) - } - for i := 0; i < int(extra); i++ { - - var v MTransaction - if err := v.UnmarshalCBOR(br); err != nil { - return err - } - - t.Transactions[i] = v - } - - return nil -} - -func (t *MultiSigConstructorParams) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write([]byte{131}); err != nil { - return err - } - - // t.Signers ([]address.Address) (slice) - if len(t.Signers) > cbg.MaxLength { - return xerrors.Errorf("Slice value in field t.Signers was too long") - } - - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajArray, uint64(len(t.Signers)))); err != nil { - return err - } - for _, v := range t.Signers { - if err := v.MarshalCBOR(w); err != nil { - return err - } - } - - // t.Required (uint64) (uint64) - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajUnsignedInt, uint64(t.Required))); err != nil { - return err - } - - // t.UnlockDuration (uint64) (uint64) - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajUnsignedInt, uint64(t.UnlockDuration))); err != nil { - return err - } - return nil -} - -func (t *MultiSigConstructorParams) UnmarshalCBOR(r io.Reader) error { - br := cbg.GetPeeker(r) - - maj, extra, err := cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 3 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.Signers ([]address.Address) (slice) - - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err - } - - if extra > cbg.MaxLength { - return fmt.Errorf("t.Signers: array too large (%d)", extra) - } - - if maj != cbg.MajArray { - return fmt.Errorf("expected cbor array") - } - if extra > 0 { - t.Signers = make([]address.Address, extra) - } - for i := 0; i < int(extra); 
i++ { - - var v address.Address - if err := v.UnmarshalCBOR(br); err != nil { - return err - } - - t.Signers[i] = v - } - - // t.Required (uint64) (uint64) - - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.Required = uint64(extra) - // t.UnlockDuration (uint64) (uint64) - - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.UnlockDuration = uint64(extra) - return nil -} - -func (t *MultiSigProposeParams) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write([]byte{132}); err != nil { - return err - } - - // t.To (address.Address) (struct) - if err := t.To.MarshalCBOR(w); err != nil { - return err - } - - // t.Value (types.BigInt) (struct) - if err := t.Value.MarshalCBOR(w); err != nil { - return err - } - - // t.Method (uint64) (uint64) - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajUnsignedInt, uint64(t.Method))); err != nil { - return err - } - - // t.Params ([]uint8) (slice) - if len(t.Params) > cbg.ByteArrayMaxLen { - return xerrors.Errorf("Byte array in field t.Params was too long") - } - - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajByteString, uint64(len(t.Params)))); err != nil { - return err - } - if _, err := w.Write(t.Params); err != nil { - return err - } - return nil -} - -func (t *MultiSigProposeParams) UnmarshalCBOR(r io.Reader) error { - br := cbg.GetPeeker(r) - - maj, extra, err := cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 4 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.To (address.Address) (struct) - - { - - if err := t.To.UnmarshalCBOR(br); err != nil { - return err - } - - } - // 
t.Value (types.BigInt) (struct) - - { - - if err := t.Value.UnmarshalCBOR(br); err != nil { - return err - } - - } - // t.Method (uint64) (uint64) - - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.Method = uint64(extra) - // t.Params ([]uint8) (slice) - - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err - } - - if extra > cbg.ByteArrayMaxLen { - return fmt.Errorf("t.Params: byte array too large (%d)", extra) - } - if maj != cbg.MajByteString { - return fmt.Errorf("expected byte array") - } - t.Params = make([]byte, extra) - if _, err := io.ReadFull(br, t.Params); err != nil { - return err - } - return nil -} - -func (t *MultiSigTxID) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write([]byte{129}); err != nil { - return err - } - - // t.TxID (uint64) (uint64) - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajUnsignedInt, uint64(t.TxID))); err != nil { - return err - } - return nil -} - -func (t *MultiSigTxID) UnmarshalCBOR(r io.Reader) error { - br := cbg.GetPeeker(r) - - maj, extra, err := cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 1 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.TxID (uint64) (uint64) - - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.TxID = uint64(extra) - return nil -} - -func (t *MultiSigSwapSignerParams) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write([]byte{130}); err != nil { - return err - } - - // t.From (address.Address) (struct) - if err := t.From.MarshalCBOR(w); err != nil { - 
return err - } - - // t.To (address.Address) (struct) - if err := t.To.MarshalCBOR(w); err != nil { - return err - } - return nil -} - -func (t *MultiSigSwapSignerParams) UnmarshalCBOR(r io.Reader) error { - br := cbg.GetPeeker(r) - - maj, extra, err := cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 2 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.From (address.Address) (struct) - - { - - if err := t.From.UnmarshalCBOR(br); err != nil { - return err - } - - } - // t.To (address.Address) (struct) - - { - - if err := t.To.UnmarshalCBOR(br); err != nil { - return err - } - - } - return nil -} - -func (t *MultiSigChangeReqParams) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write([]byte{129}); err != nil { - return err - } - - // t.Req (uint64) (uint64) - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajUnsignedInt, uint64(t.Req))); err != nil { - return err - } - return nil -} - -func (t *MultiSigChangeReqParams) UnmarshalCBOR(r io.Reader) error { - br := cbg.GetPeeker(r) - - maj, extra, err := cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 1 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.Req (uint64) (uint64) - - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.Req = uint64(extra) - return nil -} - -func (t *MTransaction) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write([]byte{138}); err != nil { - return err - } - - // t.Created (uint64) (uint64) - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajUnsignedInt, 
uint64(t.Created))); err != nil { - return err - } - - // t.TxID (uint64) (uint64) - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajUnsignedInt, uint64(t.TxID))); err != nil { - return err - } - - // t.To (address.Address) (struct) - if err := t.To.MarshalCBOR(w); err != nil { - return err - } - - // t.Value (types.BigInt) (struct) - if err := t.Value.MarshalCBOR(w); err != nil { - return err - } - - // t.Method (uint64) (uint64) - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajUnsignedInt, uint64(t.Method))); err != nil { - return err - } - - // t.Params ([]uint8) (slice) - if len(t.Params) > cbg.ByteArrayMaxLen { - return xerrors.Errorf("Byte array in field t.Params was too long") - } - - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajByteString, uint64(len(t.Params)))); err != nil { - return err - } - if _, err := w.Write(t.Params); err != nil { - return err - } - - // t.Approved ([]address.Address) (slice) - if len(t.Approved) > cbg.MaxLength { - return xerrors.Errorf("Slice value in field t.Approved was too long") - } - - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajArray, uint64(len(t.Approved)))); err != nil { - return err - } - for _, v := range t.Approved { - if err := v.MarshalCBOR(w); err != nil { - return err - } - } - - // t.Complete (bool) (bool) - if err := cbg.WriteBool(w, t.Complete); err != nil { - return err - } - - // t.Canceled (bool) (bool) - if err := cbg.WriteBool(w, t.Canceled); err != nil { - return err - } - - // t.RetCode (uint64) (uint64) - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajUnsignedInt, uint64(t.RetCode))); err != nil { - return err - } - return nil -} - -func (t *MTransaction) UnmarshalCBOR(r io.Reader) error { - br := cbg.GetPeeker(r) - - maj, extra, err := cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 10 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // 
t.Created (uint64) (uint64) - - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.Created = uint64(extra) - // t.TxID (uint64) (uint64) - - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.TxID = uint64(extra) - // t.To (address.Address) (struct) - - { - - if err := t.To.UnmarshalCBOR(br); err != nil { - return err - } - - } - // t.Value (types.BigInt) (struct) - - { - - if err := t.Value.UnmarshalCBOR(br); err != nil { - return err - } - - } - // t.Method (uint64) (uint64) - - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.Method = uint64(extra) - // t.Params ([]uint8) (slice) - - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err - } - - if extra > cbg.ByteArrayMaxLen { - return fmt.Errorf("t.Params: byte array too large (%d)", extra) - } - if maj != cbg.MajByteString { - return fmt.Errorf("expected byte array") - } - t.Params = make([]byte, extra) - if _, err := io.ReadFull(br, t.Params); err != nil { - return err - } - // t.Approved ([]address.Address) (slice) - - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err - } - - if extra > cbg.MaxLength { - return fmt.Errorf("t.Approved: array too large (%d)", extra) - } - - if maj != cbg.MajArray { - return fmt.Errorf("expected cbor array") - } - if extra > 0 { - t.Approved = make([]address.Address, extra) - } - for i := 0; i < int(extra); i++ { - - var v address.Address - if err := v.UnmarshalCBOR(br); err != nil { - return err - } - - t.Approved[i] = v - } - - // t.Complete (bool) (bool) - - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajOther { - return fmt.Errorf("booleans 
must be major type 7") - } - switch extra { - case 20: - t.Complete = false - case 21: - t.Complete = true - default: - return fmt.Errorf("booleans are either major type 7, value 20 or 21 (got %d)", extra) - } - // t.Canceled (bool) (bool) - - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajOther { - return fmt.Errorf("booleans must be major type 7") - } - switch extra { - case 20: - t.Canceled = false - case 21: - t.Canceled = true - default: - return fmt.Errorf("booleans are either major type 7, value 20 or 21 (got %d)", extra) - } - // t.RetCode (uint64) (uint64) - - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.RetCode = uint64(extra) - return nil -} - -func (t *MultiSigRemoveSignerParam) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write([]byte{130}); err != nil { - return err - } - - // t.Signer (address.Address) (struct) - if err := t.Signer.MarshalCBOR(w); err != nil { - return err - } - - // t.Decrease (bool) (bool) - if err := cbg.WriteBool(w, t.Decrease); err != nil { - return err - } - return nil -} - -func (t *MultiSigRemoveSignerParam) UnmarshalCBOR(r io.Reader) error { - br := cbg.GetPeeker(r) - - maj, extra, err := cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 2 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.Signer (address.Address) (struct) - - { - - if err := t.Signer.UnmarshalCBOR(br); err != nil { - return err - } - - } - // t.Decrease (bool) (bool) - - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajOther { - return fmt.Errorf("booleans must be major type 7") - } - switch extra { - case 20: - t.Decrease = false - case 
21: - t.Decrease = true - default: - return fmt.Errorf("booleans are either major type 7, value 20 or 21 (got %d)", extra) - } - return nil -} - -func (t *MultiSigAddSignerParam) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write([]byte{130}); err != nil { - return err - } - - // t.Signer (address.Address) (struct) - if err := t.Signer.MarshalCBOR(w); err != nil { - return err - } - - // t.Increase (bool) (bool) - if err := cbg.WriteBool(w, t.Increase); err != nil { - return err - } - return nil -} - -func (t *MultiSigAddSignerParam) UnmarshalCBOR(r io.Reader) error { - br := cbg.GetPeeker(r) - - maj, extra, err := cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 2 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.Signer (address.Address) (struct) - - { - - if err := t.Signer.UnmarshalCBOR(br); err != nil { - return err - } - - } - // t.Increase (bool) (bool) - - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajOther { - return fmt.Errorf("booleans must be major type 7") - } - switch extra { - case 20: - t.Increase = false - case 21: - t.Increase = true - default: - return fmt.Errorf("booleans are either major type 7, value 20 or 21 (got %d)", extra) - } - return nil -} - -func (t *PaymentChannelActorState) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write([]byte{134}); err != nil { - return err - } - - // t.From (address.Address) (struct) - if err := t.From.MarshalCBOR(w); err != nil { - return err - } - - // t.To (address.Address) (struct) - if err := t.To.MarshalCBOR(w); err != nil { - return err - } - - // t.ToSend (types.BigInt) (struct) - if err := t.ToSend.MarshalCBOR(w); err != nil { - return err - } - - // t.ClosingAt (uint64) 
(uint64) - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajUnsignedInt, uint64(t.ClosingAt))); err != nil { - return err - } - - // t.MinCloseHeight (uint64) (uint64) - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajUnsignedInt, uint64(t.MinCloseHeight))); err != nil { - return err - } - - // t.LaneStates (map[string]*actors.LaneState) (map) - { - if len(t.LaneStates) > 4096 { - return xerrors.Errorf("cannot marshal t.LaneStates map too large") - } - - if err := cbg.CborWriteHeader(w, cbg.MajMap, uint64(len(t.LaneStates))); err != nil { - return err - } - - keys := make([]string, 0, len(t.LaneStates)) - for k := range t.LaneStates { - keys = append(keys, k) - } - sort.Strings(keys) - for _, k := range keys { - v := t.LaneStates[k] - - if len(k) > cbg.MaxLength { - return xerrors.Errorf("Value in field k was too long") - } - - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajTextString, uint64(len(k)))); err != nil { - return err - } - if _, err := w.Write([]byte(k)); err != nil { - return err - } - - if err := v.MarshalCBOR(w); err != nil { - return err - } - - } - } - return nil -} - -func (t *PaymentChannelActorState) UnmarshalCBOR(r io.Reader) error { - br := cbg.GetPeeker(r) - - maj, extra, err := cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 6 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.From (address.Address) (struct) - - { - - if err := t.From.UnmarshalCBOR(br); err != nil { - return err - } - - } - // t.To (address.Address) (struct) - - { - - if err := t.To.UnmarshalCBOR(br); err != nil { - return err - } - - } - // t.ToSend (types.BigInt) (struct) - - { - - if err := t.ToSend.UnmarshalCBOR(br); err != nil { - return err - } - - } - // t.ClosingAt (uint64) (uint64) - - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return 
fmt.Errorf("wrong type for uint64 field") - } - t.ClosingAt = uint64(extra) - // t.MinCloseHeight (uint64) (uint64) - - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.MinCloseHeight = uint64(extra) - // t.LaneStates (map[string]*actors.LaneState) (map) - - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajMap { - return fmt.Errorf("expected a map (major type 5)") - } - if extra > 4096 { - return fmt.Errorf("t.LaneStates: map too large") - } - - t.LaneStates = make(map[string]*LaneState, extra) - - for i, l := 0, int(extra); i < l; i++ { - - var k string - - { - sval, err := cbg.ReadString(br) - if err != nil { - return err - } - - k = string(sval) - } - - var v *LaneState - - { - - pb, err := br.PeekByte() - if err != nil { - return err - } - if pb == cbg.CborNull[0] { - var nbuf [1]byte - if _, err := br.Read(nbuf[:]); err != nil { - return err - } - } else { - v = new(LaneState) - if err := v.UnmarshalCBOR(br); err != nil { - return err - } - } - - } - - t.LaneStates[k] = v - - } - return nil -} - -func (t *PCAConstructorParams) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write([]byte{129}); err != nil { - return err - } - - // t.To (address.Address) (struct) - if err := t.To.MarshalCBOR(w); err != nil { - return err - } - return nil -} - -func (t *PCAConstructorParams) UnmarshalCBOR(r io.Reader) error { - br := cbg.GetPeeker(r) - - maj, extra, err := cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 1 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.To (address.Address) (struct) - - { - - if err := t.To.UnmarshalCBOR(br); err != nil { - return err - } - - } - return nil -} - -func (t 
*LaneState) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write([]byte{131}); err != nil { - return err - } - - // t.Closed (bool) (bool) - if err := cbg.WriteBool(w, t.Closed); err != nil { - return err - } - - // t.Redeemed (types.BigInt) (struct) - if err := t.Redeemed.MarshalCBOR(w); err != nil { - return err - } - - // t.Nonce (uint64) (uint64) - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajUnsignedInt, uint64(t.Nonce))); err != nil { - return err - } - return nil -} - -func (t *LaneState) UnmarshalCBOR(r io.Reader) error { - br := cbg.GetPeeker(r) - - maj, extra, err := cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 3 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.Closed (bool) (bool) - - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajOther { - return fmt.Errorf("booleans must be major type 7") - } - switch extra { - case 20: - t.Closed = false - case 21: - t.Closed = true - default: - return fmt.Errorf("booleans are either major type 7, value 20 or 21 (got %d)", extra) - } - // t.Redeemed (types.BigInt) (struct) - - { - - if err := t.Redeemed.UnmarshalCBOR(br); err != nil { - return err - } - - } - // t.Nonce (uint64) (uint64) - - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.Nonce = uint64(extra) - return nil -} - -func (t *PCAUpdateChannelStateParams) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write([]byte{131}); err != nil { - return err - } - - // t.Sv (types.SignedVoucher) (struct) - if err := t.Sv.MarshalCBOR(w); err != nil { - return err - } - - // t.Secret ([]uint8) (slice) - if len(t.Secret) 
> cbg.ByteArrayMaxLen { - return xerrors.Errorf("Byte array in field t.Secret was too long") - } - - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajByteString, uint64(len(t.Secret)))); err != nil { - return err - } - if _, err := w.Write(t.Secret); err != nil { - return err - } - - // t.Proof ([]uint8) (slice) - if len(t.Proof) > cbg.ByteArrayMaxLen { - return xerrors.Errorf("Byte array in field t.Proof was too long") - } - - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajByteString, uint64(len(t.Proof)))); err != nil { - return err - } - if _, err := w.Write(t.Proof); err != nil { - return err - } - return nil -} - -func (t *PCAUpdateChannelStateParams) UnmarshalCBOR(r io.Reader) error { - br := cbg.GetPeeker(r) - - maj, extra, err := cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 3 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.Sv (types.SignedVoucher) (struct) - - { - - if err := t.Sv.UnmarshalCBOR(br); err != nil { - return err - } - - } - // t.Secret ([]uint8) (slice) - - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err - } - - if extra > cbg.ByteArrayMaxLen { - return fmt.Errorf("t.Secret: byte array too large (%d)", extra) - } - if maj != cbg.MajByteString { - return fmt.Errorf("expected byte array") - } - t.Secret = make([]byte, extra) - if _, err := io.ReadFull(br, t.Secret); err != nil { - return err - } - // t.Proof ([]uint8) (slice) - - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err - } - - if extra > cbg.ByteArrayMaxLen { - return fmt.Errorf("t.Proof: byte array too large (%d)", extra) - } - if maj != cbg.MajByteString { - return fmt.Errorf("expected byte array") - } - t.Proof = make([]byte, extra) - if _, err := io.ReadFull(br, t.Proof); err != nil { - return err - } - return nil -} - -func (t *PaymentInfo) MarshalCBOR(w io.Writer) error { - if t == 
nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write([]byte{132}); err != nil { - return err - } - - // t.PayChActor (address.Address) (struct) - if err := t.PayChActor.MarshalCBOR(w); err != nil { - return err - } - - // t.Payer (address.Address) (struct) - if err := t.Payer.MarshalCBOR(w); err != nil { - return err - } - - // t.ChannelMessage (cid.Cid) (struct) - - if t.ChannelMessage == nil { - if _, err := w.Write(cbg.CborNull); err != nil { - return err - } - } else { - if err := cbg.WriteCid(w, *t.ChannelMessage); err != nil { - return xerrors.Errorf("failed to write cid field t.ChannelMessage: %w", err) - } - } - - // t.Vouchers ([]*types.SignedVoucher) (slice) - if len(t.Vouchers) > cbg.MaxLength { - return xerrors.Errorf("Slice value in field t.Vouchers was too long") - } - - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajArray, uint64(len(t.Vouchers)))); err != nil { - return err - } - for _, v := range t.Vouchers { - if err := v.MarshalCBOR(w); err != nil { - return err - } - } - return nil -} - -func (t *PaymentInfo) UnmarshalCBOR(r io.Reader) error { - br := cbg.GetPeeker(r) - - maj, extra, err := cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 4 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.PayChActor (address.Address) (struct) - - { - - if err := t.PayChActor.UnmarshalCBOR(br); err != nil { - return err - } - - } - // t.Payer (address.Address) (struct) - - { - - if err := t.Payer.UnmarshalCBOR(br); err != nil { - return err - } - - } - // t.ChannelMessage (cid.Cid) (struct) - - { - - pb, err := br.PeekByte() - if err != nil { - return err - } - if pb == cbg.CborNull[0] { - var nbuf [1]byte - if _, err := br.Read(nbuf[:]); err != nil { - return err - } - } else { - - c, err := cbg.ReadCid(br) - if err != nil { - return xerrors.Errorf("failed to read cid field t.ChannelMessage: 
%w", err) - } - - t.ChannelMessage = &c - } - - } - // t.Vouchers ([]*types.SignedVoucher) (slice) - - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err - } - - if extra > cbg.MaxLength { - return fmt.Errorf("t.Vouchers: array too large (%d)", extra) - } - - if maj != cbg.MajArray { - return fmt.Errorf("expected cbor array") - } - if extra > 0 { - t.Vouchers = make([]*types.SignedVoucher, extra) - } - for i := 0; i < int(extra); i++ { - - var v types.SignedVoucher - if err := v.UnmarshalCBOR(br); err != nil { - return err - } - - t.Vouchers[i] = &v - } - - return nil -} - -func (t *StoragePowerState) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write([]byte{133}); err != nil { - return err - } - - // t.Miners (cid.Cid) (struct) - - if err := cbg.WriteCid(w, t.Miners); err != nil { - return xerrors.Errorf("failed to write cid field t.Miners: %w", err) - } - - // t.ProvingBuckets (cid.Cid) (struct) - - if err := cbg.WriteCid(w, t.ProvingBuckets); err != nil { - return xerrors.Errorf("failed to write cid field t.ProvingBuckets: %w", err) - } - - // t.MinerCount (uint64) (uint64) - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajUnsignedInt, uint64(t.MinerCount))); err != nil { - return err - } - - // t.LastMinerCheck (uint64) (uint64) - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajUnsignedInt, uint64(t.LastMinerCheck))); err != nil { - return err - } - - // t.TotalStorage (types.BigInt) (struct) - if err := t.TotalStorage.MarshalCBOR(w); err != nil { - return err - } - return nil -} - -func (t *StoragePowerState) UnmarshalCBOR(r io.Reader) error { - br := cbg.GetPeeker(r) - - maj, extra, err := cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 5 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.Miners (cid.Cid) (struct) - - { - 
- c, err := cbg.ReadCid(br) - if err != nil { - return xerrors.Errorf("failed to read cid field t.Miners: %w", err) - } - - t.Miners = c - - } - // t.ProvingBuckets (cid.Cid) (struct) - - { - - c, err := cbg.ReadCid(br) - if err != nil { - return xerrors.Errorf("failed to read cid field t.ProvingBuckets: %w", err) - } - - t.ProvingBuckets = c - - } - // t.MinerCount (uint64) (uint64) - - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.MinerCount = uint64(extra) - // t.LastMinerCheck (uint64) (uint64) - - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.LastMinerCheck = uint64(extra) - // t.TotalStorage (types.BigInt) (struct) - - { - - if err := t.TotalStorage.UnmarshalCBOR(br); err != nil { - return err - } - - } - return nil -} - -func (t *CreateStorageMinerParams) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write([]byte{132}); err != nil { - return err - } - - // t.Owner (address.Address) (struct) - if err := t.Owner.MarshalCBOR(w); err != nil { - return err - } - - // t.Worker (address.Address) (struct) - if err := t.Worker.MarshalCBOR(w); err != nil { - return err - } - - // t.SectorSize (uint64) (uint64) - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajUnsignedInt, uint64(t.SectorSize))); err != nil { - return err - } - - // t.PeerID (peer.ID) (string) - if len(t.PeerID) > cbg.MaxLength { - return xerrors.Errorf("Value in field t.PeerID was too long") - } - - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajTextString, uint64(len(t.PeerID)))); err != nil { - return err - } - if _, err := w.Write([]byte(t.PeerID)); err != nil { - return err - } - return nil -} - -func (t *CreateStorageMinerParams) UnmarshalCBOR(r io.Reader) error { - br := 
cbg.GetPeeker(r) - - maj, extra, err := cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 4 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.Owner (address.Address) (struct) - - { - - if err := t.Owner.UnmarshalCBOR(br); err != nil { - return err - } - - } - // t.Worker (address.Address) (struct) - - { - - if err := t.Worker.UnmarshalCBOR(br); err != nil { - return err - } - - } - // t.SectorSize (uint64) (uint64) - - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.SectorSize = uint64(extra) - // t.PeerID (peer.ID) (string) - - { - sval, err := cbg.ReadString(br) - if err != nil { - return err - } - - t.PeerID = peer.ID(sval) - } - return nil -} - -func (t *IsValidMinerParam) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write([]byte{129}); err != nil { - return err - } - - // t.Addr (address.Address) (struct) - if err := t.Addr.MarshalCBOR(w); err != nil { - return err - } - return nil -} - -func (t *IsValidMinerParam) UnmarshalCBOR(r io.Reader) error { - br := cbg.GetPeeker(r) - - maj, extra, err := cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 1 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.Addr (address.Address) (struct) - - { - - if err := t.Addr.UnmarshalCBOR(br); err != nil { - return err - } - - } - return nil -} - -func (t *PowerLookupParams) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write([]byte{129}); err != nil { - return err - } - - // t.Miner (address.Address) (struct) - if err := t.Miner.MarshalCBOR(w); err 
!= nil { - return err - } - return nil -} - -func (t *PowerLookupParams) UnmarshalCBOR(r io.Reader) error { - br := cbg.GetPeeker(r) - - maj, extra, err := cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 1 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.Miner (address.Address) (struct) - - { - - if err := t.Miner.UnmarshalCBOR(br); err != nil { - return err - } - - } - return nil -} - -func (t *UpdateStorageParams) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write([]byte{131}); err != nil { - return err - } - - // t.Delta (types.BigInt) (struct) - if err := t.Delta.MarshalCBOR(w); err != nil { - return err - } - - // t.NextSlashDeadline (uint64) (uint64) - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajUnsignedInt, uint64(t.NextSlashDeadline))); err != nil { - return err - } - - // t.PreviousSlashDeadline (uint64) (uint64) - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajUnsignedInt, uint64(t.PreviousSlashDeadline))); err != nil { - return err - } - return nil -} - -func (t *UpdateStorageParams) UnmarshalCBOR(r io.Reader) error { - br := cbg.GetPeeker(r) - - maj, extra, err := cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 3 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.Delta (types.BigInt) (struct) - - { - - if err := t.Delta.UnmarshalCBOR(br); err != nil { - return err - } - - } - // t.NextSlashDeadline (uint64) (uint64) - - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.NextSlashDeadline = uint64(extra) - // t.PreviousSlashDeadline (uint64) (uint64) - - maj, extra, err = 
cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.PreviousSlashDeadline = uint64(extra) - return nil -} - -func (t *ArbitrateConsensusFaultParams) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write([]byte{130}); err != nil { - return err - } - - // t.Block1 (types.BlockHeader) (struct) - if err := t.Block1.MarshalCBOR(w); err != nil { - return err - } - - // t.Block2 (types.BlockHeader) (struct) - if err := t.Block2.MarshalCBOR(w); err != nil { - return err - } - return nil -} - -func (t *ArbitrateConsensusFaultParams) UnmarshalCBOR(r io.Reader) error { - br := cbg.GetPeeker(r) - - maj, extra, err := cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 2 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.Block1 (types.BlockHeader) (struct) - - { - - pb, err := br.PeekByte() - if err != nil { - return err - } - if pb == cbg.CborNull[0] { - var nbuf [1]byte - if _, err := br.Read(nbuf[:]); err != nil { - return err - } - } else { - t.Block1 = new(types.BlockHeader) - if err := t.Block1.UnmarshalCBOR(br); err != nil { - return err - } - } - - } - // t.Block2 (types.BlockHeader) (struct) - - { - - pb, err := br.PeekByte() - if err != nil { - return err - } - if pb == cbg.CborNull[0] { - var nbuf [1]byte - if _, err := br.Read(nbuf[:]); err != nil { - return err - } - } else { - t.Block2 = new(types.BlockHeader) - if err := t.Block2.UnmarshalCBOR(br); err != nil { - return err - } - } - - } - return nil -} - -func (t *PledgeCollateralParams) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write([]byte{129}); err != nil { - return err - } - - // t.Size (types.BigInt) (struct) - if err := 
t.Size.MarshalCBOR(w); err != nil { - return err - } - return nil -} - -func (t *PledgeCollateralParams) UnmarshalCBOR(r io.Reader) error { - br := cbg.GetPeeker(r) - - maj, extra, err := cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 1 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.Size (types.BigInt) (struct) - - { - - if err := t.Size.UnmarshalCBOR(br); err != nil { - return err - } - - } - return nil -} - -func (t *MinerSlashConsensusFault) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write([]byte{131}); err != nil { - return err - } - - // t.Slasher (address.Address) (struct) - if err := t.Slasher.MarshalCBOR(w); err != nil { - return err - } - - // t.AtHeight (uint64) (uint64) - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajUnsignedInt, uint64(t.AtHeight))); err != nil { - return err - } - - // t.SlashedCollateral (types.BigInt) (struct) - if err := t.SlashedCollateral.MarshalCBOR(w); err != nil { - return err - } - return nil -} - -func (t *MinerSlashConsensusFault) UnmarshalCBOR(r io.Reader) error { - br := cbg.GetPeeker(r) - - maj, extra, err := cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 3 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.Slasher (address.Address) (struct) - - { - - if err := t.Slasher.UnmarshalCBOR(br); err != nil { - return err - } - - } - // t.AtHeight (uint64) (uint64) - - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.AtHeight = uint64(extra) - // t.SlashedCollateral (types.BigInt) (struct) - - { - - if err := t.SlashedCollateral.UnmarshalCBOR(br); err 
!= nil { - return err - } - - } - return nil -} - -func (t *StorageParticipantBalance) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write([]byte{130}); err != nil { - return err - } - - // t.Locked (types.BigInt) (struct) - if err := t.Locked.MarshalCBOR(w); err != nil { - return err - } - - // t.Available (types.BigInt) (struct) - if err := t.Available.MarshalCBOR(w); err != nil { - return err - } - return nil -} - -func (t *StorageParticipantBalance) UnmarshalCBOR(r io.Reader) error { - br := cbg.GetPeeker(r) - - maj, extra, err := cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 2 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.Locked (types.BigInt) (struct) - - { - - if err := t.Locked.UnmarshalCBOR(br); err != nil { - return err - } - - } - // t.Available (types.BigInt) (struct) - - { - - if err := t.Available.UnmarshalCBOR(br); err != nil { - return err - } - - } - return nil -} - -func (t *StorageMarketState) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write([]byte{131}); err != nil { - return err - } - - // t.Balances (cid.Cid) (struct) - - if err := cbg.WriteCid(w, t.Balances); err != nil { - return xerrors.Errorf("failed to write cid field t.Balances: %w", err) - } - - // t.Deals (cid.Cid) (struct) - - if err := cbg.WriteCid(w, t.Deals); err != nil { - return xerrors.Errorf("failed to write cid field t.Deals: %w", err) - } - - // t.NextDealID (uint64) (uint64) - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajUnsignedInt, uint64(t.NextDealID))); err != nil { - return err - } - return nil -} - -func (t *StorageMarketState) UnmarshalCBOR(r io.Reader) error { - br := cbg.GetPeeker(r) - - maj, extra, err := cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj 
!= cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 3 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.Balances (cid.Cid) (struct) - - { - - c, err := cbg.ReadCid(br) - if err != nil { - return xerrors.Errorf("failed to read cid field t.Balances: %w", err) - } - - t.Balances = c - - } - // t.Deals (cid.Cid) (struct) - - { - - c, err := cbg.ReadCid(br) - if err != nil { - return xerrors.Errorf("failed to read cid field t.Deals: %w", err) - } - - t.Deals = c - - } - // t.NextDealID (uint64) (uint64) - - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.NextDealID = uint64(extra) - return nil -} - -func (t *WithdrawBalanceParams) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write([]byte{129}); err != nil { - return err - } - - // t.Balance (types.BigInt) (struct) - if err := t.Balance.MarshalCBOR(w); err != nil { - return err - } - return nil -} - -func (t *WithdrawBalanceParams) UnmarshalCBOR(r io.Reader) error { - br := cbg.GetPeeker(r) - - maj, extra, err := cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 1 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.Balance (types.BigInt) (struct) - - { - - if err := t.Balance.UnmarshalCBOR(br); err != nil { - return err - } - - } - return nil -} - -func (t *StorageDealProposal) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write([]byte{137}); err != nil { - return err - } - - // t.PieceRef ([]uint8) (slice) - if len(t.PieceRef) > cbg.ByteArrayMaxLen { - return xerrors.Errorf("Byte array in field t.PieceRef was too long") - } - - if _, err := 
w.Write(cbg.CborEncodeMajorType(cbg.MajByteString, uint64(len(t.PieceRef)))); err != nil { - return err - } - if _, err := w.Write(t.PieceRef); err != nil { - return err - } - - // t.PieceSize (uint64) (uint64) - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajUnsignedInt, uint64(t.PieceSize))); err != nil { - return err - } - - // t.Client (address.Address) (struct) - if err := t.Client.MarshalCBOR(w); err != nil { - return err - } - - // t.Provider (address.Address) (struct) - if err := t.Provider.MarshalCBOR(w); err != nil { - return err - } - - // t.ProposalExpiration (uint64) (uint64) - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajUnsignedInt, uint64(t.ProposalExpiration))); err != nil { - return err - } - - // t.Duration (uint64) (uint64) - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajUnsignedInt, uint64(t.Duration))); err != nil { - return err - } - - // t.StoragePricePerEpoch (types.BigInt) (struct) - if err := t.StoragePricePerEpoch.MarshalCBOR(w); err != nil { - return err - } - - // t.StorageCollateral (types.BigInt) (struct) - if err := t.StorageCollateral.MarshalCBOR(w); err != nil { - return err - } - - // t.ProposerSignature (types.Signature) (struct) - if err := t.ProposerSignature.MarshalCBOR(w); err != nil { - return err - } - return nil -} - -func (t *StorageDealProposal) UnmarshalCBOR(r io.Reader) error { - br := cbg.GetPeeker(r) - - maj, extra, err := cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 9 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.PieceRef ([]uint8) (slice) - - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err - } - - if extra > cbg.ByteArrayMaxLen { - return fmt.Errorf("t.PieceRef: byte array too large (%d)", extra) - } - if maj != cbg.MajByteString { - return fmt.Errorf("expected byte array") - } - t.PieceRef = make([]byte, extra) - if _, err := 
io.ReadFull(br, t.PieceRef); err != nil { - return err - } - // t.PieceSize (uint64) (uint64) - - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.PieceSize = uint64(extra) - // t.Client (address.Address) (struct) - - { - - if err := t.Client.UnmarshalCBOR(br); err != nil { - return err - } - - } - // t.Provider (address.Address) (struct) - - { - - if err := t.Provider.UnmarshalCBOR(br); err != nil { - return err - } - - } - // t.ProposalExpiration (uint64) (uint64) - - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.ProposalExpiration = uint64(extra) - // t.Duration (uint64) (uint64) - - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.Duration = uint64(extra) - // t.StoragePricePerEpoch (types.BigInt) (struct) - - { - - if err := t.StoragePricePerEpoch.UnmarshalCBOR(br); err != nil { - return err - } - - } - // t.StorageCollateral (types.BigInt) (struct) - - { - - if err := t.StorageCollateral.UnmarshalCBOR(br); err != nil { - return err - } - - } - // t.ProposerSignature (types.Signature) (struct) - - { - - pb, err := br.PeekByte() - if err != nil { - return err - } - if pb == cbg.CborNull[0] { - var nbuf [1]byte - if _, err := br.Read(nbuf[:]); err != nil { - return err - } - } else { - t.ProposerSignature = new(types.Signature) - if err := t.ProposerSignature.UnmarshalCBOR(br); err != nil { - return err - } - } - - } - return nil -} - -func (t *PublishStorageDealsParams) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write([]byte{129}); err != nil { - return err - } - - // t.Deals ([]actors.StorageDealProposal) (slice) - if len(t.Deals) > 
cbg.MaxLength { - return xerrors.Errorf("Slice value in field t.Deals was too long") - } - - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajArray, uint64(len(t.Deals)))); err != nil { - return err - } - for _, v := range t.Deals { - if err := v.MarshalCBOR(w); err != nil { - return err - } - } - return nil -} - -func (t *PublishStorageDealsParams) UnmarshalCBOR(r io.Reader) error { - br := cbg.GetPeeker(r) - - maj, extra, err := cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 1 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.Deals ([]actors.StorageDealProposal) (slice) - - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err - } - - if extra > cbg.MaxLength { - return fmt.Errorf("t.Deals: array too large (%d)", extra) - } - - if maj != cbg.MajArray { - return fmt.Errorf("expected cbor array") - } - if extra > 0 { - t.Deals = make([]StorageDealProposal, extra) - } - for i := 0; i < int(extra); i++ { - - var v StorageDealProposal - if err := v.UnmarshalCBOR(br); err != nil { - return err - } - - t.Deals[i] = v - } - - return nil -} - -func (t *PublishStorageDealResponse) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write([]byte{129}); err != nil { - return err - } - - // t.DealIDs ([]uint64) (slice) - if len(t.DealIDs) > cbg.MaxLength { - return xerrors.Errorf("Slice value in field t.DealIDs was too long") - } - - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajArray, uint64(len(t.DealIDs)))); err != nil { - return err - } - for _, v := range t.DealIDs { - if err := cbg.CborWriteHeader(w, cbg.MajUnsignedInt, v); err != nil { - return err - } - } - return nil -} - -func (t *PublishStorageDealResponse) UnmarshalCBOR(r io.Reader) error { - br := cbg.GetPeeker(r) - - maj, extra, err := cbg.CborReadHeader(br) - if err != nil { - 
return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 1 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.DealIDs ([]uint64) (slice) - - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err - } - - if extra > cbg.MaxLength { - return fmt.Errorf("t.DealIDs: array too large (%d)", extra) - } - - if maj != cbg.MajArray { - return fmt.Errorf("expected cbor array") - } - if extra > 0 { - t.DealIDs = make([]uint64, extra) - } - for i := 0; i < int(extra); i++ { - - maj, val, err := cbg.CborReadHeader(br) - if err != nil { - return xerrors.Errorf("failed to read uint64 for t.DealIDs slice: %w", err) - } - - if maj != cbg.MajUnsignedInt { - return xerrors.Errorf("value read for array t.DealIDs was not a uint, instead got %d", maj) - } - - t.DealIDs[i] = val - } - - return nil -} - -func (t *ActivateStorageDealsParams) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write([]byte{129}); err != nil { - return err - } - - // t.Deals ([]uint64) (slice) - if len(t.Deals) > cbg.MaxLength { - return xerrors.Errorf("Slice value in field t.Deals was too long") - } - - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajArray, uint64(len(t.Deals)))); err != nil { - return err - } - for _, v := range t.Deals { - if err := cbg.CborWriteHeader(w, cbg.MajUnsignedInt, v); err != nil { - return err - } - } - return nil -} - -func (t *ActivateStorageDealsParams) UnmarshalCBOR(r io.Reader) error { - br := cbg.GetPeeker(r) - - maj, extra, err := cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 1 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.Deals ([]uint64) (slice) - - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err - } - - if extra > cbg.MaxLength { 
- return fmt.Errorf("t.Deals: array too large (%d)", extra) - } - - if maj != cbg.MajArray { - return fmt.Errorf("expected cbor array") - } - if extra > 0 { - t.Deals = make([]uint64, extra) - } - for i := 0; i < int(extra); i++ { - - maj, val, err := cbg.CborReadHeader(br) - if err != nil { - return xerrors.Errorf("failed to read uint64 for t.Deals slice: %w", err) - } - - if maj != cbg.MajUnsignedInt { - return xerrors.Errorf("value read for array t.Deals was not a uint, instead got %d", maj) - } - - t.Deals[i] = val - } - - return nil -} - -func (t *ProcessStorageDealsPaymentParams) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write([]byte{129}); err != nil { - return err - } - - // t.DealIDs ([]uint64) (slice) - if len(t.DealIDs) > cbg.MaxLength { - return xerrors.Errorf("Slice value in field t.DealIDs was too long") - } - - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajArray, uint64(len(t.DealIDs)))); err != nil { - return err - } - for _, v := range t.DealIDs { - if err := cbg.CborWriteHeader(w, cbg.MajUnsignedInt, v); err != nil { - return err - } - } - return nil -} - -func (t *ProcessStorageDealsPaymentParams) UnmarshalCBOR(r io.Reader) error { - br := cbg.GetPeeker(r) - - maj, extra, err := cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 1 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.DealIDs ([]uint64) (slice) - - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err - } - - if extra > cbg.MaxLength { - return fmt.Errorf("t.DealIDs: array too large (%d)", extra) - } - - if maj != cbg.MajArray { - return fmt.Errorf("expected cbor array") - } - if extra > 0 { - t.DealIDs = make([]uint64, extra) - } - for i := 0; i < int(extra); i++ { - - maj, val, err := cbg.CborReadHeader(br) - if err != nil { - return 
xerrors.Errorf("failed to read uint64 for t.DealIDs slice: %w", err) - } - - if maj != cbg.MajUnsignedInt { - return xerrors.Errorf("value read for array t.DealIDs was not a uint, instead got %d", maj) - } - - t.DealIDs[i] = val - } - - return nil -} - -func (t *OnChainDeal) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write([]byte{137}); err != nil { - return err - } - - // t.PieceRef ([]uint8) (slice) - if len(t.PieceRef) > cbg.ByteArrayMaxLen { - return xerrors.Errorf("Byte array in field t.PieceRef was too long") - } - - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajByteString, uint64(len(t.PieceRef)))); err != nil { - return err - } - if _, err := w.Write(t.PieceRef); err != nil { - return err - } - - // t.PieceSize (uint64) (uint64) - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajUnsignedInt, uint64(t.PieceSize))); err != nil { - return err - } - - // t.Client (address.Address) (struct) - if err := t.Client.MarshalCBOR(w); err != nil { - return err - } - - // t.Provider (address.Address) (struct) - if err := t.Provider.MarshalCBOR(w); err != nil { - return err - } - - // t.ProposalExpiration (uint64) (uint64) - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajUnsignedInt, uint64(t.ProposalExpiration))); err != nil { - return err - } - - // t.Duration (uint64) (uint64) - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajUnsignedInt, uint64(t.Duration))); err != nil { - return err - } - - // t.StoragePricePerEpoch (types.BigInt) (struct) - if err := t.StoragePricePerEpoch.MarshalCBOR(w); err != nil { - return err - } - - // t.StorageCollateral (types.BigInt) (struct) - if err := t.StorageCollateral.MarshalCBOR(w); err != nil { - return err - } - - // t.ActivationEpoch (uint64) (uint64) - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajUnsignedInt, uint64(t.ActivationEpoch))); err != nil { - return err - } - return nil -} - -func (t *OnChainDeal) UnmarshalCBOR(r 
io.Reader) error { - br := cbg.GetPeeker(r) - - maj, extra, err := cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 9 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.PieceRef ([]uint8) (slice) - - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err - } - - if extra > cbg.ByteArrayMaxLen { - return fmt.Errorf("t.PieceRef: byte array too large (%d)", extra) - } - if maj != cbg.MajByteString { - return fmt.Errorf("expected byte array") - } - t.PieceRef = make([]byte, extra) - if _, err := io.ReadFull(br, t.PieceRef); err != nil { - return err - } - // t.PieceSize (uint64) (uint64) - - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.PieceSize = uint64(extra) - // t.Client (address.Address) (struct) - - { - - if err := t.Client.UnmarshalCBOR(br); err != nil { - return err - } - - } - // t.Provider (address.Address) (struct) - - { - - if err := t.Provider.UnmarshalCBOR(br); err != nil { - return err - } - - } - // t.ProposalExpiration (uint64) (uint64) - - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.ProposalExpiration = uint64(extra) - // t.Duration (uint64) (uint64) - - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.Duration = uint64(extra) - // t.StoragePricePerEpoch (types.BigInt) (struct) - - { - - if err := t.StoragePricePerEpoch.UnmarshalCBOR(br); err != nil { - return err - } - - } - // t.StorageCollateral (types.BigInt) (struct) - - { - - if err := t.StorageCollateral.UnmarshalCBOR(br); err != nil { - return err - } - - } - // t.ActivationEpoch 
(uint64) (uint64) - - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.ActivationEpoch = uint64(extra) - return nil -} - -func (t *ComputeDataCommitmentParams) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write([]byte{130}); err != nil { - return err - } - - // t.DealIDs ([]uint64) (slice) - if len(t.DealIDs) > cbg.MaxLength { - return xerrors.Errorf("Slice value in field t.DealIDs was too long") - } - - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajArray, uint64(len(t.DealIDs)))); err != nil { - return err - } - for _, v := range t.DealIDs { - if err := cbg.CborWriteHeader(w, cbg.MajUnsignedInt, v); err != nil { - return err - } - } - - // t.SectorSize (uint64) (uint64) - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajUnsignedInt, uint64(t.SectorSize))); err != nil { - return err - } - return nil -} - -func (t *ComputeDataCommitmentParams) UnmarshalCBOR(r io.Reader) error { - br := cbg.GetPeeker(r) - - maj, extra, err := cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 2 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.DealIDs ([]uint64) (slice) - - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err - } - - if extra > cbg.MaxLength { - return fmt.Errorf("t.DealIDs: array too large (%d)", extra) - } - - if maj != cbg.MajArray { - return fmt.Errorf("expected cbor array") - } - if extra > 0 { - t.DealIDs = make([]uint64, extra) - } - for i := 0; i < int(extra); i++ { - - maj, val, err := cbg.CborReadHeader(br) - if err != nil { - return xerrors.Errorf("failed to read uint64 for t.DealIDs slice: %w", err) - } - - if maj != cbg.MajUnsignedInt { - return xerrors.Errorf("value read for array t.DealIDs was not a 
uint, instead got %d", maj) - } - - t.DealIDs[i] = val - } - - // t.SectorSize (uint64) (uint64) - - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.SectorSize = uint64(extra) - return nil -} - -func (t *SectorProveCommitInfo) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write([]byte{131}); err != nil { - return err - } - - // t.Proof ([]uint8) (slice) - if len(t.Proof) > cbg.ByteArrayMaxLen { - return xerrors.Errorf("Byte array in field t.Proof was too long") - } - - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajByteString, uint64(len(t.Proof)))); err != nil { - return err - } - if _, err := w.Write(t.Proof); err != nil { - return err - } - - // t.SectorID (uint64) (uint64) - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajUnsignedInt, uint64(t.SectorID))); err != nil { - return err - } - - // t.DealIDs ([]uint64) (slice) - if len(t.DealIDs) > cbg.MaxLength { - return xerrors.Errorf("Slice value in field t.DealIDs was too long") - } - - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajArray, uint64(len(t.DealIDs)))); err != nil { - return err - } - for _, v := range t.DealIDs { - if err := cbg.CborWriteHeader(w, cbg.MajUnsignedInt, v); err != nil { - return err - } - } - return nil -} - -func (t *SectorProveCommitInfo) UnmarshalCBOR(r io.Reader) error { - br := cbg.GetPeeker(r) - - maj, extra, err := cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 3 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.Proof ([]uint8) (slice) - - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err - } - - if extra > cbg.ByteArrayMaxLen { - return fmt.Errorf("t.Proof: byte array too large (%d)", extra) - } - if maj != 
cbg.MajByteString { - return fmt.Errorf("expected byte array") - } - t.Proof = make([]byte, extra) - if _, err := io.ReadFull(br, t.Proof); err != nil { - return err - } - // t.SectorID (uint64) (uint64) - - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.SectorID = uint64(extra) - // t.DealIDs ([]uint64) (slice) - - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err - } - - if extra > cbg.MaxLength { - return fmt.Errorf("t.DealIDs: array too large (%d)", extra) - } - - if maj != cbg.MajArray { - return fmt.Errorf("expected cbor array") - } - if extra > 0 { - t.DealIDs = make([]uint64, extra) - } - for i := 0; i < int(extra); i++ { - - maj, val, err := cbg.CborReadHeader(br) - if err != nil { - return xerrors.Errorf("failed to read uint64 for t.DealIDs slice: %w", err) - } - - if maj != cbg.MajUnsignedInt { - return xerrors.Errorf("value read for array t.DealIDs was not a uint, instead got %d", maj) - } - - t.DealIDs[i] = val - } - - return nil -} - -func (t *CheckMinerParams) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write([]byte{129}); err != nil { - return err - } - - // t.NetworkPower (types.BigInt) (struct) - if err := t.NetworkPower.MarshalCBOR(w); err != nil { - return err - } - return nil -} - -func (t *CheckMinerParams) UnmarshalCBOR(r io.Reader) error { - br := cbg.GetPeeker(r) - - maj, extra, err := cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 1 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.NetworkPower (types.BigInt) (struct) - - { - - if err := t.NetworkPower.UnmarshalCBOR(br); err != nil { - return err - } - - } - return nil -} - -func (t *CronActorState) MarshalCBOR(w io.Writer) error { - if t 
== nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write([]byte{128}); err != nil { - return err - } - return nil -} - -func (t *CronActorState) UnmarshalCBOR(r io.Reader) error { - br := cbg.GetPeeker(r) - - maj, extra, err := cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 0 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - return nil -} diff --git a/chain/actors/error.go b/chain/actors/error.go deleted file mode 100644 index a24f952da..000000000 --- a/chain/actors/error.go +++ /dev/null @@ -1,5 +0,0 @@ -package actors - -import "github.com/filecoin-project/lotus/chain/actors/aerrors" - -type ActorError = aerrors.ActorError diff --git a/chain/actors/forks.go b/chain/actors/forks.go deleted file mode 100644 index 46f8cafd8..000000000 --- a/chain/actors/forks.go +++ /dev/null @@ -1,46 +0,0 @@ -package actors - -import ( - "reflect" - "sort" - - "github.com/filecoin-project/lotus/chain/actors/aerrors" - "github.com/filecoin-project/lotus/chain/types" -) - -type update struct { - start uint64 - method interface{} -} - -func withUpdates(updates ...update) interface{} { - sort.Slice(updates, func(i, j int) bool { // so we iterate from newest below - return updates[i].start > updates[j].start - }) - - // -} - -func notFound(vmctx types.VMContext) func() ([]byte, ActorError) { - return func() ([]byte, ActorError) { - return nil, aerrors.Fatal("no code for method %d at height %d", vmctx.Message().Method, vmctx.BlockHeight()) - } -} diff --git a/chain/actors/harness2_test.go b/chain/actors/harness2_test.go deleted file mode 100644 index d5d34795b..000000000 --- a/chain/actors/harness2_test.go +++ /dev/null @@ -1,337 +0,0 @@ -package actors_test - -import ( - "bytes" - "context" - "math/rand" - "testing" - - "github.com/ipfs/go-cid" - dstore "github.com/ipfs/go-datastore" - hamt "github.com/ipfs/go-hamt-ipld" - 
blockstore "github.com/ipfs/go-ipfs-blockstore" - bstore "github.com/ipfs/go-ipfs-blockstore" - cbg "github.com/whyrusleeping/cbor-gen" - "golang.org/x/xerrors" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/lotus/chain/actors" - "github.com/filecoin-project/lotus/chain/gen" - "github.com/filecoin-project/lotus/chain/state" - "github.com/filecoin-project/lotus/chain/store" - "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/chain/vm" - "github.com/filecoin-project/lotus/chain/wallet" -) - -const testGasLimit = 10000 - -type HarnessInit struct { - NAddrs uint64 - Addrs map[address.Address]types.BigInt - Miner address.Address -} - -type HarnessStage int - -const ( - HarnessPreInit HarnessStage = iota - HarnessPostInit -) - -type HarnessOpt func(testing.TB, *Harness) error - -type Harness struct { - HI HarnessInit - Stage HarnessStage - Nonces map[address.Address]uint64 - GasCharges map[address.Address]types.BigInt - Rand vm.Rand - BlockHeight uint64 - - lastBalanceCheck map[address.Address]types.BigInt - - ctx context.Context - bs blockstore.Blockstore - vm *vm.VM - cs *store.ChainStore - w *wallet.Wallet -} - -var HarnessMinerFunds = types.NewInt(1000000) - -func HarnessAddr(addr *address.Address, value uint64) HarnessOpt { - return func(t testing.TB, h *Harness) error { - if h.Stage != HarnessPreInit { - return nil - } - hi := &h.HI - if addr.Empty() { - k, err := h.w.GenerateKey(types.KTSecp256k1) - if err != nil { - t.Fatal(err) - } - - *addr = k - } - hi.Addrs[*addr] = types.NewInt(value) - return nil - } -} - -func HarnessMiner(addr *address.Address) HarnessOpt { - return func(_ testing.TB, h *Harness) error { - if h.Stage != HarnessPreInit { - return nil - } - hi := &h.HI - if addr.Empty() { - *addr = hi.Miner - return nil - } - delete(hi.Addrs, hi.Miner) - hi.Miner = *addr - return nil - } -} - -func HarnessActor(actor *address.Address, creator *address.Address, code cid.Cid, params func() 
cbg.CBORMarshaler) HarnessOpt { - return func(t testing.TB, h *Harness) error { - if h.Stage != HarnessPostInit { - return nil - } - if !actor.Empty() { - return xerrors.New("actor address should be empty") - } - - ret, _ := h.CreateActor(t, *creator, code, params()) - if ret.ExitCode != 0 { - return xerrors.Errorf("creating actor: %w", ret.ActorErr) - } - var err error - *actor, err = address.NewFromBytes(ret.Return) - return err - } - -} - -func HarnessCtx(ctx context.Context) HarnessOpt { - return func(t testing.TB, h *Harness) error { - h.ctx = ctx - return nil - } -} - -func NewHarness(t *testing.T, options ...HarnessOpt) *Harness { - w, err := wallet.NewWallet(wallet.NewMemKeyStore()) - if err != nil { - t.Fatal(err) - } - h := &Harness{ - Stage: HarnessPreInit, - Nonces: make(map[address.Address]uint64), - Rand: &fakeRand{}, - HI: HarnessInit{ - NAddrs: 1, - Miner: blsaddr(0), - Addrs: map[address.Address]types.BigInt{ - blsaddr(0): HarnessMinerFunds, - }, - }, - GasCharges: make(map[address.Address]types.BigInt), - - lastBalanceCheck: make(map[address.Address]types.BigInt), - w: w, - ctx: context.Background(), - bs: bstore.NewBlockstore(dstore.NewMapDatastore()), - BlockHeight: 0, - } - for _, opt := range options { - err := opt(t, h) - if err != nil { - t.Fatalf("Applying options: %v", err) - } - } - - st, err := gen.MakeInitialStateTree(h.bs, h.HI.Addrs) - if err != nil { - t.Fatal(err) - } - - stateroot, err := st.Flush() - if err != nil { - t.Fatal(err) - } - - stateroot, err = gen.SetupStorageMarketActor(h.bs, stateroot, nil) - if err != nil { - t.Fatal(err) - } - - h.cs = store.NewChainStore(h.bs, nil) - h.vm, err = vm.NewVM(stateroot, 1, h.Rand, h.HI.Miner, h.cs.Blockstore()) - if err != nil { - t.Fatal(err) - } - h.Stage = HarnessPostInit - for _, opt := range options { - err := opt(t, h) - if err != nil { - t.Fatalf("Applying options: %v", err) - } - } - - return h -} - -func (h *Harness) Apply(t testing.TB, msg types.Message) (*vm.ApplyRet, 
*state.StateTree) { - t.Helper() - if msg.Nonce == 0 { - msg.Nonce, _ = h.Nonces[msg.From] - h.Nonces[msg.From] = msg.Nonce + 1 - } - - ret, err := h.vm.ApplyMessage(h.ctx, &msg) - if err != nil { - t.Fatalf("Applying message: %+v", err) - } - - if ret != nil { - if prev, ok := h.GasCharges[msg.From]; ok { - h.GasCharges[msg.From] = types.BigAdd(prev, ret.GasUsed) - } else { - h.GasCharges[msg.From] = ret.GasUsed - } - } - - stateroot, err := h.vm.Flush(context.TODO()) - if err != nil { - t.Fatalf("Flushing VM: %+v", err) - } - cst := hamt.CSTFromBstore(h.bs) - state, err := state.LoadStateTree(cst, stateroot) - if err != nil { - t.Fatalf("Loading state tree: %+v", err) - } - return ret, state -} - -func (h *Harness) CreateActor(t testing.TB, from address.Address, - code cid.Cid, params cbg.CBORMarshaler) (*vm.ApplyRet, *state.StateTree) { - t.Helper() - - return h.Apply(t, types.Message{ - To: actors.InitAddress, - From: from, - Method: actors.IAMethods.Exec, - Params: DumpObject(t, - &actors.ExecParams{ - Code: code, - Params: DumpObject(t, params), - }), - GasPrice: types.NewInt(1), - GasLimit: types.NewInt(testGasLimit), - Value: types.NewInt(0), - }) -} - -func (h *Harness) SendFunds(t testing.TB, from address.Address, to address.Address, - value types.BigInt) (*vm.ApplyRet, *state.StateTree) { - t.Helper() - return h.Apply(t, types.Message{ - To: to, - From: from, - Method: 0, - Value: value, - GasPrice: types.NewInt(1), - GasLimit: types.NewInt(testGasLimit), - }) -} - -func (h *Harness) Invoke(t testing.TB, from address.Address, to address.Address, - method uint64, params cbg.CBORMarshaler) (*vm.ApplyRet, *state.StateTree) { - t.Helper() - return h.InvokeWithValue(t, from, to, method, types.NewInt(0), params) -} - -func (h *Harness) InvokeWithValue(t testing.TB, from address.Address, to address.Address, - method uint64, value types.BigInt, params cbg.CBORMarshaler) (*vm.ApplyRet, *state.StateTree) { - t.Helper() - h.vm.SetBlockHeight(h.BlockHeight) - return 
h.Apply(t, types.Message{ - To: to, - From: from, - Method: method, - Value: value, - Params: DumpObject(t, params), - GasPrice: types.NewInt(1), - GasLimit: types.NewInt(testGasLimit), - }) -} - -func (h *Harness) AssertBalance(t testing.TB, addr address.Address, amt uint64) { - t.Helper() - - b, err := h.vm.ActorBalance(addr) - if err != nil { - t.Fatalf("%+v", err) - } - - if types.BigCmp(types.NewInt(amt), b) != 0 { - t.Errorf("expected %s to have balanced of %d. Instead has %s", addr, amt, b) - } -} - -func (h *Harness) AssertBalanceChange(t testing.TB, addr address.Address, amt int64) { - t.Helper() - lastBalance, ok := h.lastBalanceCheck[addr] - if !ok { - lastBalance, ok = h.HI.Addrs[addr] - if !ok { - lastBalance = types.NewInt(0) - } - } - - var expected types.BigInt - - if amt >= 0 { - expected = types.BigAdd(lastBalance, types.NewInt(uint64(amt))) - } else { - expected = types.BigSub(lastBalance, types.NewInt(uint64(-amt))) - } - - h.lastBalanceCheck[addr] = expected - - if gasUsed, ok := h.GasCharges[addr]; ok { - expected = types.BigSub(expected, gasUsed) - } - - b, err := h.vm.ActorBalance(addr) - if err != nil { - t.Fatalf("%+v", err) - } - - if types.BigCmp(expected, b) != 0 { - t.Errorf("expected %s to have balanced of %d. 
Instead has %s", addr, amt, b) - } -} - -func DumpObject(t testing.TB, obj cbg.CBORMarshaler) []byte { - if obj == nil { - return nil - } - t.Helper() - b := new(bytes.Buffer) - if err := obj.MarshalCBOR(b); err != nil { - t.Fatalf("dumping params: %+v", err) - } - return b.Bytes() -} - -type fakeRand struct{} - -func (fr *fakeRand) GetRandomness(ctx context.Context, h int64) ([]byte, error) { - out := make([]byte, 32) - rand.New(rand.NewSource(h)).Read(out) - return out, nil -} diff --git a/chain/actors/params.go b/chain/actors/params.go index c713167ce..e14dcafc9 100644 --- a/chain/actors/params.go +++ b/chain/actors/params.go @@ -7,10 +7,6 @@ import ( cbg "github.com/whyrusleeping/cbor-gen" ) -var ( - EmptyStructCBOR = []byte{0xa0} -) - func SerializeParams(i cbg.CBORMarshaler) ([]byte, aerrors.ActorError) { buf := new(bytes.Buffer) if err := i.MarshalCBOR(buf); err != nil { diff --git a/chain/actors/policy/policy.go b/chain/actors/policy/policy.go new file mode 100644 index 000000000..b8205177e --- /dev/null +++ b/chain/actors/policy/policy.go @@ -0,0 +1,54 @@ +package policy + +import ( + "github.com/filecoin-project/go-state-types/abi" + + miner0 "github.com/filecoin-project/specs-actors/actors/builtin/miner" + power0 "github.com/filecoin-project/specs-actors/actors/builtin/power" + verifreg0 "github.com/filecoin-project/specs-actors/actors/builtin/verifreg" +) + +// SetSupportedProofTypes sets supported proof types, across all actor versions. +// This should only be used for testing. +func SetSupportedProofTypes(types ...abi.RegisteredSealProof) { + newTypes := make(map[abi.RegisteredSealProof]struct{}, len(types)) + for _, t := range types { + newTypes[t] = struct{}{} + } + // Set for all miner versions. + miner0.SupportedProofTypes = newTypes +} + +// AddSupportedProofTypes sets supported proof types, across all actor versions. +// This should only be used for testing. 
+func AddSupportedProofTypes(types ...abi.RegisteredSealProof) { + for _, t := range types { + // Set for all miner versions. + miner0.SupportedProofTypes[t] = struct{}{} + } +} + +// SetPreCommitChallengeDelay sets the pre-commit challenge delay across all +// actors versions. Use for testing. +func SetPreCommitChallengeDelay(delay abi.ChainEpoch) { + // Set for all miner versions. + miner0.PreCommitChallengeDelay = delay +} + +// TODO: this function shouldn't really exist. Instead, the API should expose the precommit delay. +func GetPreCommitChallengeDelay() abi.ChainEpoch { + return miner0.PreCommitChallengeDelay +} + +// SetConsensusMinerMinPower sets the minimum power of an individual miner must +// meet for leader election, across all actor versions. This should only be used +// for testing. +func SetConsensusMinerMinPower(p abi.StoragePower) { + power0.ConsensusMinerMinPower = p +} + +// SetMinVerifiedDealSize sets the minimum size of a verified deal. This should +// only be used for testing. +func SetMinVerifiedDealSize(size abi.StoragePower) { + verifreg0.MinVerifiedDealSize = size +} diff --git a/chain/actors/policy/policy_test.go b/chain/actors/policy/policy_test.go new file mode 100644 index 000000000..be64362a2 --- /dev/null +++ b/chain/actors/policy/policy_test.go @@ -0,0 +1,36 @@ +package policy + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "github.com/filecoin-project/go-state-types/abi" + miner0 "github.com/filecoin-project/specs-actors/actors/builtin/miner" +) + +func TestSupportedProofTypes(t *testing.T) { + var oldTypes []abi.RegisteredSealProof + for t := range miner0.SupportedProofTypes { + oldTypes = append(oldTypes, t) + } + t.Cleanup(func() { + SetSupportedProofTypes(oldTypes...) 
+ }) + + SetSupportedProofTypes(abi.RegisteredSealProof_StackedDrg2KiBV1) + require.EqualValues(t, + miner0.SupportedProofTypes, + map[abi.RegisteredSealProof]struct{}{ + abi.RegisteredSealProof_StackedDrg2KiBV1: {}, + }, + ) + AddSupportedProofTypes(abi.RegisteredSealProof_StackedDrg8MiBV1) + require.EqualValues(t, + miner0.SupportedProofTypes, + map[abi.RegisteredSealProof]struct{}{ + abi.RegisteredSealProof_StackedDrg2KiBV1: {}, + abi.RegisteredSealProof_StackedDrg8MiBV1: {}, + }, + ) +} diff --git a/chain/badtscache.go b/chain/badtscache.go index 070375062..3c5bf05ef 100644 --- a/chain/badtscache.go +++ b/chain/badtscache.go @@ -1,6 +1,8 @@ package chain import ( + "fmt" + "github.com/filecoin-project/lotus/build" lru "github.com/hashicorp/golang-lru" "github.com/ipfs/go-cid" @@ -10,6 +12,35 @@ type BadBlockCache struct { badBlocks *lru.ARCCache } +type BadBlockReason struct { + Reason string + TipSet []cid.Cid + OriginalReason *BadBlockReason +} + +func NewBadBlockReason(cid []cid.Cid, format string, i ...interface{}) BadBlockReason { + return BadBlockReason{ + TipSet: cid, + Reason: fmt.Sprintf(format, i...), + } +} + +func (bbr BadBlockReason) Linked(reason string, i ...interface{}) BadBlockReason { + or := &bbr + if bbr.OriginalReason != nil { + or = bbr.OriginalReason + } + return BadBlockReason{Reason: fmt.Sprintf(reason, i...), OriginalReason: or} +} + +func (bbr BadBlockReason) String() string { + res := bbr.Reason + if bbr.OriginalReason != nil { + res += " caused by: " + fmt.Sprintf("%s %s", bbr.OriginalReason.TipSet, bbr.OriginalReason.String()) + } + return res +} + func NewBadBlockCache() *BadBlockCache { cache, err := lru.NewARC(build.BadBlockCacheSize) if err != nil { @@ -21,10 +52,19 @@ func NewBadBlockCache() *BadBlockCache { } } -func (bts *BadBlockCache) Add(c cid.Cid) { - bts.badBlocks.Add(c, nil) +func (bts *BadBlockCache) Add(c cid.Cid, bbr BadBlockReason) { + bts.badBlocks.Add(c, bbr) } -func (bts *BadBlockCache) Has(c cid.Cid) bool { - 
return bts.badBlocks.Contains(c) +func (bts *BadBlockCache) Remove(c cid.Cid) { + bts.badBlocks.Remove(c) +} + +func (bts *BadBlockCache) Has(c cid.Cid) (BadBlockReason, bool) { + rval, ok := bts.badBlocks.Get(c) + if !ok { + return BadBlockReason{}, false + } + + return rval.(BadBlockReason), true } diff --git a/chain/beacon/beacon.go b/chain/beacon/beacon.go new file mode 100644 index 000000000..9543bec54 --- /dev/null +++ b/chain/beacon/beacon.go @@ -0,0 +1,159 @@ +package beacon + +import ( + "context" + + "github.com/filecoin-project/go-state-types/abi" + logging "github.com/ipfs/go-log" + "golang.org/x/xerrors" + + "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/chain/types" +) + +var log = logging.Logger("beacon") + +type Response struct { + Entry types.BeaconEntry + Err error +} + +type Schedule []BeaconPoint + +func (bs Schedule) BeaconForEpoch(e abi.ChainEpoch) RandomBeacon { + for i := len(bs) - 1; i >= 0; i-- { + bp := bs[i] + if e >= bp.Start { + return bp.Beacon + } + } + return bs[0].Beacon +} + +type BeaconPoint struct { + Start abi.ChainEpoch + Beacon RandomBeacon +} + +// RandomBeacon represents a system that provides randomness to Lotus. +// Other components interrogate the RandomBeacon to acquire randomness that's +// valid for a specific chain epoch. Also to verify beacon entries that have +// been posted on chain. 
+type RandomBeacon interface { + Entry(context.Context, uint64) <-chan Response + VerifyEntry(types.BeaconEntry, types.BeaconEntry) error + MaxBeaconRoundForEpoch(abi.ChainEpoch) uint64 +} + +func ValidateBlockValues(bSchedule Schedule, h *types.BlockHeader, parentEpoch abi.ChainEpoch, + prevEntry types.BeaconEntry) error { + { + parentBeacon := bSchedule.BeaconForEpoch(parentEpoch) + currBeacon := bSchedule.BeaconForEpoch(h.Height) + if parentBeacon != currBeacon { + if len(h.BeaconEntries) != 2 { + return xerrors.Errorf("expected two beacon entries at beacon fork, got %d", len(h.BeaconEntries)) + } + err := currBeacon.VerifyEntry(h.BeaconEntries[1], h.BeaconEntries[0]) + if err != nil { + return xerrors.Errorf("beacon at fork point invalid: (%v, %v): %w", + h.BeaconEntries[1], h.BeaconEntries[0], err) + } + return nil + } + } + + // TODO: fork logic + b := bSchedule.BeaconForEpoch(h.Height) + maxRound := b.MaxBeaconRoundForEpoch(h.Height) + if maxRound == prevEntry.Round { + if len(h.BeaconEntries) != 0 { + return xerrors.Errorf("expected not to have any beacon entries in this block, got %d", len(h.BeaconEntries)) + } + return nil + } + + if len(h.BeaconEntries) == 0 { + return xerrors.Errorf("expected to have beacon entries in this block, but didn't find any") + } + + last := h.BeaconEntries[len(h.BeaconEntries)-1] + if last.Round != maxRound { + return xerrors.Errorf("expected final beacon entry in block to be at round %d, got %d", maxRound, last.Round) + } + + for i, e := range h.BeaconEntries { + if err := b.VerifyEntry(e, prevEntry); err != nil { + return xerrors.Errorf("beacon entry %d (%d - %x (%d)) was invalid: %w", i, e.Round, e.Data, len(e.Data), err) + } + prevEntry = e + } + + return nil +} + +func BeaconEntriesForBlock(ctx context.Context, bSchedule Schedule, epoch abi.ChainEpoch, parentEpoch abi.ChainEpoch, prev types.BeaconEntry) ([]types.BeaconEntry, error) { + { + parentBeacon := bSchedule.BeaconForEpoch(parentEpoch) + currBeacon := 
bSchedule.BeaconForEpoch(epoch) + if parentBeacon != currBeacon { + // Fork logic + round := currBeacon.MaxBeaconRoundForEpoch(epoch) + out := make([]types.BeaconEntry, 2) + rch := currBeacon.Entry(ctx, round-1) + res := <-rch + if res.Err != nil { + return nil, xerrors.Errorf("getting entry %d returned error: %w", round-1, res.Err) + } + out[0] = res.Entry + rch = currBeacon.Entry(ctx, round) + res = <-rch + if res.Err != nil { + return nil, xerrors.Errorf("getting entry %d returned error: %w", round, res.Err) + } + out[1] = res.Entry + return out, nil + } + } + + beacon := bSchedule.BeaconForEpoch(epoch) + + start := build.Clock.Now() + + maxRound := beacon.MaxBeaconRoundForEpoch(epoch) + if maxRound == prev.Round { + return nil, nil + } + + // TODO: this is a sketchy way to handle the genesis block not having a beacon entry + if prev.Round == 0 { + prev.Round = maxRound - 1 + } + + cur := maxRound + var out []types.BeaconEntry + for cur > prev.Round { + rch := beacon.Entry(ctx, cur) + select { + case resp := <-rch: + if resp.Err != nil { + return nil, xerrors.Errorf("beacon entry request returned error: %w", resp.Err) + } + + out = append(out, resp.Entry) + cur = resp.Entry.Round - 1 + case <-ctx.Done(): + return nil, xerrors.Errorf("context timed out waiting on beacon entry to come back for epoch %d: %w", epoch, ctx.Err()) + } + } + + log.Debugw("fetching beacon entries", "took", build.Clock.Since(start), "numEntries", len(out)) + reverse(out) + return out, nil +} + +func reverse(arr []types.BeaconEntry) { + for i := 0; i < len(arr)/2; i++ { + arr[i], arr[len(arr)-(1+i)] = arr[len(arr)-(1+i)], arr[i] + } +} diff --git a/chain/beacon/drand/drand.go b/chain/beacon/drand/drand.go new file mode 100644 index 000000000..6e8e83a20 --- /dev/null +++ b/chain/beacon/drand/drand.go @@ -0,0 +1,197 @@ +package drand + +import ( + "bytes" + "context" + "sync" + "time" + + dchain "github.com/drand/drand/chain" + dclient "github.com/drand/drand/client" + hclient 
"github.com/drand/drand/client/http" + dlog "github.com/drand/drand/log" + gclient "github.com/drand/drand/lp2p/client" + "github.com/drand/kyber" + kzap "github.com/go-kit/kit/log/zap" + "go.uber.org/zap/zapcore" + "golang.org/x/xerrors" + + logging "github.com/ipfs/go-log" + pubsub "github.com/libp2p/go-libp2p-pubsub" + + "github.com/filecoin-project/go-state-types/abi" + + "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/chain/beacon" + "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/node/modules/dtypes" +) + +var log = logging.Logger("drand") + +type drandPeer struct { + addr string + tls bool +} + +func (dp *drandPeer) Address() string { + return dp.addr +} + +func (dp *drandPeer) IsTLS() bool { + return dp.tls +} + +// DrandBeacon connects Lotus with a drand network in order to provide +// randomness to the system in a way that's aligned with Filecoin rounds/epochs. +// +// We connect to drand peers via their public HTTP endpoints. The peers are +// enumerated in the drandServers variable. +// +// The root trust for the Drand chain is configured from build.DrandChain. 
+type DrandBeacon struct { + client dclient.Client + + pubkey kyber.Point + + // seconds + interval time.Duration + + drandGenTime uint64 + filGenTime uint64 + filRoundTime uint64 + + cacheLk sync.Mutex + localCache map[uint64]types.BeaconEntry +} + +func NewDrandBeacon(genesisTs, interval uint64, ps *pubsub.PubSub, config dtypes.DrandConfig) (*DrandBeacon, error) { + if genesisTs == 0 { + panic("what are you doing this cant be zero") + } + + drandChain, err := dchain.InfoFromJSON(bytes.NewReader([]byte(config.ChainInfoJSON))) + if err != nil { + return nil, xerrors.Errorf("unable to unmarshal drand chain info: %w", err) + } + + dlogger := dlog.NewKitLoggerFrom(kzap.NewZapSugarLogger( + log.SugaredLogger.Desugar(), zapcore.InfoLevel)) + + var clients []dclient.Client + for _, url := range config.Servers { + hc, err := hclient.NewWithInfo(url, drandChain, nil) + if err != nil { + return nil, xerrors.Errorf("could not create http drand client: %w", err) + } + clients = append(clients, hc) + + } + + opts := []dclient.Option{ + dclient.WithChainInfo(drandChain), + dclient.WithCacheSize(1024), + dclient.WithLogger(dlogger), + dclient.WithAutoWatch(), + } + + if ps != nil { + opts = append(opts, gclient.WithPubsub(ps)) + } else { + log.Info("drand beacon without pubsub") + } + + client, err := dclient.Wrap(clients, opts...) 
+ if err != nil { + return nil, xerrors.Errorf("creating drand client") + } + + db := &DrandBeacon{ + client: client, + localCache: make(map[uint64]types.BeaconEntry), + } + + db.pubkey = drandChain.PublicKey + db.interval = drandChain.Period + db.drandGenTime = uint64(drandChain.GenesisTime) + db.filRoundTime = interval + db.filGenTime = genesisTs + + return db, nil +} + +func (db *DrandBeacon) Entry(ctx context.Context, round uint64) <-chan beacon.Response { + out := make(chan beacon.Response, 1) + if round != 0 { + be := db.getCachedValue(round) + if be != nil { + out <- beacon.Response{Entry: *be} + close(out) + return out + } + } + + go func() { + start := build.Clock.Now() + log.Infow("start fetching randomness", "round", round) + resp, err := db.client.Get(ctx, round) + + var br beacon.Response + if err != nil { + br.Err = xerrors.Errorf("drand failed Get request: %w", err) + } else { + br.Entry.Round = resp.Round() + br.Entry.Data = resp.Signature() + } + log.Infow("done fetching randomness", "round", round, "took", build.Clock.Since(start)) + out <- br + close(out) + }() + + return out +} +func (db *DrandBeacon) cacheValue(e types.BeaconEntry) { + db.cacheLk.Lock() + defer db.cacheLk.Unlock() + db.localCache[e.Round] = e +} + +func (db *DrandBeacon) getCachedValue(round uint64) *types.BeaconEntry { + db.cacheLk.Lock() + defer db.cacheLk.Unlock() + v, ok := db.localCache[round] + if !ok { + return nil + } + return &v +} + +func (db *DrandBeacon) VerifyEntry(curr types.BeaconEntry, prev types.BeaconEntry) error { + if prev.Round == 0 { + // TODO handle genesis better + return nil + } + if be := db.getCachedValue(curr.Round); be != nil { + // return no error if the value is in the cache already + return nil + } + b := &dchain.Beacon{ + PreviousSig: prev.Data, + Round: curr.Round, + Signature: curr.Data, + } + err := dchain.VerifyBeacon(db.pubkey, b) + if err == nil { + db.cacheValue(curr) + } + return err +} + +func (db *DrandBeacon) 
MaxBeaconRoundForEpoch(filEpoch abi.ChainEpoch) uint64 { + // TODO: sometimes the genesis time for filecoin is zero and this goes negative + latestTs := ((uint64(filEpoch) * db.filRoundTime) + db.filGenTime) - db.filRoundTime + dround := (latestTs - db.drandGenTime) / uint64(db.interval.Seconds()) + return dround +} + +var _ beacon.RandomBeacon = (*DrandBeacon)(nil) diff --git a/chain/beacon/drand/drand_test.go b/chain/beacon/drand/drand_test.go new file mode 100644 index 000000000..0cb9c2ba8 --- /dev/null +++ b/chain/beacon/drand/drand_test.go @@ -0,0 +1,25 @@ +package drand + +import ( + "os" + "testing" + + dchain "github.com/drand/drand/chain" + hclient "github.com/drand/drand/client/http" + "github.com/stretchr/testify/assert" + + "github.com/filecoin-project/lotus/build" +) + +func TestPrintGroupInfo(t *testing.T) { + server := build.DrandConfigs[build.DrandIncentinet].Servers[0] + c, err := hclient.New(server, nil, nil) + assert.NoError(t, err) + cg := c.(interface { + FetchChainInfo(groupHash []byte) (*dchain.Info, error) + }) + chain, err := cg.FetchChainInfo(nil) + assert.NoError(t, err) + err = chain.ToJSON(os.Stdout) + assert.NoError(t, err) +} diff --git a/chain/beacon/mock.go b/chain/beacon/mock.go new file mode 100644 index 000000000..502ff2ba5 --- /dev/null +++ b/chain/beacon/mock.go @@ -0,0 +1,60 @@ +package beacon + +import ( + "bytes" + "context" + "encoding/binary" + "time" + + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/lotus/chain/types" + "github.com/minio/blake2b-simd" + "golang.org/x/xerrors" +) + +// Mock beacon assumes that filecoin rounds are 1:1 mapped with the beacon rounds +type mockBeacon struct { + interval time.Duration +} + +func NewMockBeacon(interval time.Duration) RandomBeacon { + mb := &mockBeacon{interval: interval} + + return mb +} + +func (mb *mockBeacon) RoundTime() time.Duration { + return mb.interval +} + +func (mb *mockBeacon) entryForIndex(index uint64) types.BeaconEntry { + buf := 
make([]byte, 8) + binary.BigEndian.PutUint64(buf, index) + rval := blake2b.Sum256(buf) + return types.BeaconEntry{ + Round: index, + Data: rval[:], + } +} + +func (mb *mockBeacon) Entry(ctx context.Context, index uint64) <-chan Response { + e := mb.entryForIndex(index) + out := make(chan Response, 1) + out <- Response{Entry: e} + return out +} + +func (mb *mockBeacon) VerifyEntry(from types.BeaconEntry, to types.BeaconEntry) error { + // TODO: cache this, especially for bls + oe := mb.entryForIndex(from.Round) + if !bytes.Equal(from.Data, oe.Data) { + return xerrors.Errorf("mock beacon entry was invalid!") + } + return nil +} + +func (mb *mockBeacon) MaxBeaconRoundForEpoch(epoch abi.ChainEpoch) uint64 { + return uint64(epoch) +} + +var _ RandomBeacon = (*mockBeacon)(nil) diff --git a/chain/block_receipt_tracker.go b/chain/block_receipt_tracker.go index f182fd180..a4a6743d1 100644 --- a/chain/block_receipt_tracker.go +++ b/chain/block_receipt_tracker.go @@ -5,9 +5,10 @@ import ( "sync" "time" + "github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/chain/types" - "github.com/hashicorp/golang-lru" - peer "github.com/libp2p/go-libp2p-core/peer" + lru "github.com/hashicorp/golang-lru" + "github.com/libp2p/go-libp2p-core/peer" ) type blockReceiptTracker struct { @@ -37,14 +38,14 @@ func (brt *blockReceiptTracker) Add(p peer.ID, ts *types.TipSet) { if !ok { pset := &peerSet{ peers: map[peer.ID]time.Time{ - p: time.Now(), + p: build.Clock.Now(), }, } brt.cache.Add(ts.Key(), pset) return } - val.(*peerSet).peers[p] = time.Now() + val.(*peerSet).peers[p] = build.Clock.Now() } func (brt *blockReceiptTracker) GetPeers(ts *types.TipSet) []peer.ID { diff --git a/chain/blocksync/blocksync.go b/chain/blocksync/blocksync.go deleted file mode 100644 index 429ad2ce1..000000000 --- a/chain/blocksync/blocksync.go +++ /dev/null @@ -1,238 +0,0 @@ -package blocksync - -import ( - "bufio" - "context" - - "github.com/libp2p/go-libp2p-core/protocol" - 
"go.opencensus.io/trace" - "golang.org/x/xerrors" - - "github.com/filecoin-project/go-cbor-util" - "github.com/filecoin-project/lotus/chain/store" - "github.com/filecoin-project/lotus/chain/types" - - "github.com/ipfs/go-cid" - cbor "github.com/ipfs/go-ipld-cbor" - logging "github.com/ipfs/go-log" - inet "github.com/libp2p/go-libp2p-core/network" - "github.com/libp2p/go-libp2p-core/peer" -) - -var log = logging.Logger("blocksync") - -type NewStreamFunc func(context.Context, peer.ID, ...protocol.ID) (inet.Stream, error) - -const BlockSyncProtocolID = "/fil/sync/blk/0.0.1" - -func init() { - cbor.RegisterCborType(BlockSyncRequest{}) - cbor.RegisterCborType(BlockSyncResponse{}) - cbor.RegisterCborType(BSTipSet{}) -} - -type BlockSyncService struct { - cs *store.ChainStore -} - -type BlockSyncRequest struct { - Start []cid.Cid - RequestLength uint64 - - Options uint64 -} - -type BSOptions struct { - IncludeBlocks bool - IncludeMessages bool -} - -func ParseBSOptions(optfield uint64) *BSOptions { - return &BSOptions{ - IncludeBlocks: optfield&(BSOptBlocks) != 0, - IncludeMessages: optfield&(BSOptMessages) != 0, - } -} - -const ( - BSOptBlocks = 1 << 0 - BSOptMessages = 1 << 1 -) - -type BlockSyncResponse struct { - Chain []*BSTipSet - - Status uint64 - Message string -} - -type BSTipSet struct { - Blocks []*types.BlockHeader - - BlsMessages []*types.Message - BlsMsgIncludes [][]uint64 - - SecpkMessages []*types.SignedMessage - SecpkMsgIncludes [][]uint64 -} - -func NewBlockSyncService(cs *store.ChainStore) *BlockSyncService { - return &BlockSyncService{ - cs: cs, - } -} - -func (bss *BlockSyncService) HandleStream(s inet.Stream) { - ctx, span := trace.StartSpan(context.Background(), "blocksync.HandleStream") - defer span.End() - - defer s.Close() - - var req BlockSyncRequest - if err := cborutil.ReadCborRPC(bufio.NewReader(s), &req); err != nil { - log.Warnf("failed to read block sync request: %s", err) - return - } - log.Infof("block sync request for: %s %d", 
req.Start, req.RequestLength) - - resp, err := bss.processRequest(ctx, &req) - if err != nil { - log.Warn("failed to process block sync request: ", err) - return - } - - if err := cborutil.WriteCborRPC(s, resp); err != nil { - log.Warn("failed to write back response for handle stream: ", err) - return - } -} - -func (bss *BlockSyncService) processRequest(ctx context.Context, req *BlockSyncRequest) (*BlockSyncResponse, error) { - _, span := trace.StartSpan(ctx, "blocksync.ProcessRequest") - defer span.End() - - opts := ParseBSOptions(req.Options) - if len(req.Start) == 0 { - return &BlockSyncResponse{ - Status: 204, - Message: "no cids given in blocksync request", - }, nil - } - - span.AddAttributes( - trace.BoolAttribute("blocks", opts.IncludeBlocks), - trace.BoolAttribute("messages", opts.IncludeMessages), - ) - - chain, err := bss.collectChainSegment(types.NewTipSetKey(req.Start...), req.RequestLength, opts) - if err != nil { - log.Warn("encountered error while responding to block sync request: ", err) - return &BlockSyncResponse{ - Status: 203, - Message: err.Error(), - }, nil - } - - return &BlockSyncResponse{ - Chain: chain, - Status: 0, - }, nil -} - -func (bss *BlockSyncService) collectChainSegment(start types.TipSetKey, length uint64, opts *BSOptions) ([]*BSTipSet, error) { - var bstips []*BSTipSet - cur := start - for { - var bst BSTipSet - ts, err := bss.cs.LoadTipSet(cur) - if err != nil { - return nil, xerrors.Errorf("failed loading tipset %s: %w", cur, err) - } - - if opts.IncludeMessages { - bmsgs, bmincl, smsgs, smincl, err := bss.gatherMessages(ts) - if err != nil { - return nil, xerrors.Errorf("gather messages failed: %w", err) - } - - bst.BlsMessages = bmsgs - bst.BlsMsgIncludes = bmincl - bst.SecpkMessages = smsgs - bst.SecpkMsgIncludes = smincl - } - - if opts.IncludeBlocks { - bst.Blocks = ts.Blocks() - } - - bstips = append(bstips, &bst) - - if uint64(len(bstips)) >= length || ts.Height() == 0 { - return bstips, nil - } - - cur = ts.Parents() 
- } -} - -func (bss *BlockSyncService) gatherMessages(ts *types.TipSet) ([]*types.Message, [][]uint64, []*types.SignedMessage, [][]uint64, error) { - blsmsgmap := make(map[cid.Cid]uint64) - secpkmsgmap := make(map[cid.Cid]uint64) - var secpkmsgs []*types.SignedMessage - var blsmsgs []*types.Message - var secpkincl, blsincl [][]uint64 - - for _, b := range ts.Blocks() { - bmsgs, smsgs, err := bss.cs.MessagesForBlock(b) - if err != nil { - return nil, nil, nil, nil, err - } - - bmi := make([]uint64, 0, len(bmsgs)) - for _, m := range bmsgs { - i, ok := blsmsgmap[m.Cid()] - if !ok { - i = uint64(len(blsmsgs)) - blsmsgs = append(blsmsgs, m) - blsmsgmap[m.Cid()] = i - } - - bmi = append(bmi, i) - } - blsincl = append(blsincl, bmi) - - smi := make([]uint64, 0, len(smsgs)) - for _, m := range smsgs { - i, ok := secpkmsgmap[m.Cid()] - if !ok { - i = uint64(len(secpkmsgs)) - secpkmsgs = append(secpkmsgs, m) - secpkmsgmap[m.Cid()] = i - } - - smi = append(smi, i) - } - secpkincl = append(secpkincl, smi) - } - - return blsmsgs, blsincl, secpkmsgs, secpkincl, nil -} - -func bstsToFullTipSet(bts *BSTipSet) (*store.FullTipSet, error) { - fts := &store.FullTipSet{} - for i, b := range bts.Blocks { - fb := &types.FullBlock{ - Header: b, - } - for _, mi := range bts.BlsMsgIncludes[i] { - fb.BlsMessages = append(fb.BlsMessages, bts.BlsMessages[mi]) - } - for _, mi := range bts.SecpkMsgIncludes[i] { - fb.SecpkMessages = append(fb.SecpkMessages, bts.SecpkMessages[mi]) - } - - fts.Blocks = append(fts.Blocks, fb) - } - - return fts, nil -} diff --git a/chain/blocksync/blocksync_client.go b/chain/blocksync/blocksync_client.go deleted file mode 100644 index ef5e370c8..000000000 --- a/chain/blocksync/blocksync_client.go +++ /dev/null @@ -1,547 +0,0 @@ -package blocksync - -import ( - "bufio" - "context" - "fmt" - "math/rand" - "sort" - "sync" - "time" - - blocks "github.com/ipfs/go-block-format" - bserv "github.com/ipfs/go-blockservice" - "github.com/ipfs/go-cid" - host 
"github.com/libp2p/go-libp2p-core/host" - inet "github.com/libp2p/go-libp2p-core/network" - "github.com/libp2p/go-libp2p-core/peer" - "go.opencensus.io/trace" - "golang.org/x/xerrors" - - "github.com/filecoin-project/go-cbor-util" - "github.com/filecoin-project/lotus/chain/store" - "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/node/modules/dtypes" - "github.com/filecoin-project/lotus/peermgr" -) - -type BlockSync struct { - bserv bserv.BlockService - host host.Host - - syncPeers *bsPeerTracker - peerMgr *peermgr.PeerMgr -} - -func NewBlockSyncClient(bserv dtypes.ChainBlockService, h host.Host, pmgr peermgr.MaybePeerMgr) *BlockSync { - return &BlockSync{ - bserv: bserv, - host: h, - syncPeers: newPeerTracker(pmgr.Mgr), - peerMgr: pmgr.Mgr, - } -} - -func (bs *BlockSync) processStatus(req *BlockSyncRequest, res *BlockSyncResponse) error { - switch res.Status { - case 101: // Partial Response - return xerrors.Errorf("not handling partial blocksync responses yet") - case 201: // req.Start not found - return xerrors.Errorf("not found") - case 202: // Go Away - return xerrors.Errorf("not handling 'go away' blocksync responses yet") - case 203: // Internal Error - return xerrors.Errorf("block sync peer errored: %s", res.Message) - case 204: - return xerrors.Errorf("block sync request invalid: %s", res.Message) - default: - return xerrors.Errorf("unrecognized response code: %d", res.Status) - } -} - -func (bs *BlockSync) GetBlocks(ctx context.Context, tsk types.TipSetKey, count int) ([]*types.TipSet, error) { - ctx, span := trace.StartSpan(ctx, "bsync.GetBlocks") - defer span.End() - if span.IsRecordingEvents() { - span.AddAttributes( - trace.StringAttribute("tipset", fmt.Sprint(tsk.Cids())), - trace.Int64Attribute("count", int64(count)), - ) - } - - req := &BlockSyncRequest{ - Start: tsk.Cids(), - RequestLength: uint64(count), - Options: BSOptBlocks, - } - - peers := bs.getPeers() - // randomize the first few peers so we don't always 
pick the same peer - shufflePrefix(peers) - - start := time.Now() - var oerr error - - for _, p := range peers { - // TODO: doing this synchronously isnt great, but fetching in parallel - // may not be a good idea either. think about this more - select { - case <-ctx.Done(): - return nil, xerrors.Errorf("blocksync getblocks failed: %w", ctx.Err()) - default: - } - - res, err := bs.sendRequestToPeer(ctx, p, req) - if err != nil { - oerr = err - if !xerrors.Is(err, inet.ErrNoConn) { - log.Warnf("BlockSync request failed for peer %s: %s", p.String(), err) - } - continue - } - - if res.Status == 0 { - resp, err := bs.processBlocksResponse(req, res) - if err != nil { - return nil, xerrors.Errorf("success response from peer failed to process: %w", err) - } - bs.syncPeers.logGlobalSuccess(time.Since(start)) - bs.host.ConnManager().TagPeer(p, "bsync", 25) - return resp, nil - } - oerr = bs.processStatus(req, res) - if oerr != nil { - log.Warnf("BlockSync peer %s response was an error: %s", p.String(), oerr) - } - } - return nil, xerrors.Errorf("GetBlocks failed with all peers: %w", oerr) -} - -func (bs *BlockSync) GetFullTipSet(ctx context.Context, p peer.ID, tsk types.TipSetKey) (*store.FullTipSet, error) { - // TODO: round robin through these peers on error - - req := &BlockSyncRequest{ - Start: tsk.Cids(), - RequestLength: 1, - Options: BSOptBlocks | BSOptMessages, - } - - res, err := bs.sendRequestToPeer(ctx, p, req) - if err != nil { - return nil, err - } - - switch res.Status { - case 0: // Success - if len(res.Chain) == 0 { - return nil, fmt.Errorf("got zero length chain response") - } - bts := res.Chain[0] - - return bstsToFullTipSet(bts) - case 101: // Partial Response - return nil, xerrors.Errorf("partial responses are not handled") - case 201: // req.Start not found - return nil, fmt.Errorf("not found") - case 202: // Go Away - return nil, xerrors.Errorf("received 'go away' response peer") - case 203: // Internal Error - return nil, fmt.Errorf("block sync peer 
errored: %q", res.Message) - case 204: // Invalid Request - return nil, fmt.Errorf("block sync request invalid: %q", res.Message) - default: - return nil, fmt.Errorf("unrecognized response code") - } -} - -func shufflePrefix(peers []peer.ID) { - pref := 5 - if len(peers) < pref { - pref = len(peers) - } - - buf := make([]peer.ID, pref) - perm := rand.Perm(pref) - for i, v := range perm { - buf[i] = peers[v] - } - - copy(peers, buf) -} - -func (bs *BlockSync) GetChainMessages(ctx context.Context, h *types.TipSet, count uint64) ([]*BSTipSet, error) { - ctx, span := trace.StartSpan(ctx, "GetChainMessages") - defer span.End() - - peers := bs.getPeers() - // randomize the first few peers so we don't always pick the same peer - shufflePrefix(peers) - - req := &BlockSyncRequest{ - Start: h.Cids(), - RequestLength: count, - Options: BSOptMessages | BSOptBlocks, - } - - var err error - start := time.Now() - - for _, p := range peers { - res, rerr := bs.sendRequestToPeer(ctx, p, req) - if rerr != nil { - err = rerr - log.Warnf("BlockSync request failed for peer %s: %s", p.String(), err) - continue - } - - if res.Status == 0 { - bs.syncPeers.logGlobalSuccess(time.Since(start)) - return res.Chain, nil - } - - err = bs.processStatus(req, res) - if err != nil { - log.Warnf("BlockSync peer %s response was an error: %s", p.String(), err) - } - } - - if err == nil { - return nil, xerrors.Errorf("GetChainMessages failed, no peers connected") - } - - // TODO: What if we have no peers (and err is nil)? 
- return nil, xerrors.Errorf("GetChainMessages failed with all peers(%d): %w", len(peers), err) -} - -func (bs *BlockSync) sendRequestToPeer(ctx context.Context, p peer.ID, req *BlockSyncRequest) (_ *BlockSyncResponse, err error) { - ctx, span := trace.StartSpan(ctx, "sendRequestToPeer") - defer span.End() - - defer func() { - if err != nil { - if span.IsRecordingEvents() { - span.SetStatus(trace.Status{ - Code: 5, - Message: err.Error(), - }) - } - } - }() - - start := time.Now() - - if span.IsRecordingEvents() { - span.AddAttributes( - trace.StringAttribute("peer", p.Pretty()), - ) - } - - s, err := bs.host.NewStream(inet.WithNoDial(ctx, "should already have connection"), p, BlockSyncProtocolID) - if err != nil { - bs.RemovePeer(p) - return nil, xerrors.Errorf("failed to open stream to peer: %w", err) - } - s.SetDeadline(time.Now().Add(10 * time.Second)) - defer s.SetDeadline(time.Time{}) - - if err := cborutil.WriteCborRPC(s, req); err != nil { - bs.syncPeers.logFailure(p, time.Since(start)) - return nil, err - } - - var res BlockSyncResponse - if err := cborutil.ReadCborRPC(bufio.NewReader(s), &res); err != nil { - bs.syncPeers.logFailure(p, time.Since(start)) - return nil, err - } - - if span.IsRecordingEvents() { - span.AddAttributes( - trace.Int64Attribute("resp_status", int64(res.Status)), - trace.StringAttribute("msg", res.Message), - trace.Int64Attribute("chain_len", int64(len(res.Chain))), - ) - } - - bs.syncPeers.logSuccess(p, time.Since(start)) - - return &res, nil -} - -func (bs *BlockSync) processBlocksResponse(req *BlockSyncRequest, res *BlockSyncResponse) ([]*types.TipSet, error) { - if len(res.Chain) == 0 { - return nil, xerrors.Errorf("got no blocks in successful blocksync response") - } - - cur, err := types.NewTipSet(res.Chain[0].Blocks) - if err != nil { - return nil, err - } - - out := []*types.TipSet{cur} - for bi := 1; bi < len(res.Chain); bi++ { - next := res.Chain[bi].Blocks - nts, err := types.NewTipSet(next) - if err != nil { - return 
nil, err - } - - if !types.CidArrsEqual(cur.Parents().Cids(), nts.Cids()) { - return nil, fmt.Errorf("parents of tipset[%d] were not tipset[%d]", bi-1, bi) - } - - out = append(out, nts) - cur = nts - } - return out, nil -} - -func (bs *BlockSync) GetBlock(ctx context.Context, c cid.Cid) (*types.BlockHeader, error) { - sb, err := bs.bserv.GetBlock(ctx, c) - if err != nil { - return nil, err - } - - return types.DecodeBlock(sb.RawData()) -} - -func (bs *BlockSync) AddPeer(p peer.ID) { - bs.syncPeers.addPeer(p) -} - -func (bs *BlockSync) RemovePeer(p peer.ID) { - bs.syncPeers.removePeer(p) -} - -func (bs *BlockSync) getPeers() []peer.ID { - return bs.syncPeers.prefSortedPeers() -} - -func (bs *BlockSync) FetchMessagesByCids(ctx context.Context, cids []cid.Cid) ([]*types.Message, error) { - out := make([]*types.Message, len(cids)) - - err := bs.fetchCids(ctx, cids, func(i int, b blocks.Block) error { - msg, err := types.DecodeMessage(b.RawData()) - if err != nil { - return err - } - - if out[i] != nil { - return fmt.Errorf("received duplicate message") - } - - out[i] = msg - return nil - }) - if err != nil { - return nil, err - } - return out, nil -} - -func (bs *BlockSync) FetchSignedMessagesByCids(ctx context.Context, cids []cid.Cid) ([]*types.SignedMessage, error) { - out := make([]*types.SignedMessage, len(cids)) - - err := bs.fetchCids(ctx, cids, func(i int, b blocks.Block) error { - smsg, err := types.DecodeSignedMessage(b.RawData()) - if err != nil { - return err - } - - if out[i] != nil { - return fmt.Errorf("received duplicate message") - } - - out[i] = smsg - return nil - }) - if err != nil { - return nil, err - } - return out, nil -} - -func (bs *BlockSync) fetchCids(ctx context.Context, cids []cid.Cid, cb func(int, blocks.Block) error) error { - resp := bs.bserv.GetBlocks(context.TODO(), cids) - - m := make(map[cid.Cid]int) - for i, c := range cids { - m[c] = i - } - - for i := 0; i < len(cids); i++ { - select { - case v, ok := <-resp: - if !ok { - if i == 
len(cids)-1 { - break - } - - return fmt.Errorf("failed to fetch all messages") - } - - ix, ok := m[v.Cid()] - if !ok { - return fmt.Errorf("received message we didnt ask for") - } - - if err := cb(ix, v); err != nil { - return err - } - } - } - - return nil -} - -type peerStats struct { - successes int - failures int - firstSeen time.Time - averageTime time.Duration -} - -type bsPeerTracker struct { - lk sync.Mutex - - peers map[peer.ID]*peerStats - avgGlobalTime time.Duration - - pmgr *peermgr.PeerMgr -} - -func newPeerTracker(pmgr *peermgr.PeerMgr) *bsPeerTracker { - return &bsPeerTracker{ - peers: make(map[peer.ID]*peerStats), - pmgr: pmgr, - } -} - -func (bpt *bsPeerTracker) addPeer(p peer.ID) { - bpt.lk.Lock() - defer bpt.lk.Unlock() - if _, ok := bpt.peers[p]; ok { - return - } - bpt.peers[p] = &peerStats{ - firstSeen: time.Now(), - } - -} - -const ( - // newPeerMul is how much better than average is the new peer assumed to be - // less than one to encourouge trying new peers - newPeerMul = 0.9 -) - -func (bpt *bsPeerTracker) prefSortedPeers() []peer.ID { - // TODO: this could probably be cached, but as long as its not too many peers, fine for now - bpt.lk.Lock() - defer bpt.lk.Unlock() - out := make([]peer.ID, 0, len(bpt.peers)) - for p := range bpt.peers { - out = append(out, p) - } - - // sort by 'expected cost' of requesting data from that peer - // additionally handle edge cases where not enough data is available - sort.Slice(out, func(i, j int) bool { - pi := bpt.peers[out[i]] - pj := bpt.peers[out[j]] - - var costI, costJ float64 - - getPeerInitLat := func(p peer.ID) float64 { - var res float64 - if bpt.pmgr != nil { - if lat, ok := bpt.pmgr.GetPeerLatency(out[i]); ok { - res = float64(lat) - } - } - if res == 0 { - res = float64(bpt.avgGlobalTime) - } - return res * newPeerMul - } - - if pi.successes+pi.failures > 0 { - failRateI := float64(pi.failures) / float64(pi.failures+pi.successes) - costI = float64(pi.averageTime) + 
failRateI*float64(bpt.avgGlobalTime) - } else { - costI = getPeerInitLat(out[i]) - } - - if pj.successes+pj.failures > 0 { - failRateJ := float64(pj.failures) / float64(pj.failures+pj.successes) - costJ = float64(pj.averageTime) + failRateJ*float64(bpt.avgGlobalTime) - } else { - costI = getPeerInitLat(out[i]) - } - - return costI < costJ - }) - - return out -} - -const ( - // xInvAlpha = (N+1)/2 - - localInvAlpha = 5 // 86% of the value is the last 9 - globalInvAlpha = 20 // 86% of the value is the last 39 -) - -func (bpt *bsPeerTracker) logGlobalSuccess(dur time.Duration) { - bpt.lk.Lock() - defer bpt.lk.Unlock() - - if bpt.avgGlobalTime == 0 { - bpt.avgGlobalTime = dur - return - } - delta := (dur - bpt.avgGlobalTime) / globalInvAlpha - bpt.avgGlobalTime += delta -} - -func logTime(pi *peerStats, dur time.Duration) { - if pi.averageTime == 0 { - pi.averageTime = dur - return - } - delta := (dur - pi.averageTime) / localInvAlpha - pi.averageTime += delta - -} - -func (bpt *bsPeerTracker) logSuccess(p peer.ID, dur time.Duration) { - bpt.lk.Lock() - defer bpt.lk.Unlock() - - if pi, ok := bpt.peers[p]; !ok { - log.Warnw("log success called on peer not in tracker", "peerid", p.String()) - return - } else { - pi.successes++ - - logTime(pi, dur) - } -} - -func (bpt *bsPeerTracker) logFailure(p peer.ID, dur time.Duration) { - bpt.lk.Lock() - defer bpt.lk.Unlock() - if pi, ok := bpt.peers[p]; !ok { - log.Warn("log failure called on peer not in tracker", "peerid", p.String()) - return - } else { - pi.failures++ - logTime(pi, dur) - } -} - -func (bpt *bsPeerTracker) removePeer(p peer.ID) { - bpt.lk.Lock() - defer bpt.lk.Unlock() - delete(bpt.peers, p) -} diff --git a/chain/blocksync/cbor_gen.go b/chain/blocksync/cbor_gen.go deleted file mode 100644 index 896d8f961..000000000 --- a/chain/blocksync/cbor_gen.go +++ /dev/null @@ -1,529 +0,0 @@ -package blocksync - -import ( - "fmt" - "io" - - "github.com/filecoin-project/lotus/chain/types" - cid "github.com/ipfs/go-cid" - cbg 
"github.com/whyrusleeping/cbor-gen" - xerrors "golang.org/x/xerrors" -) - -// Code generated by github.com/whyrusleeping/cbor-gen. DO NOT EDIT. - -var _ = xerrors.Errorf - -func (t *BlockSyncRequest) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write([]byte{131}); err != nil { - return err - } - - // t.Start ([]cid.Cid) (slice) - if len(t.Start) > cbg.MaxLength { - return xerrors.Errorf("Slice value in field t.Start was too long") - } - - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajArray, uint64(len(t.Start)))); err != nil { - return err - } - for _, v := range t.Start { - if err := cbg.WriteCid(w, v); err != nil { - return xerrors.Errorf("failed writing cid field t.Start: %w", err) - } - } - - // t.RequestLength (uint64) (uint64) - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajUnsignedInt, uint64(t.RequestLength))); err != nil { - return err - } - - // t.Options (uint64) (uint64) - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajUnsignedInt, uint64(t.Options))); err != nil { - return err - } - return nil -} - -func (t *BlockSyncRequest) UnmarshalCBOR(r io.Reader) error { - br := cbg.GetPeeker(r) - - maj, extra, err := cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 3 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.Start ([]cid.Cid) (slice) - - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err - } - - if extra > cbg.MaxLength { - return fmt.Errorf("t.Start: array too large (%d)", extra) - } - - if maj != cbg.MajArray { - return fmt.Errorf("expected cbor array") - } - if extra > 0 { - t.Start = make([]cid.Cid, extra) - } - for i := 0; i < int(extra); i++ { - - c, err := cbg.ReadCid(br) - if err != nil { - return xerrors.Errorf("reading cid field t.Start failed: %w", err) - } - t.Start[i] = c - } - - // 
t.RequestLength (uint64) (uint64) - - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.RequestLength = uint64(extra) - // t.Options (uint64) (uint64) - - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.Options = uint64(extra) - return nil -} - -func (t *BlockSyncResponse) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write([]byte{131}); err != nil { - return err - } - - // t.Chain ([]*blocksync.BSTipSet) (slice) - if len(t.Chain) > cbg.MaxLength { - return xerrors.Errorf("Slice value in field t.Chain was too long") - } - - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajArray, uint64(len(t.Chain)))); err != nil { - return err - } - for _, v := range t.Chain { - if err := v.MarshalCBOR(w); err != nil { - return err - } - } - - // t.Status (uint64) (uint64) - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajUnsignedInt, uint64(t.Status))); err != nil { - return err - } - - // t.Message (string) (string) - if len(t.Message) > cbg.MaxLength { - return xerrors.Errorf("Value in field t.Message was too long") - } - - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajTextString, uint64(len(t.Message)))); err != nil { - return err - } - if _, err := w.Write([]byte(t.Message)); err != nil { - return err - } - return nil -} - -func (t *BlockSyncResponse) UnmarshalCBOR(r io.Reader) error { - br := cbg.GetPeeker(r) - - maj, extra, err := cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 3 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.Chain ([]*blocksync.BSTipSet) (slice) - - maj, extra, err = cbg.CborReadHeader(br) - if err 
!= nil { - return err - } - - if extra > cbg.MaxLength { - return fmt.Errorf("t.Chain: array too large (%d)", extra) - } - - if maj != cbg.MajArray { - return fmt.Errorf("expected cbor array") - } - if extra > 0 { - t.Chain = make([]*BSTipSet, extra) - } - for i := 0; i < int(extra); i++ { - - var v BSTipSet - if err := v.UnmarshalCBOR(br); err != nil { - return err - } - - t.Chain[i] = &v - } - - // t.Status (uint64) (uint64) - - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.Status = uint64(extra) - // t.Message (string) (string) - - { - sval, err := cbg.ReadString(br) - if err != nil { - return err - } - - t.Message = string(sval) - } - return nil -} - -func (t *BSTipSet) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write([]byte{133}); err != nil { - return err - } - - // t.Blocks ([]*types.BlockHeader) (slice) - if len(t.Blocks) > cbg.MaxLength { - return xerrors.Errorf("Slice value in field t.Blocks was too long") - } - - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajArray, uint64(len(t.Blocks)))); err != nil { - return err - } - for _, v := range t.Blocks { - if err := v.MarshalCBOR(w); err != nil { - return err - } - } - - // t.BlsMessages ([]*types.Message) (slice) - if len(t.BlsMessages) > cbg.MaxLength { - return xerrors.Errorf("Slice value in field t.BlsMessages was too long") - } - - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajArray, uint64(len(t.BlsMessages)))); err != nil { - return err - } - for _, v := range t.BlsMessages { - if err := v.MarshalCBOR(w); err != nil { - return err - } - } - - // t.BlsMsgIncludes ([][]uint64) (slice) - if len(t.BlsMsgIncludes) > cbg.MaxLength { - return xerrors.Errorf("Slice value in field t.BlsMsgIncludes was too long") - } - - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajArray, 
uint64(len(t.BlsMsgIncludes)))); err != nil { - return err - } - for _, v := range t.BlsMsgIncludes { - if len(v) > cbg.MaxLength { - return xerrors.Errorf("Slice value in field v was too long") - } - - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajArray, uint64(len(v)))); err != nil { - return err - } - for _, v := range v { - if err := cbg.CborWriteHeader(w, cbg.MajUnsignedInt, v); err != nil { - return err - } - } - } - - // t.SecpkMessages ([]*types.SignedMessage) (slice) - if len(t.SecpkMessages) > cbg.MaxLength { - return xerrors.Errorf("Slice value in field t.SecpkMessages was too long") - } - - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajArray, uint64(len(t.SecpkMessages)))); err != nil { - return err - } - for _, v := range t.SecpkMessages { - if err := v.MarshalCBOR(w); err != nil { - return err - } - } - - // t.SecpkMsgIncludes ([][]uint64) (slice) - if len(t.SecpkMsgIncludes) > cbg.MaxLength { - return xerrors.Errorf("Slice value in field t.SecpkMsgIncludes was too long") - } - - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajArray, uint64(len(t.SecpkMsgIncludes)))); err != nil { - return err - } - for _, v := range t.SecpkMsgIncludes { - if len(v) > cbg.MaxLength { - return xerrors.Errorf("Slice value in field v was too long") - } - - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajArray, uint64(len(v)))); err != nil { - return err - } - for _, v := range v { - if err := cbg.CborWriteHeader(w, cbg.MajUnsignedInt, v); err != nil { - return err - } - } - } - return nil -} - -func (t *BSTipSet) UnmarshalCBOR(r io.Reader) error { - br := cbg.GetPeeker(r) - - maj, extra, err := cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 5 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.Blocks ([]*types.BlockHeader) (slice) - - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err - } - - 
if extra > cbg.MaxLength { - return fmt.Errorf("t.Blocks: array too large (%d)", extra) - } - - if maj != cbg.MajArray { - return fmt.Errorf("expected cbor array") - } - if extra > 0 { - t.Blocks = make([]*types.BlockHeader, extra) - } - for i := 0; i < int(extra); i++ { - - var v types.BlockHeader - if err := v.UnmarshalCBOR(br); err != nil { - return err - } - - t.Blocks[i] = &v - } - - // t.BlsMessages ([]*types.Message) (slice) - - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err - } - - if extra > cbg.MaxLength { - return fmt.Errorf("t.BlsMessages: array too large (%d)", extra) - } - - if maj != cbg.MajArray { - return fmt.Errorf("expected cbor array") - } - if extra > 0 { - t.BlsMessages = make([]*types.Message, extra) - } - for i := 0; i < int(extra); i++ { - - var v types.Message - if err := v.UnmarshalCBOR(br); err != nil { - return err - } - - t.BlsMessages[i] = &v - } - - // t.BlsMsgIncludes ([][]uint64) (slice) - - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err - } - - if extra > cbg.MaxLength { - return fmt.Errorf("t.BlsMsgIncludes: array too large (%d)", extra) - } - - if maj != cbg.MajArray { - return fmt.Errorf("expected cbor array") - } - if extra > 0 { - t.BlsMsgIncludes = make([][]uint64, extra) - } - for i := 0; i < int(extra); i++ { - { - var maj byte - var extra uint64 - var err error - - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err - } - - if extra > cbg.MaxLength { - return fmt.Errorf("t.BlsMsgIncludes[i]: array too large (%d)", extra) - } - - if maj != cbg.MajArray { - return fmt.Errorf("expected cbor array") - } - if extra > 0 { - t.BlsMsgIncludes[i] = make([]uint64, extra) - } - for j := 0; j < int(extra); j++ { - - maj, val, err := cbg.CborReadHeader(br) - if err != nil { - return xerrors.Errorf("failed to read uint64 for t.BlsMsgIncludes[i] slice: %w", err) - } - - if maj != cbg.MajUnsignedInt { - return xerrors.Errorf("value read for array t.BlsMsgIncludes[i] 
was not a uint, instead got %d", maj) - } - - t.BlsMsgIncludes[i][j] = val - } - - } - } - - // t.SecpkMessages ([]*types.SignedMessage) (slice) - - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err - } - - if extra > cbg.MaxLength { - return fmt.Errorf("t.SecpkMessages: array too large (%d)", extra) - } - - if maj != cbg.MajArray { - return fmt.Errorf("expected cbor array") - } - if extra > 0 { - t.SecpkMessages = make([]*types.SignedMessage, extra) - } - for i := 0; i < int(extra); i++ { - - var v types.SignedMessage - if err := v.UnmarshalCBOR(br); err != nil { - return err - } - - t.SecpkMessages[i] = &v - } - - // t.SecpkMsgIncludes ([][]uint64) (slice) - - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err - } - - if extra > cbg.MaxLength { - return fmt.Errorf("t.SecpkMsgIncludes: array too large (%d)", extra) - } - - if maj != cbg.MajArray { - return fmt.Errorf("expected cbor array") - } - if extra > 0 { - t.SecpkMsgIncludes = make([][]uint64, extra) - } - for i := 0; i < int(extra); i++ { - { - var maj byte - var extra uint64 - var err error - - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err - } - - if extra > cbg.MaxLength { - return fmt.Errorf("t.SecpkMsgIncludes[i]: array too large (%d)", extra) - } - - if maj != cbg.MajArray { - return fmt.Errorf("expected cbor array") - } - if extra > 0 { - t.SecpkMsgIncludes[i] = make([]uint64, extra) - } - for j := 0; j < int(extra); j++ { - - maj, val, err := cbg.CborReadHeader(br) - if err != nil { - return xerrors.Errorf("failed to read uint64 for t.SecpkMsgIncludes[i] slice: %w", err) - } - - if maj != cbg.MajUnsignedInt { - return xerrors.Errorf("value read for array t.SecpkMsgIncludes[i] was not a uint, instead got %d", maj) - } - - t.SecpkMsgIncludes[i][j] = val - } - - } - } - - return nil -} diff --git a/chain/checkpoint.go b/chain/checkpoint.go new file mode 100644 index 000000000..8f99d73e4 --- /dev/null +++ b/chain/checkpoint.go @@ -0,0 
+1,81 @@ +package chain + +import ( + "encoding/json" + + "github.com/filecoin-project/lotus/chain/types" + + "github.com/filecoin-project/lotus/node/modules/dtypes" + "github.com/ipfs/go-datastore" + "golang.org/x/xerrors" +) + +var CheckpointKey = datastore.NewKey("/chain/checks") + +func loadCheckpoint(ds dtypes.MetadataDS) (types.TipSetKey, error) { + haveChks, err := ds.Has(CheckpointKey) + if err != nil { + return types.EmptyTSK, err + } + + if !haveChks { + return types.EmptyTSK, nil + } + + tskBytes, err := ds.Get(CheckpointKey) + if err != nil { + return types.EmptyTSK, err + } + + var tsk types.TipSetKey + err = json.Unmarshal(tskBytes, &tsk) + if err != nil { + return types.EmptyTSK, err + } + + return tsk, err +} + +func (syncer *Syncer) SetCheckpoint(tsk types.TipSetKey) error { + if tsk == types.EmptyTSK { + return xerrors.Errorf("called with empty tsk") + } + + syncer.checkptLk.Lock() + defer syncer.checkptLk.Unlock() + + ts, err := syncer.ChainStore().LoadTipSet(tsk) + if err != nil { + return xerrors.Errorf("cannot find tipset: %w", err) + } + + hts := syncer.ChainStore().GetHeaviestTipSet() + anc, err := syncer.ChainStore().IsAncestorOf(ts, hts) + if err != nil { + return xerrors.Errorf("cannot determine whether checkpoint tipset is in main-chain: %w", err) + } + + if !hts.Equals(ts) && !anc { + return xerrors.Errorf("cannot mark tipset as checkpoint, since it isn't in the main-chain: %w", err) + } + + tskBytes, err := json.Marshal(tsk) + if err != nil { + return err + } + + err = syncer.ds.Put(CheckpointKey, tskBytes) + if err != nil { + return err + } + + syncer.checkpt = tsk + + return nil +} + +func (syncer *Syncer) GetCheckpoint() types.TipSetKey { + syncer.checkptLk.Lock() + defer syncer.checkptLk.Unlock() + return syncer.checkpt +} diff --git a/chain/deals/cbor_gen.go b/chain/deals/cbor_gen.go deleted file mode 100644 index 2a5f1f8a9..000000000 --- a/chain/deals/cbor_gen.go +++ /dev/null @@ -1,883 +0,0 @@ -package deals - -import ( - "fmt" 
- "io" - - "github.com/filecoin-project/lotus/chain/actors" - "github.com/filecoin-project/lotus/chain/types" - "github.com/libp2p/go-libp2p-core/peer" - cbg "github.com/whyrusleeping/cbor-gen" - xerrors "golang.org/x/xerrors" -) - -// Code generated by github.com/whyrusleeping/cbor-gen. DO NOT EDIT. - -var _ = xerrors.Errorf - -func (t *AskRequest) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write([]byte{129}); err != nil { - return err - } - - // t.Miner (address.Address) (struct) - if err := t.Miner.MarshalCBOR(w); err != nil { - return err - } - return nil -} - -func (t *AskRequest) UnmarshalCBOR(r io.Reader) error { - br := cbg.GetPeeker(r) - - maj, extra, err := cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 1 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.Miner (address.Address) (struct) - - { - - if err := t.Miner.UnmarshalCBOR(br); err != nil { - return err - } - - } - return nil -} - -func (t *AskResponse) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write([]byte{129}); err != nil { - return err - } - - // t.Ask (types.SignedStorageAsk) (struct) - if err := t.Ask.MarshalCBOR(w); err != nil { - return err - } - return nil -} - -func (t *AskResponse) UnmarshalCBOR(r io.Reader) error { - br := cbg.GetPeeker(r) - - maj, extra, err := cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 1 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.Ask (types.SignedStorageAsk) (struct) - - { - - pb, err := br.PeekByte() - if err != nil { - return err - } - if pb == cbg.CborNull[0] { - var nbuf [1]byte - if _, err := br.Read(nbuf[:]); err != nil { - 
return err - } - } else { - t.Ask = new(types.SignedStorageAsk) - if err := t.Ask.UnmarshalCBOR(br); err != nil { - return err - } - } - - } - return nil -} - -func (t *Proposal) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write([]byte{130}); err != nil { - return err - } - - // t.DealProposal (actors.StorageDealProposal) (struct) - if err := t.DealProposal.MarshalCBOR(w); err != nil { - return err - } - - // t.Piece (cid.Cid) (struct) - - if err := cbg.WriteCid(w, t.Piece); err != nil { - return xerrors.Errorf("failed to write cid field t.Piece: %w", err) - } - - return nil -} - -func (t *Proposal) UnmarshalCBOR(r io.Reader) error { - br := cbg.GetPeeker(r) - - maj, extra, err := cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 2 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.DealProposal (actors.StorageDealProposal) (struct) - - { - - pb, err := br.PeekByte() - if err != nil { - return err - } - if pb == cbg.CborNull[0] { - var nbuf [1]byte - if _, err := br.Read(nbuf[:]); err != nil { - return err - } - } else { - t.DealProposal = new(actors.StorageDealProposal) - if err := t.DealProposal.UnmarshalCBOR(br); err != nil { - return err - } - } - - } - // t.Piece (cid.Cid) (struct) - - { - - c, err := cbg.ReadCid(br) - if err != nil { - return xerrors.Errorf("failed to read cid field t.Piece: %w", err) - } - - t.Piece = c - - } - return nil -} - -func (t *Response) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write([]byte{132}); err != nil { - return err - } - - // t.State (uint64) (uint64) - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajUnsignedInt, uint64(t.State))); err != nil { - return err - } - - // t.Message (string) (string) - if len(t.Message) > cbg.MaxLength { - 
return xerrors.Errorf("Value in field t.Message was too long") - } - - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajTextString, uint64(len(t.Message)))); err != nil { - return err - } - if _, err := w.Write([]byte(t.Message)); err != nil { - return err - } - - // t.Proposal (cid.Cid) (struct) - - if err := cbg.WriteCid(w, t.Proposal); err != nil { - return xerrors.Errorf("failed to write cid field t.Proposal: %w", err) - } - - // t.StorageDealSubmission (types.SignedMessage) (struct) - if err := t.StorageDealSubmission.MarshalCBOR(w); err != nil { - return err - } - return nil -} - -func (t *Response) UnmarshalCBOR(r io.Reader) error { - br := cbg.GetPeeker(r) - - maj, extra, err := cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 4 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.State (uint64) (uint64) - - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.State = uint64(extra) - // t.Message (string) (string) - - { - sval, err := cbg.ReadString(br) - if err != nil { - return err - } - - t.Message = string(sval) - } - // t.Proposal (cid.Cid) (struct) - - { - - c, err := cbg.ReadCid(br) - if err != nil { - return xerrors.Errorf("failed to read cid field t.Proposal: %w", err) - } - - t.Proposal = c - - } - // t.StorageDealSubmission (types.SignedMessage) (struct) - - { - - pb, err := br.PeekByte() - if err != nil { - return err - } - if pb == cbg.CborNull[0] { - var nbuf [1]byte - if _, err := br.Read(nbuf[:]); err != nil { - return err - } - } else { - t.StorageDealSubmission = new(types.SignedMessage) - if err := t.StorageDealSubmission.UnmarshalCBOR(br); err != nil { - return err - } - } - - } - return nil -} - -func (t *SignedResponse) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := 
w.Write(cbg.CborNull) - return err - } - if _, err := w.Write([]byte{130}); err != nil { - return err - } - - // t.Response (deals.Response) (struct) - if err := t.Response.MarshalCBOR(w); err != nil { - return err - } - - // t.Signature (types.Signature) (struct) - if err := t.Signature.MarshalCBOR(w); err != nil { - return err - } - return nil -} - -func (t *SignedResponse) UnmarshalCBOR(r io.Reader) error { - br := cbg.GetPeeker(r) - - maj, extra, err := cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 2 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.Response (deals.Response) (struct) - - { - - if err := t.Response.UnmarshalCBOR(br); err != nil { - return err - } - - } - // t.Signature (types.Signature) (struct) - - { - - pb, err := br.PeekByte() - if err != nil { - return err - } - if pb == cbg.CborNull[0] { - var nbuf [1]byte - if _, err := br.Read(nbuf[:]); err != nil { - return err - } - } else { - t.Signature = new(types.Signature) - if err := t.Signature.UnmarshalCBOR(br); err != nil { - return err - } - } - - } - return nil -} - -func (t *ClientDealProposal) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write([]byte{136}); err != nil { - return err - } - - // t.Data (cid.Cid) (struct) - - if err := cbg.WriteCid(w, t.Data); err != nil { - return xerrors.Errorf("failed to write cid field t.Data: %w", err) - } - - // t.PricePerEpoch (types.BigInt) (struct) - if err := t.PricePerEpoch.MarshalCBOR(w); err != nil { - return err - } - - // t.ProposalExpiration (uint64) (uint64) - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajUnsignedInt, uint64(t.ProposalExpiration))); err != nil { - return err - } - - // t.Duration (uint64) (uint64) - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajUnsignedInt, uint64(t.Duration))); err != nil { - return err 
- } - - // t.ProviderAddress (address.Address) (struct) - if err := t.ProviderAddress.MarshalCBOR(w); err != nil { - return err - } - - // t.Client (address.Address) (struct) - if err := t.Client.MarshalCBOR(w); err != nil { - return err - } - - // t.MinerWorker (address.Address) (struct) - if err := t.MinerWorker.MarshalCBOR(w); err != nil { - return err - } - - // t.MinerID (peer.ID) (string) - if len(t.MinerID) > cbg.MaxLength { - return xerrors.Errorf("Value in field t.MinerID was too long") - } - - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajTextString, uint64(len(t.MinerID)))); err != nil { - return err - } - if _, err := w.Write([]byte(t.MinerID)); err != nil { - return err - } - return nil -} - -func (t *ClientDealProposal) UnmarshalCBOR(r io.Reader) error { - br := cbg.GetPeeker(r) - - maj, extra, err := cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 8 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.Data (cid.Cid) (struct) - - { - - c, err := cbg.ReadCid(br) - if err != nil { - return xerrors.Errorf("failed to read cid field t.Data: %w", err) - } - - t.Data = c - - } - // t.PricePerEpoch (types.BigInt) (struct) - - { - - if err := t.PricePerEpoch.UnmarshalCBOR(br); err != nil { - return err - } - - } - // t.ProposalExpiration (uint64) (uint64) - - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.ProposalExpiration = uint64(extra) - // t.Duration (uint64) (uint64) - - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.Duration = uint64(extra) - // t.ProviderAddress (address.Address) (struct) - - { - - if err := t.ProviderAddress.UnmarshalCBOR(br); err != nil { - return err - } 
- - } - // t.Client (address.Address) (struct) - - { - - if err := t.Client.UnmarshalCBOR(br); err != nil { - return err - } - - } - // t.MinerWorker (address.Address) (struct) - - { - - if err := t.MinerWorker.UnmarshalCBOR(br); err != nil { - return err - } - - } - // t.MinerID (peer.ID) (string) - - { - sval, err := cbg.ReadString(br) - if err != nil { - return err - } - - t.MinerID = peer.ID(sval) - } - return nil -} - -func (t *ClientDeal) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write([]byte{135}); err != nil { - return err - } - - // t.ProposalCid (cid.Cid) (struct) - - if err := cbg.WriteCid(w, t.ProposalCid); err != nil { - return xerrors.Errorf("failed to write cid field t.ProposalCid: %w", err) - } - - // t.Proposal (actors.StorageDealProposal) (struct) - if err := t.Proposal.MarshalCBOR(w); err != nil { - return err - } - - // t.State (uint64) (uint64) - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajUnsignedInt, uint64(t.State))); err != nil { - return err - } - - // t.Miner (peer.ID) (string) - if len(t.Miner) > cbg.MaxLength { - return xerrors.Errorf("Value in field t.Miner was too long") - } - - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajTextString, uint64(len(t.Miner)))); err != nil { - return err - } - if _, err := w.Write([]byte(t.Miner)); err != nil { - return err - } - - // t.MinerWorker (address.Address) (struct) - if err := t.MinerWorker.MarshalCBOR(w); err != nil { - return err - } - - // t.DealID (uint64) (uint64) - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajUnsignedInt, uint64(t.DealID))); err != nil { - return err - } - - // t.PublishMessage (types.SignedMessage) (struct) - if err := t.PublishMessage.MarshalCBOR(w); err != nil { - return err - } - return nil -} - -func (t *ClientDeal) UnmarshalCBOR(r io.Reader) error { - br := cbg.GetPeeker(r) - - maj, extra, err := cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != 
cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 7 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.ProposalCid (cid.Cid) (struct) - - { - - c, err := cbg.ReadCid(br) - if err != nil { - return xerrors.Errorf("failed to read cid field t.ProposalCid: %w", err) - } - - t.ProposalCid = c - - } - // t.Proposal (actors.StorageDealProposal) (struct) - - { - - if err := t.Proposal.UnmarshalCBOR(br); err != nil { - return err - } - - } - // t.State (uint64) (uint64) - - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.State = uint64(extra) - // t.Miner (peer.ID) (string) - - { - sval, err := cbg.ReadString(br) - if err != nil { - return err - } - - t.Miner = peer.ID(sval) - } - // t.MinerWorker (address.Address) (struct) - - { - - if err := t.MinerWorker.UnmarshalCBOR(br); err != nil { - return err - } - - } - // t.DealID (uint64) (uint64) - - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.DealID = uint64(extra) - // t.PublishMessage (types.SignedMessage) (struct) - - { - - pb, err := br.PeekByte() - if err != nil { - return err - } - if pb == cbg.CborNull[0] { - var nbuf [1]byte - if _, err := br.Read(nbuf[:]); err != nil { - return err - } - } else { - t.PublishMessage = new(types.SignedMessage) - if err := t.PublishMessage.UnmarshalCBOR(br); err != nil { - return err - } - } - - } - return nil -} - -func (t *MinerDeal) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write([]byte{135}); err != nil { - return err - } - - // t.Client (peer.ID) (string) - if len(t.Client) > cbg.MaxLength { - return xerrors.Errorf("Value in field t.Client was too long") - } - - if _, err := 
w.Write(cbg.CborEncodeMajorType(cbg.MajTextString, uint64(len(t.Client)))); err != nil { - return err - } - if _, err := w.Write([]byte(t.Client)); err != nil { - return err - } - - // t.Proposal (actors.StorageDealProposal) (struct) - if err := t.Proposal.MarshalCBOR(w); err != nil { - return err - } - - // t.ProposalCid (cid.Cid) (struct) - - if err := cbg.WriteCid(w, t.ProposalCid); err != nil { - return xerrors.Errorf("failed to write cid field t.ProposalCid: %w", err) - } - - // t.State (uint64) (uint64) - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajUnsignedInt, uint64(t.State))); err != nil { - return err - } - - // t.Ref (cid.Cid) (struct) - - if err := cbg.WriteCid(w, t.Ref); err != nil { - return xerrors.Errorf("failed to write cid field t.Ref: %w", err) - } - - // t.DealID (uint64) (uint64) - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajUnsignedInt, uint64(t.DealID))); err != nil { - return err - } - - // t.SectorID (uint64) (uint64) - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajUnsignedInt, uint64(t.SectorID))); err != nil { - return err - } - return nil -} - -func (t *MinerDeal) UnmarshalCBOR(r io.Reader) error { - br := cbg.GetPeeker(r) - - maj, extra, err := cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 7 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.Client (peer.ID) (string) - - { - sval, err := cbg.ReadString(br) - if err != nil { - return err - } - - t.Client = peer.ID(sval) - } - // t.Proposal (actors.StorageDealProposal) (struct) - - { - - if err := t.Proposal.UnmarshalCBOR(br); err != nil { - return err - } - - } - // t.ProposalCid (cid.Cid) (struct) - - { - - c, err := cbg.ReadCid(br) - if err != nil { - return xerrors.Errorf("failed to read cid field t.ProposalCid: %w", err) - } - - t.ProposalCid = c - - } - // t.State (uint64) (uint64) - - maj, extra, err = 
cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.State = uint64(extra) - // t.Ref (cid.Cid) (struct) - - { - - c, err := cbg.ReadCid(br) - if err != nil { - return xerrors.Errorf("failed to read cid field t.Ref: %w", err) - } - - t.Ref = c - - } - // t.DealID (uint64) (uint64) - - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.DealID = uint64(extra) - // t.SectorID (uint64) (uint64) - - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.SectorID = uint64(extra) - return nil -} - -func (t *StorageDataTransferVoucher) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write([]byte{130}); err != nil { - return err - } - - // t.Proposal (cid.Cid) (struct) - - if err := cbg.WriteCid(w, t.Proposal); err != nil { - return xerrors.Errorf("failed to write cid field t.Proposal: %w", err) - } - - // t.DealID (uint64) (uint64) - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajUnsignedInt, uint64(t.DealID))); err != nil { - return err - } - return nil -} - -func (t *StorageDataTransferVoucher) UnmarshalCBOR(r io.Reader) error { - br := cbg.GetPeeker(r) - - maj, extra, err := cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 2 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.Proposal (cid.Cid) (struct) - - { - - c, err := cbg.ReadCid(br) - if err != nil { - return xerrors.Errorf("failed to read cid field t.Proposal: %w", err) - } - - t.Proposal = c - - } - // t.DealID (uint64) (uint64) - - maj, extra, err = cbg.CborReadHeader(br) - if err 
!= nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.DealID = uint64(extra) - return nil -} diff --git a/chain/deals/client.go b/chain/deals/client.go deleted file mode 100644 index 709432f63..000000000 --- a/chain/deals/client.go +++ /dev/null @@ -1,311 +0,0 @@ -package deals - -import ( - "context" - - "github.com/ipfs/go-cid" - logging "github.com/ipfs/go-log" - "github.com/libp2p/go-libp2p-core/host" - inet "github.com/libp2p/go-libp2p-core/network" - "github.com/libp2p/go-libp2p-core/peer" - "golang.org/x/xerrors" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-cbor-util" - "github.com/filecoin-project/go-statestore" - "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/chain/actors" - "github.com/filecoin-project/lotus/chain/events" - "github.com/filecoin-project/lotus/chain/market" - "github.com/filecoin-project/lotus/chain/stmgr" - "github.com/filecoin-project/lotus/chain/store" - "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/chain/wallet" - "github.com/filecoin-project/lotus/node/impl/full" - "github.com/filecoin-project/lotus/node/modules/dtypes" - "github.com/filecoin-project/lotus/retrieval/discovery" -) - -var log = logging.Logger("deals") - -type ClientDeal struct { - ProposalCid cid.Cid - Proposal actors.StorageDealProposal - State api.DealState - Miner peer.ID - MinerWorker address.Address - DealID uint64 - - PublishMessage *types.SignedMessage - - s inet.Stream -} - -type Client struct { - sm *stmgr.StateManager - chain *store.ChainStore - h host.Host - w *wallet.Wallet - // dataTransfer - // TODO: once the data transfer module is complete, the - // client will listen to events on the data transfer module - // Because we are using only a fake DAGService - // implementation, there's no validation or events on the client side - dataTransfer dtypes.ClientDataTransfer - dag dtypes.ClientDAG - 
discovery *discovery.Local - events *events.Events - fm *market.FundMgr - - deals *statestore.StateStore - conns map[cid.Cid]inet.Stream - - incoming chan *ClientDeal - updated chan clientDealUpdate - - stop chan struct{} - stopped chan struct{} -} - -type clientDealUpdate struct { - newState api.DealState - id cid.Cid - err error - mut func(*ClientDeal) -} - -type clientApi struct { - full.ChainAPI - full.StateAPI -} - -func NewClient(sm *stmgr.StateManager, chain *store.ChainStore, h host.Host, w *wallet.Wallet, dag dtypes.ClientDAG, dataTransfer dtypes.ClientDataTransfer, discovery *discovery.Local, fm *market.FundMgr, deals dtypes.ClientDealStore, chainapi full.ChainAPI, stateapi full.StateAPI) *Client { - c := &Client{ - sm: sm, - chain: chain, - h: h, - w: w, - dataTransfer: dataTransfer, - dag: dag, - discovery: discovery, - fm: fm, - events: events.NewEvents(context.TODO(), &clientApi{chainapi, stateapi}), - - deals: deals, - conns: map[cid.Cid]inet.Stream{}, - - incoming: make(chan *ClientDeal, 16), - updated: make(chan clientDealUpdate, 16), - - stop: make(chan struct{}), - stopped: make(chan struct{}), - } - - return c -} - -func (c *Client) Run(ctx context.Context) { - go func() { - defer close(c.stopped) - - for { - select { - case deal := <-c.incoming: - c.onIncoming(deal) - case update := <-c.updated: - c.onUpdated(ctx, update) - case <-c.stop: - return - } - } - }() -} - -func (c *Client) onIncoming(deal *ClientDeal) { - log.Info("incoming deal") - - if _, ok := c.conns[deal.ProposalCid]; ok { - log.Errorf("tracking deal connection: already tracking connection for deal %s", deal.ProposalCid) - return - } - c.conns[deal.ProposalCid] = deal.s - - if err := c.deals.Begin(deal.ProposalCid, deal); err != nil { - // We may have re-sent the proposal - log.Errorf("deal tracking failed: %s", err) - c.failDeal(deal.ProposalCid, err) - return - } - - go func() { - c.updated <- clientDealUpdate{ - newState: api.DealUnknown, - id: deal.ProposalCid, - err: nil, - 
} - }() -} - -func (c *Client) onUpdated(ctx context.Context, update clientDealUpdate) { - log.Infof("Client deal %s updated state to %s", update.id, api.DealStates[update.newState]) - var deal ClientDeal - err := c.deals.Mutate(update.id, func(d *ClientDeal) error { - d.State = update.newState - if update.mut != nil { - update.mut(d) - } - deal = *d - return nil - }) - if update.err != nil { - log.Errorf("deal %s failed: %s", update.id, update.err) - c.failDeal(update.id, update.err) - return - } - if err != nil { - c.failDeal(update.id, err) - return - } - - switch update.newState { - case api.DealUnknown: // new - c.handle(ctx, deal, c.new, api.DealAccepted) - case api.DealAccepted: - c.handle(ctx, deal, c.accepted, api.DealStaged) - case api.DealStaged: - c.handle(ctx, deal, c.staged, api.DealSealing) - case api.DealSealing: - c.handle(ctx, deal, c.sealing, api.DealNoUpdate) - // TODO: DealComplete -> watch for faults, expiration, etc. - } -} - -type ClientDealProposal struct { - Data cid.Cid - - PricePerEpoch types.BigInt - ProposalExpiration uint64 - Duration uint64 - - ProviderAddress address.Address - Client address.Address - MinerWorker address.Address - MinerID peer.ID -} - -func (c *Client) Start(ctx context.Context, p ClientDealProposal) (cid.Cid, error) { - if err := c.fm.EnsureAvailable(ctx, p.Client, types.BigMul(p.PricePerEpoch, types.NewInt(p.Duration))); err != nil { - return cid.Undef, xerrors.Errorf("adding market funds failed: %w", err) - } - - commP, pieceSize, err := c.commP(ctx, p.Data) - if err != nil { - return cid.Undef, xerrors.Errorf("computing commP failed: %w", err) - } - - dealProposal := &actors.StorageDealProposal{ - PieceRef: commP, - PieceSize: uint64(pieceSize), - Client: p.Client, - Provider: p.ProviderAddress, - ProposalExpiration: p.ProposalExpiration, - Duration: p.Duration, - StoragePricePerEpoch: p.PricePerEpoch, - StorageCollateral: types.NewInt(uint64(pieceSize)), // TODO: real calc - } - - if err := api.SignWith(ctx, 
c.w.Sign, p.Client, dealProposal); err != nil { - return cid.Undef, xerrors.Errorf("signing deal proposal failed: %w", err) - } - - proposalNd, err := cborutil.AsIpld(dealProposal) - if err != nil { - return cid.Undef, xerrors.Errorf("getting proposal node failed: %w", err) - } - - s, err := c.h.NewStream(ctx, p.MinerID, DealProtocolID) - if err != nil { - return cid.Undef, xerrors.Errorf("connecting to storage provider failed: %w", err) - } - - proposal := &Proposal{ - DealProposal: dealProposal, - Piece: p.Data, - } - - if err := cborutil.WriteCborRPC(s, proposal); err != nil { - s.Reset() - return cid.Undef, xerrors.Errorf("sending proposal to storage provider failed: %w", err) - } - - deal := &ClientDeal{ - ProposalCid: proposalNd.Cid(), - Proposal: *dealProposal, - State: api.DealUnknown, - Miner: p.MinerID, - MinerWorker: p.MinerWorker, - - s: s, - } - - c.incoming <- deal - - return deal.ProposalCid, c.discovery.AddPeer(p.Data, discovery.RetrievalPeer{ - Address: dealProposal.Provider, - ID: deal.Miner, - }) -} - -func (c *Client) QueryAsk(ctx context.Context, p peer.ID, a address.Address) (*types.SignedStorageAsk, error) { - s, err := c.h.NewStream(ctx, p, AskProtocolID) - if err != nil { - return nil, xerrors.Errorf("failed to open stream to miner: %w", err) - } - - req := &AskRequest{ - Miner: a, - } - if err := cborutil.WriteCborRPC(s, req); err != nil { - return nil, xerrors.Errorf("failed to send ask request: %w", err) - } - - var out AskResponse - if err := cborutil.ReadCborRPC(s, &out); err != nil { - return nil, xerrors.Errorf("failed to read ask response: %w", err) - } - - if out.Ask == nil { - return nil, xerrors.Errorf("got no ask back") - } - - if out.Ask.Ask.Miner != a { - return nil, xerrors.Errorf("got back ask for wrong miner") - } - - if err := c.checkAskSignature(out.Ask); err != nil { - return nil, xerrors.Errorf("ask was not properly signed") - } - - return out.Ask, nil -} - -func (c *Client) List() ([]ClientDeal, error) { - var out 
[]ClientDeal - if err := c.deals.List(&out); err != nil { - return nil, err - } - return out, nil -} - -func (c *Client) GetDeal(d cid.Cid) (*ClientDeal, error) { - var out ClientDeal - if err := c.deals.Get(d, &out); err != nil { - return nil, err - } - return &out, nil -} - -func (c *Client) Stop() { - close(c.stop) - <-c.stopped -} diff --git a/chain/deals/client_states.go b/chain/deals/client_states.go deleted file mode 100644 index d19765f60..000000000 --- a/chain/deals/client_states.go +++ /dev/null @@ -1,233 +0,0 @@ -package deals - -import ( - "bytes" - "context" - - "golang.org/x/xerrors" - - "github.com/filecoin-project/go-cbor-util" - "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/build" - "github.com/filecoin-project/lotus/chain/actors" - "github.com/filecoin-project/lotus/chain/stmgr" - "github.com/filecoin-project/lotus/chain/types" -) - -type clientHandlerFunc func(ctx context.Context, deal ClientDeal) (func(*ClientDeal), error) - -func (c *Client) handle(ctx context.Context, deal ClientDeal, cb clientHandlerFunc, next api.DealState) { - go func() { - mut, err := cb(ctx, deal) - if err != nil { - next = api.DealError - } - - if err == nil && next == api.DealNoUpdate { - return - } - - select { - case c.updated <- clientDealUpdate{ - newState: next, - id: deal.ProposalCid, - err: err, - mut: mut, - }: - case <-c.stop: - } - }() -} - -func (c *Client) new(ctx context.Context, deal ClientDeal) (func(*ClientDeal), error) { - resp, err := c.readStorageDealResp(deal) - if err != nil { - return nil, err - } - - // TODO: verify StorageDealSubmission - - if err := c.disconnect(deal); err != nil { - return nil, err - } - - /* data transfer happens */ - if resp.State != api.DealAccepted { - return nil, xerrors.Errorf("deal wasn't accepted (State=%d)", resp.State) - } - - return func(info *ClientDeal) { - info.PublishMessage = resp.StorageDealSubmission - }, nil -} - -func (c *Client) accepted(ctx context.Context, deal ClientDeal) 
(func(*ClientDeal), error) { - log.Infow("DEAL ACCEPTED!") - - pubmsg := deal.PublishMessage.Message - pw, err := stmgr.GetMinerWorker(ctx, c.sm, nil, deal.Proposal.Provider) - if err != nil { - return nil, xerrors.Errorf("getting miner worker failed: %w", err) - } - - if pubmsg.From != pw { - return nil, xerrors.Errorf("deal wasn't published by storage provider: from=%s, provider=%s", pubmsg.From, deal.Proposal.Provider) - } - - if pubmsg.To != actors.StorageMarketAddress { - return nil, xerrors.Errorf("deal publish message wasn't set to StorageMarket actor (to=%s)", pubmsg.To) - } - - if pubmsg.Method != actors.SMAMethods.PublishStorageDeals { - return nil, xerrors.Errorf("deal publish message called incorrect method (method=%s)", pubmsg.Method) - } - - var params actors.PublishStorageDealsParams - if err := params.UnmarshalCBOR(bytes.NewReader(pubmsg.Params)); err != nil { - return nil, err - } - - dealIdx := -1 - for i, storageDeal := range params.Deals { - // TODO: make it less hacky - sd := storageDeal - eq, err := cborutil.Equals(&deal.Proposal, &sd) - if err != nil { - return nil, err - } - if eq { - dealIdx = i - break - } - } - - if dealIdx == -1 { - return nil, xerrors.Errorf("deal publish didn't contain our deal (message cid: %s)", deal.PublishMessage.Cid()) - } - - // TODO: timeout - _, ret, err := c.sm.WaitForMessage(ctx, deal.PublishMessage.Cid()) - if err != nil { - return nil, xerrors.Errorf("waiting for deal publish message: %w", err) - } - if ret.ExitCode != 0 { - return nil, xerrors.Errorf("deal publish failed: exit=%d", ret.ExitCode) - } - - var res actors.PublishStorageDealResponse - if err := res.UnmarshalCBOR(bytes.NewReader(ret.Return)); err != nil { - return nil, err - } - - return func(info *ClientDeal) { - info.DealID = res.DealIDs[dealIdx] - }, nil -} - -func (c *Client) staged(ctx context.Context, deal ClientDeal) (func(*ClientDeal), error) { - // TODO: Maybe wait for pre-commit - - return nil, nil -} - -func (c *Client) sealing(ctx 
context.Context, deal ClientDeal) (func(*ClientDeal), error) { - checkFunc := func(ts *types.TipSet) (done bool, more bool, err error) { - sd, err := stmgr.GetStorageDeal(ctx, c.sm, deal.DealID, ts) - if err != nil { - // TODO: This may be fine for some errors - return false, false, xerrors.Errorf("failed to look up deal on chain: %w", err) - } - - if sd.ActivationEpoch > 0 { - select { - case c.updated <- clientDealUpdate{ - newState: api.DealComplete, - id: deal.ProposalCid, - }: - case <-c.stop: - } - - return true, false, nil - } - - return false, true, nil - } - - called := func(msg *types.Message, rec *types.MessageReceipt, ts *types.TipSet, curH uint64) (more bool, err error) { - defer func() { - if err != nil { - select { - case c.updated <- clientDealUpdate{ - newState: api.DealComplete, - id: deal.ProposalCid, - err: xerrors.Errorf("handling applied event: %w", err), - }: - case <-c.stop: - } - } - }() - - if msg == nil { - log.Error("timed out waiting for deal activation... what now?") - return false, nil - } - - sd, err := stmgr.GetStorageDeal(ctx, c.sm, deal.DealID, ts) - if err != nil { - return false, xerrors.Errorf("failed to look up deal on chain: %w", err) - } - - if sd.ActivationEpoch == 0 { - return false, xerrors.Errorf("deal wasn't active: deal=%d, parentState=%s, h=%d", deal.DealID, ts.ParentState(), ts.Height()) - } - - log.Infof("Storage deal %d activated at epoch %d", deal.DealID, sd.ActivationEpoch) - - select { - case c.updated <- clientDealUpdate{ - newState: api.DealComplete, - id: deal.ProposalCid, - }: - case <-c.stop: - } - - return false, nil - } - - revert := func(ctx context.Context, ts *types.TipSet) error { - log.Warn("deal activation reverted; TODO: actually handle this!") - // TODO: Just go back to DealSealing? 
- return nil - } - - matchEvent := func(msg *types.Message) (bool, error) { - if msg.To != deal.Proposal.Provider { - return false, nil - } - - if msg.Method != actors.MAMethods.ProveCommitSector { - return false, nil - } - - var params actors.SectorProveCommitInfo - if err := params.UnmarshalCBOR(bytes.NewReader(msg.Params)); err != nil { - return false, err - } - - var found bool - for _, dealID := range params.DealIDs { - if dealID == deal.DealID { - found = true - break - } - } - - return found, nil - } - - if err := c.events.Called(checkFunc, called, revert, 3, build.SealRandomnessLookbackLimit, matchEvent); err != nil { - return nil, xerrors.Errorf("failed to set up called handler") - } - - return nil, nil -} diff --git a/chain/deals/client_utils.go b/chain/deals/client_utils.go deleted file mode 100644 index 944846ed0..000000000 --- a/chain/deals/client_utils.go +++ /dev/null @@ -1,171 +0,0 @@ -package deals - -import ( - "bytes" - "context" - "runtime" - - sectorbuilder "github.com/filecoin-project/go-sectorbuilder" - "github.com/ipfs/go-cid" - files "github.com/ipfs/go-ipfs-files" - unixfile "github.com/ipfs/go-unixfs/file" - "github.com/ipld/go-ipld-prime" - "github.com/libp2p/go-libp2p-core/peer" - "golang.org/x/xerrors" - - cborutil "github.com/filecoin-project/go-cbor-util" - "github.com/filecoin-project/go-statestore" - - "github.com/filecoin-project/lotus/datatransfer" - "github.com/filecoin-project/lotus/lib/padreader" - "github.com/filecoin-project/lotus/node/modules/dtypes" -) - -func (c *Client) failDeal(id cid.Cid, cerr error) { - if cerr == nil { - _, f, l, _ := runtime.Caller(1) - cerr = xerrors.Errorf("unknown error (fail called at %s:%d)", f, l) - } - - s, ok := c.conns[id] - if ok { - _ = s.Reset() - delete(c.conns, id) - } - - // TODO: store in some sort of audit log - log.Errorf("deal %s failed: %+v", id, cerr) -} - -func (c *Client) commP(ctx context.Context, data cid.Cid) ([]byte, uint64, error) { - root, err := c.dag.Get(ctx, data) - 
if err != nil { - log.Errorf("failed to get file root for deal: %s", err) - return nil, 0, err - } - - n, err := unixfile.NewUnixfsFile(ctx, c.dag, root) - if err != nil { - log.Errorf("cannot open unixfs file: %s", err) - return nil, 0, err - } - - uf, ok := n.(files.File) - if !ok { - // TODO: we probably got directory, how should we handle this in unixfs mode? - return nil, 0, xerrors.New("unsupported unixfs type") - } - - s, err := uf.Size() - if err != nil { - return nil, 0, err - } - - pr, psize := padreader.New(uf, uint64(s)) - - commp, err := sectorbuilder.GeneratePieceCommitment(pr, psize) - if err != nil { - return nil, 0, xerrors.Errorf("generating CommP: %w", err) - } - - return commp[:], psize, nil -} - -func (c *Client) readStorageDealResp(deal ClientDeal) (*Response, error) { - s, ok := c.conns[deal.ProposalCid] - if !ok { - // TODO: Try to re-establish the connection using query protocol - return nil, xerrors.Errorf("no connection to miner") - } - - var resp SignedResponse - if err := cborutil.ReadCborRPC(s, &resp); err != nil { - log.Errorw("failed to read Response message", "error", err) - return nil, err - } - - if err := resp.Verify(deal.MinerWorker); err != nil { - return nil, xerrors.Errorf("verifying response signature failed", err) - } - - if resp.Response.Proposal != deal.ProposalCid { - return nil, xerrors.Errorf("miner responded to a wrong proposal: %s != %s", resp.Response.Proposal, deal.ProposalCid) - } - - return &resp.Response, nil -} - -func (c *Client) disconnect(deal ClientDeal) error { - s, ok := c.conns[deal.ProposalCid] - if !ok { - return nil - } - - err := s.Close() - delete(c.conns, deal.ProposalCid) - return err -} - -var _ datatransfer.RequestValidator = &ClientRequestValidator{} - -// ClientRequestValidator validates data transfer requests for the client -// in a storage market -type ClientRequestValidator struct { - deals *statestore.StateStore -} - -// NewClientRequestValidator returns a new client request validator for 
the -// given datastore -func NewClientRequestValidator(deals dtypes.ClientDealStore) *ClientRequestValidator { - crv := &ClientRequestValidator{ - deals: deals, - } - return crv -} - -// ValidatePush validates a push request received from the peer that will send data -// Will always error because clients should not accept push requests from a provider -// in a storage deal (i.e. send data to client). -func (c *ClientRequestValidator) ValidatePush( - sender peer.ID, - voucher datatransfer.Voucher, - baseCid cid.Cid, - Selector ipld.Node) error { - return ErrNoPushAccepted -} - -// ValidatePull validates a pull request received from the peer that will receive data -// Will succeed only if: -// - voucher has correct type -// - voucher references an active deal -// - referenced deal matches the receiver (miner) -// - referenced deal matches the given base CID -// - referenced deal is in an acceptable state -func (c *ClientRequestValidator) ValidatePull( - receiver peer.ID, - voucher datatransfer.Voucher, - baseCid cid.Cid, - Selector ipld.Node) error { - dealVoucher, ok := voucher.(*StorageDataTransferVoucher) - if !ok { - return xerrors.Errorf("voucher type %s: %w", voucher.Identifier(), ErrWrongVoucherType) - } - - var deal ClientDeal - err := c.deals.Get(dealVoucher.Proposal, &deal) - if err != nil { - return xerrors.Errorf("Proposal CID %s: %w", dealVoucher.Proposal.String(), ErrNoDeal) - } - if deal.Miner != receiver { - return xerrors.Errorf("Deal Peer %s, Data Transfer Peer %s: %w", deal.Miner.String(), receiver.String(), ErrWrongPeer) - } - if !bytes.Equal(deal.Proposal.PieceRef, baseCid.Bytes()) { - return xerrors.Errorf("Deal Payload CID %s, Data Transfer CID %s: %w", string(deal.Proposal.PieceRef), baseCid.String(), ErrWrongPiece) - } - for _, state := range DataTransferStates { - if deal.State == state { - return nil - } - } - return xerrors.Errorf("Deal State %s: %w", deal.State, ErrInacceptableDealState) -} diff --git a/chain/deals/provider.go 
b/chain/deals/provider.go deleted file mode 100644 index 9e45ef8b2..000000000 --- a/chain/deals/provider.go +++ /dev/null @@ -1,294 +0,0 @@ -package deals - -import ( - "context" - "errors" - "sync" - - cid "github.com/ipfs/go-cid" - datastore "github.com/ipfs/go-datastore" - "github.com/ipfs/go-datastore/namespace" - inet "github.com/libp2p/go-libp2p-core/network" - "github.com/libp2p/go-libp2p-core/peer" - "golang.org/x/xerrors" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-cbor-util" - "github.com/filecoin-project/go-statestore" - "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/chain/actors" - "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/datatransfer" - "github.com/filecoin-project/lotus/node/modules/dtypes" - "github.com/filecoin-project/lotus/storage" - "github.com/filecoin-project/lotus/storage/sectorblocks" -) - -var ProviderDsPrefix = "/deals/provider" - -type MinerDeal struct { - Client peer.ID - Proposal actors.StorageDealProposal - ProposalCid cid.Cid - State api.DealState - - Ref cid.Cid - - DealID uint64 - SectorID uint64 // Set when State >= DealStaged - - s inet.Stream -} - -type Provider struct { - pricePerByteBlock types.BigInt // how much we want for storing one byte for one block - minPieceSize uint64 - - ask *types.SignedStorageAsk - askLk sync.Mutex - - secb *sectorblocks.SectorBlocks - sminer *storage.Miner - full api.FullNode - - // TODO: This will go away once storage market module + CAR - // is implemented - dag dtypes.StagingDAG - - // dataTransfer is the manager of data transfers used by this storage provider - dataTransfer dtypes.ProviderDataTransfer - - deals *statestore.StateStore - ds dtypes.MetadataDS - - conns map[cid.Cid]inet.Stream - - actor address.Address - - incoming chan MinerDeal - updated chan minerDealUpdate - stop chan struct{} - stopped chan struct{} -} - -type minerDealUpdate struct { - newState api.DealState - id 
cid.Cid - err error - mut func(*MinerDeal) -} - -var ( - // ErrDataTransferFailed means a data transfer for a deal failed - ErrDataTransferFailed = errors.New("deal data transfer failed") -) - -func NewProvider(ds dtypes.MetadataDS, sminer *storage.Miner, secb *sectorblocks.SectorBlocks, dag dtypes.StagingDAG, dataTransfer dtypes.ProviderDataTransfer, fullNode api.FullNode) (*Provider, error) { - addr, err := ds.Get(datastore.NewKey("miner-address")) - if err != nil { - return nil, err - } - minerAddress, err := address.NewFromBytes(addr) - if err != nil { - return nil, err - } - - h := &Provider{ - sminer: sminer, - dag: dag, - dataTransfer: dataTransfer, - full: fullNode, - secb: secb, - - pricePerByteBlock: types.NewInt(3), // TODO: allow setting - minPieceSize: 256, // TODO: allow setting (BUT KEEP MIN 256! (because of how we fill sectors up)) - - conns: map[cid.Cid]inet.Stream{}, - - incoming: make(chan MinerDeal), - updated: make(chan minerDealUpdate), - stop: make(chan struct{}), - stopped: make(chan struct{}), - - actor: minerAddress, - - deals: statestore.New(namespace.Wrap(ds, datastore.NewKey(ProviderDsPrefix))), - ds: ds, - } - - if err := h.tryLoadAsk(); err != nil { - return nil, err - } - - if h.ask == nil { - // TODO: we should be fine with this state, and just say it means 'not actively accepting deals' - // for now... 
lets just set a price - if err := h.SetPrice(types.NewInt(500_000_000), 1000000); err != nil { - return nil, xerrors.Errorf("failed setting a default price: %w", err) - } - } - - // register a data transfer event handler -- this will move deals from - // accepted to staged - h.dataTransfer.SubscribeToEvents(h.onDataTransferEvent) - - return h, nil -} - -func (p *Provider) Run(ctx context.Context) { - // TODO: restore state - - go func() { - defer log.Warn("quitting deal provider loop") - defer close(p.stopped) - - for { - select { - case deal := <-p.incoming: // DealAccepted - p.onIncoming(deal) - case update := <-p.updated: // DealStaged - p.onUpdated(ctx, update) - case <-p.stop: - return - } - } - }() -} - -func (p *Provider) onIncoming(deal MinerDeal) { - log.Info("incoming deal") - - p.conns[deal.ProposalCid] = deal.s - - if err := p.deals.Begin(deal.ProposalCid, &deal); err != nil { - // This can happen when client re-sends proposal - p.failDeal(deal.ProposalCid, err) - log.Errorf("deal tracking failed: %s", err) - return - } - - go func() { - p.updated <- minerDealUpdate{ - newState: api.DealAccepted, - id: deal.ProposalCid, - err: nil, - } - }() -} - -func (p *Provider) onUpdated(ctx context.Context, update minerDealUpdate) { - log.Infof("Deal %s updated state to %s", update.id, api.DealStates[update.newState]) - if update.err != nil { - log.Errorf("deal %s (newSt: %d) failed: %+v", update.id, update.newState, update.err) - p.failDeal(update.id, update.err) - return - } - var deal MinerDeal - err := p.deals.Mutate(update.id, func(d *MinerDeal) error { - d.State = update.newState - if update.mut != nil { - update.mut(d) - } - deal = *d - return nil - }) - if err != nil { - p.failDeal(update.id, err) - return - } - - switch update.newState { - case api.DealAccepted: - p.handle(ctx, deal, p.accept, api.DealNoUpdate) - case api.DealStaged: - p.handle(ctx, deal, p.staged, api.DealSealing) - case api.DealSealing: - p.handle(ctx, deal, p.sealing, api.DealComplete) 
- case api.DealComplete: - p.handle(ctx, deal, p.complete, api.DealNoUpdate) - } -} - -// onDataTransferEvent is the function called when an event occurs in a data -// transfer -- it reads the voucher to verify this even occurred in a storage -// market deal, then, based on the data transfer event that occurred, it generates -// and update message for the deal -- either moving to staged for a completion -// event or moving to error if a data transfer error occurs -func (p *Provider) onDataTransferEvent(event datatransfer.Event, channelState datatransfer.ChannelState) { - voucher, ok := channelState.Voucher().(*StorageDataTransferVoucher) - // if this event is for a transfer not related to storage, ignore - if !ok { - return - } - - // data transfer events for opening and progress do not affect deal state - var next api.DealState - var err error - var mut func(*MinerDeal) - switch event { - case datatransfer.Complete: - next = api.DealStaged - mut = func(deal *MinerDeal) { - deal.DealID = voucher.DealID - } - case datatransfer.Error: - next = api.DealFailed - err = ErrDataTransferFailed - default: - // the only events we care about are complete and error - return - } - - select { - case p.updated <- minerDealUpdate{ - newState: next, - id: voucher.Proposal, - err: err, - mut: mut, - }: - case <-p.stop: - } -} - -func (p *Provider) newDeal(s inet.Stream, proposal Proposal) (MinerDeal, error) { - proposalNd, err := cborutil.AsIpld(proposal.DealProposal) - if err != nil { - return MinerDeal{}, err - } - - return MinerDeal{ - Client: s.Conn().RemotePeer(), - Proposal: *proposal.DealProposal, - ProposalCid: proposalNd.Cid(), - State: api.DealUnknown, - - Ref: proposal.Piece, - - s: s, - }, nil -} - -func (p *Provider) HandleStream(s inet.Stream) { - log.Info("Handling storage deal proposal!") - - proposal, err := p.readProposal(s) - if err != nil { - log.Error(err) - s.Close() - return - } - - deal, err := p.newDeal(s, proposal) - if err != nil { - log.Errorf("%+v", err) 
- s.Close() - return - } - - p.incoming <- deal -} - -func (p *Provider) Stop() { - close(p.stop) - <-p.stopped -} diff --git a/chain/deals/provider_asks.go b/chain/deals/provider_asks.go deleted file mode 100644 index 1170eb28b..000000000 --- a/chain/deals/provider_asks.go +++ /dev/null @@ -1,160 +0,0 @@ -package deals - -import ( - "bytes" - "context" - "time" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-cbor-util" - "github.com/filecoin-project/lotus/chain/stmgr" - "github.com/filecoin-project/lotus/chain/types" - datastore "github.com/ipfs/go-datastore" - inet "github.com/libp2p/go-libp2p-core/network" - "golang.org/x/xerrors" -) - -func (p *Provider) SetPrice(price types.BigInt, ttlsecs int64) error { - p.askLk.Lock() - defer p.askLk.Unlock() - - var seqno uint64 - if p.ask != nil { - seqno = p.ask.Ask.SeqNo + 1 - } - - now := time.Now().Unix() - ask := &types.StorageAsk{ - Price: price, - Timestamp: uint64(now), - Expiry: uint64(now + ttlsecs), - Miner: p.actor, - SeqNo: seqno, - MinPieceSize: p.minPieceSize, - } - - ssa, err := p.signAsk(ask) - if err != nil { - return err - } - - return p.saveAsk(ssa) -} - -func (p *Provider) getAsk(m address.Address) *types.SignedStorageAsk { - p.askLk.Lock() - defer p.askLk.Unlock() - if m != p.actor { - return nil - } - - return p.ask -} - -func (p *Provider) HandleAskStream(s inet.Stream) { - defer s.Close() - var ar AskRequest - if err := cborutil.ReadCborRPC(s, &ar); err != nil { - log.Errorf("failed to read AskRequest from incoming stream: %s", err) - return - } - - resp := p.processAskRequest(&ar) - - if err := cborutil.WriteCborRPC(s, resp); err != nil { - log.Errorf("failed to write ask response: %s", err) - return - } -} - -func (p *Provider) processAskRequest(ar *AskRequest) *AskResponse { - return &AskResponse{ - Ask: p.getAsk(ar.Miner), - } -} - -var bestAskKey = datastore.NewKey("latest-ask") - -func (p *Provider) tryLoadAsk() error { - p.askLk.Lock() - defer p.askLk.Unlock() 
- - err := p.loadAsk() - if err != nil { - if xerrors.Is(err, datastore.ErrNotFound) { - log.Warn("no previous ask found, miner will not accept deals until a price is set") - return nil - } - return err - } - - return nil -} - -func (p *Provider) loadAsk() error { - askb, err := p.ds.Get(datastore.NewKey("latest-ask")) - if err != nil { - return xerrors.Errorf("failed to load most recent ask from disk: %w", err) - } - - var ssa types.SignedStorageAsk - if err := cborutil.ReadCborRPC(bytes.NewReader(askb), &ssa); err != nil { - return err - } - - p.ask = &ssa - return nil -} - -func (p *Provider) signAsk(a *types.StorageAsk) (*types.SignedStorageAsk, error) { - b, err := cborutil.Dump(a) - if err != nil { - return nil, err - } - - worker, err := p.getWorker(p.actor) - if err != nil { - return nil, xerrors.Errorf("failed to get worker to sign ask: %w", err) - } - - sig, err := p.full.WalletSign(context.TODO(), worker, b) - if err != nil { - return nil, err - } - - return &types.SignedStorageAsk{ - Ask: a, - Signature: sig, - }, nil -} - -func (p *Provider) saveAsk(a *types.SignedStorageAsk) error { - b, err := cborutil.Dump(a) - if err != nil { - return err - } - - if err := p.ds.Put(bestAskKey, b); err != nil { - return err - } - - p.ask = a - return nil -} - -func (c *Client) checkAskSignature(ask *types.SignedStorageAsk) error { - tss := c.sm.ChainStore().GetHeaviestTipSet().ParentState() - - w, err := stmgr.GetMinerWorkerRaw(context.TODO(), c.sm, tss, ask.Ask.Miner) - if err != nil { - return xerrors.Errorf("failed to get worker for miner in ask", err) - } - - sigb, err := cborutil.Dump(ask.Ask) - if err != nil { - return xerrors.Errorf("failed to re-serialize ask") - } - - return ask.Signature.Verify(w, sigb) - -} diff --git a/chain/deals/provider_states.go b/chain/deals/provider_states.go deleted file mode 100644 index 4faa44973..000000000 --- a/chain/deals/provider_states.go +++ /dev/null @@ -1,215 +0,0 @@ -package deals - -import ( - "bytes" - "context" - - 
ipldfree "github.com/ipld/go-ipld-prime/impl/free" - "github.com/ipld/go-ipld-prime/traversal/selector" - "github.com/ipld/go-ipld-prime/traversal/selector/builder" - - unixfile "github.com/ipfs/go-unixfs/file" - "golang.org/x/xerrors" - - "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/chain/actors" - "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/lib/padreader" - "github.com/filecoin-project/lotus/storage/sectorblocks" -) - -type providerHandlerFunc func(ctx context.Context, deal MinerDeal) (func(*MinerDeal), error) - -func (p *Provider) handle(ctx context.Context, deal MinerDeal, cb providerHandlerFunc, next api.DealState) { - go func() { - mut, err := cb(ctx, deal) - - if err == nil && next == api.DealNoUpdate { - return - } - - select { - case p.updated <- minerDealUpdate{ - newState: next, - id: deal.ProposalCid, - err: err, - mut: mut, - }: - case <-p.stop: - } - }() -} - -// ACCEPTED -func (p *Provider) accept(ctx context.Context, deal MinerDeal) (func(*MinerDeal), error) { - - head, err := p.full.ChainHead(ctx) - if err != nil { - return nil, err - } - if head.Height() >= deal.Proposal.ProposalExpiration { - return nil, xerrors.Errorf("deal proposal already expired") - } - - // TODO: check StorageCollateral - - minPrice := types.BigDiv(types.BigMul(p.ask.Ask.Price, types.NewInt(deal.Proposal.PieceSize)), types.NewInt(1<<30)) - if deal.Proposal.StoragePricePerEpoch.LessThan(minPrice) { - return nil, xerrors.Errorf("storage price per epoch less than asking price: %s < %s", deal.Proposal.StoragePricePerEpoch, minPrice) - } - - if deal.Proposal.PieceSize < p.ask.Ask.MinPieceSize { - return nil, xerrors.Errorf("piece size less than minimum required size: %d < %d", deal.Proposal.PieceSize, p.ask.Ask.MinPieceSize) - } - - // check market funds - clientMarketBalance, err := p.full.StateMarketBalance(ctx, deal.Proposal.Client, nil) - if err != nil { - return nil, xerrors.Errorf("getting client 
market balance failed: %w", err) - } - - // This doesn't guarantee that the client won't withdraw / lock those funds - // but it's a decent first filter - if clientMarketBalance.Available.LessThan(deal.Proposal.TotalStoragePrice()) { - return nil, xerrors.New("clientMarketBalance.Available too small") - } - - waddr, err := p.full.StateMinerWorker(ctx, deal.Proposal.Provider, nil) - if err != nil { - return nil, err - } - - // TODO: check StorageCollateral (may be too large (or too small)) - if err := p.full.MarketEnsureAvailable(ctx, waddr, deal.Proposal.StorageCollateral); err != nil { - return nil, err - } - - log.Info("publishing deal") - - params, err := actors.SerializeParams(&actors.PublishStorageDealsParams{ - Deals: []actors.StorageDealProposal{deal.Proposal}, - }) - if err != nil { - return nil, xerrors.Errorf("serializing PublishStorageDeals params failed: ", err) - } - - // TODO: We may want this to happen after fetching data - smsg, err := p.full.MpoolPushMessage(ctx, &types.Message{ - To: actors.StorageMarketAddress, - From: waddr, - Value: types.NewInt(0), - GasPrice: types.NewInt(0), - GasLimit: types.NewInt(1000000), - Method: actors.SMAMethods.PublishStorageDeals, - Params: params, - }) - if err != nil { - return nil, err - } - r, err := p.full.StateWaitMsg(ctx, smsg.Cid()) - if err != nil { - return nil, err - } - if r.Receipt.ExitCode != 0 { - return nil, xerrors.Errorf("publishing deal failed: exit %d", r.Receipt.ExitCode) - } - var resp actors.PublishStorageDealResponse - if err := resp.UnmarshalCBOR(bytes.NewReader(r.Receipt.Return)); err != nil { - return nil, err - } - if len(resp.DealIDs) != 1 { - return nil, xerrors.Errorf("got unexpected number of DealIDs from SMA") - } - - log.Infof("fetching data for a deal %d", resp.DealIDs[0]) - err = p.sendSignedResponse(&Response{ - State: api.DealAccepted, - - Proposal: deal.ProposalCid, - StorageDealSubmission: smsg, - }) - if err != nil { - return nil, err - } - - if err := p.disconnect(deal); 
err != nil { - log.Warnf("closing client connection: %+v", err) - } - - ssb := builder.NewSelectorSpecBuilder(ipldfree.NodeBuilder()) - - // this is the selector for "get the whole DAG" - // TODO: support storage deals with custom payload selectors - allSelector := ssb.ExploreRecursive(selector.RecursionLimitNone(), - ssb.ExploreAll(ssb.ExploreRecursiveEdge())).Node() - - // initiate a pull data transfer. This will complete asynchronously and the - // completion of the data transfer will trigger a change in deal state - // (see onDataTransferEvent) - _, err = p.dataTransfer.OpenPullDataChannel(ctx, - deal.Client, - &StorageDataTransferVoucher{Proposal: deal.ProposalCid, DealID: resp.DealIDs[0]}, - deal.Ref, - allSelector, - ) - if err != nil { - return nil, xerrors.Errorf("failed to open pull data channel: %w", err) - } - - return nil, nil -} - -// STAGED - -func (p *Provider) staged(ctx context.Context, deal MinerDeal) (func(*MinerDeal), error) { - root, err := p.dag.Get(ctx, deal.Ref) - if err != nil { - return nil, xerrors.Errorf("failed to get file root for deal: %s", err) - } - - // TODO: abstract this away into ReadSizeCloser + implement different modes - n, err := unixfile.NewUnixfsFile(ctx, p.dag, root) - if err != nil { - return nil, xerrors.Errorf("cannot open unixfs file: %s", err) - } - - uf, ok := n.(sectorblocks.UnixfsReader) - if !ok { - // we probably got directory, unsupported for now - return nil, xerrors.Errorf("unsupported unixfs file type") - } - - // TODO: uf.Size() is user input, not trusted - // This won't be useful / here after we migrate to putting CARs into sectors - size, err := uf.Size() - if err != nil { - return nil, xerrors.Errorf("getting unixfs file size: %w", err) - } - if padreader.PaddedSize(uint64(size)) != deal.Proposal.PieceSize { - return nil, xerrors.Errorf("deal.Proposal.PieceSize didn't match padded unixfs file size") - } - - sectorID, err := p.secb.AddUnixfsPiece(ctx, uf, deal.DealID) - if err != nil { - return nil, 
xerrors.Errorf("AddPiece failed: %s", err) - } - log.Warnf("New Sector: %d (deal %d)", sectorID, deal.DealID) - - return func(deal *MinerDeal) { - deal.SectorID = sectorID - }, nil -} - -// SEALING - -func (p *Provider) sealing(ctx context.Context, deal MinerDeal) (func(*MinerDeal), error) { - // TODO: consider waiting for seal to happen - - return nil, nil -} - -func (p *Provider) complete(ctx context.Context, deal MinerDeal) (func(*MinerDeal), error) { - // TODO: observe sector lifecycle, status, expiration.. - - return nil, nil -} diff --git a/chain/deals/provider_utils.go b/chain/deals/provider_utils.go deleted file mode 100644 index e1fcfcf32..000000000 --- a/chain/deals/provider_utils.go +++ /dev/null @@ -1,198 +0,0 @@ -package deals - -import ( - "bytes" - "context" - "runtime" - - "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/datatransfer" - "github.com/filecoin-project/lotus/node/modules/dtypes" - "github.com/ipld/go-ipld-prime" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-cbor-util" - "github.com/filecoin-project/go-statestore" - "github.com/filecoin-project/lotus/chain/actors" - "github.com/filecoin-project/lotus/chain/types" - - "github.com/ipfs/go-cid" - inet "github.com/libp2p/go-libp2p-core/network" - "github.com/libp2p/go-libp2p-core/peer" - "golang.org/x/xerrors" -) - -func (p *Provider) failDeal(id cid.Cid, cerr error) { - if err := p.deals.End(id); err != nil { - log.Warnf("deals.End: %s", err) - } - - if cerr == nil { - _, f, l, _ := runtime.Caller(1) - cerr = xerrors.Errorf("unknown error (fail called at %s:%d)", f, l) - } - - log.Warnf("deal %s failed: %s", id, cerr) - - err := p.sendSignedResponse(&Response{ - State: api.DealFailed, - Message: cerr.Error(), - Proposal: id, - }) - - s, ok := p.conns[id] - if ok { - _ = s.Reset() - delete(p.conns, id) - } - - if err != nil { - log.Warnf("notifying client about deal failure: %s", err) - } -} - -func (p *Provider) readProposal(s 
inet.Stream) (proposal Proposal, err error) { - if err := cborutil.ReadCborRPC(s, &proposal); err != nil { - log.Errorw("failed to read proposal message", "error", err) - return proposal, err - } - - if err := proposal.DealProposal.Verify(); err != nil { - return proposal, xerrors.Errorf("verifying StorageDealProposal: %w", err) - } - - if proposal.DealProposal.Provider != p.actor { - log.Errorf("proposal with wrong ProviderAddress: %s", proposal.DealProposal.Provider) - return proposal, err - } - - return -} - -func (p *Provider) sendSignedResponse(resp *Response) error { - s, ok := p.conns[resp.Proposal] - if !ok { - return xerrors.New("couldn't send response: not connected") - } - - msg, err := cborutil.Dump(resp) - if err != nil { - return xerrors.Errorf("serializing response: %w", err) - } - - worker, err := p.getWorker(p.actor) - if err != nil { - return err - } - - sig, err := p.full.WalletSign(context.TODO(), worker, msg) - if err != nil { - return xerrors.Errorf("failed to sign response message: %w", err) - } - - signedResponse := &SignedResponse{ - Response: *resp, - Signature: sig, - } - - err = cborutil.WriteCborRPC(s, signedResponse) - if err != nil { - // Assume client disconnected - s.Close() - delete(p.conns, resp.Proposal) - } - return err -} - -func (p *Provider) disconnect(deal MinerDeal) error { - s, ok := p.conns[deal.ProposalCid] - if !ok { - return nil - } - - err := s.Close() - delete(p.conns, deal.ProposalCid) - return err -} - -func (p *Provider) getWorker(miner address.Address) (address.Address, error) { - getworker := &types.Message{ - To: miner, - From: miner, - Method: actors.MAMethods.GetWorkerAddr, - } - r, err := p.full.StateCall(context.TODO(), getworker, nil) - if err != nil { - return address.Undef, xerrors.Errorf("getting worker address: %w", err) - } - - if r.ExitCode != 0 { - return address.Undef, xerrors.Errorf("getWorker call failed: %d", r.ExitCode) - } - - return address.NewFromBytes(r.Return) -} - -var _ 
datatransfer.RequestValidator = &ProviderRequestValidator{} - -// ProviderRequestValidator validates data transfer requests for the provider -// in a storage market -type ProviderRequestValidator struct { - deals *statestore.StateStore -} - -// NewProviderRequestValidator returns a new client request validator for the -// given datastore -func NewProviderRequestValidator(deals dtypes.ProviderDealStore) *ProviderRequestValidator { - return &ProviderRequestValidator{ - deals: deals, - } -} - -// ValidatePush validates a push request received from the peer that will send data -// Will succeed only if: -// - voucher has correct type -// - voucher references an active deal -// - referenced deal matches the client -// - referenced deal matches the given base CID -// - referenced deal is in an acceptable state -func (m *ProviderRequestValidator) ValidatePush( - sender peer.ID, - voucher datatransfer.Voucher, - baseCid cid.Cid, - Selector ipld.Node) error { - dealVoucher, ok := voucher.(*StorageDataTransferVoucher) - if !ok { - return xerrors.Errorf("voucher type %s: %w", voucher.Identifier(), ErrWrongVoucherType) - } - - var deal MinerDeal - err := m.deals.Get(dealVoucher.Proposal, &deal) - if err != nil { - return xerrors.Errorf("Proposal CID %s: %w", dealVoucher.Proposal.String(), ErrNoDeal) - } - if deal.Client != sender { - return xerrors.Errorf("Deal Peer %s, Data Transfer Peer %s: %w", deal.Client.String(), sender.String(), ErrWrongPeer) - } - - if !bytes.Equal(deal.Proposal.PieceRef, baseCid.Bytes()) { - return xerrors.Errorf("Deal Payload CID %s, Data Transfer CID %s: %w", string(deal.Proposal.PieceRef), baseCid.String(), ErrWrongPiece) - } - for _, state := range DataTransferStates { - if deal.State == state { - return nil - } - } - return xerrors.Errorf("Deal State %s: %w", deal.State, ErrInacceptableDealState) -} - -// ValidatePull validates a pull request received from the peer that will receive data. 
-// Will always error because providers should not accept pull requests from a client -// in a storage deal (i.e. send data to client). -func (m *ProviderRequestValidator) ValidatePull( - receiver peer.ID, - voucher datatransfer.Voucher, - baseCid cid.Cid, - Selector ipld.Node) error { - return ErrNoPullAccepted -} diff --git a/chain/deals/request_validation_test.go b/chain/deals/request_validation_test.go deleted file mode 100644 index 6da68d219..000000000 --- a/chain/deals/request_validation_test.go +++ /dev/null @@ -1,291 +0,0 @@ -package deals_test - -import ( - "fmt" - "math/rand" - "testing" - - "github.com/ipfs/go-cid" - "github.com/ipfs/go-datastore" - "github.com/ipfs/go-datastore/namespace" - dss "github.com/ipfs/go-datastore/sync" - blocksutil "github.com/ipfs/go-ipfs-blocksutil" - "github.com/libp2p/go-libp2p-core/peer" - xerrors "golang.org/x/xerrors" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-cbor-util" - "github.com/filecoin-project/go-statestore" - "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/chain/actors" - "github.com/filecoin-project/lotus/chain/deals" - "github.com/filecoin-project/lotus/chain/types" -) - -var blockGenerator = blocksutil.NewBlockGenerator() - -type wrongDTType struct { -} - -func (wrongDTType) ToBytes() ([]byte, error) { - return []byte{}, nil -} - -func (wrongDTType) FromBytes([]byte) error { - return fmt.Errorf("not implemented") -} - -func (wrongDTType) Identifier() string { - return "WrongDTTYPE" -} - -func uniqueStorageDealProposal() (actors.StorageDealProposal, error) { - clientAddr, err := address.NewIDAddress(uint64(rand.Int())) - if err != nil { - return actors.StorageDealProposal{}, err - } - providerAddr, err := address.NewIDAddress(uint64(rand.Int())) - if err != nil { - return actors.StorageDealProposal{}, err - } - return actors.StorageDealProposal{ - PieceRef: blockGenerator.Next().Cid().Bytes(), - Client: clientAddr, - Provider: providerAddr, - 
ProposerSignature: &types.Signature{ - Data: []byte("foo bar cat dog"), - Type: types.KTBLS, - }, - }, nil -} - -func newClientDeal(minerID peer.ID, state api.DealState) (deals.ClientDeal, error) { - newProposal, err := uniqueStorageDealProposal() - if err != nil { - return deals.ClientDeal{}, err - } - proposalNd, err := cborutil.AsIpld(&newProposal) - if err != nil { - return deals.ClientDeal{}, err - } - minerAddr, err := address.NewIDAddress(uint64(rand.Int())) - if err != nil { - return deals.ClientDeal{}, err - } - - return deals.ClientDeal{ - Proposal: newProposal, - ProposalCid: proposalNd.Cid(), - Miner: minerID, - MinerWorker: minerAddr, - State: state, - }, nil -} - -func newMinerDeal(clientID peer.ID, state api.DealState) (deals.MinerDeal, error) { - newProposal, err := uniqueStorageDealProposal() - if err != nil { - return deals.MinerDeal{}, err - } - proposalNd, err := cborutil.AsIpld(&newProposal) - if err != nil { - return deals.MinerDeal{}, err - } - ref, err := cid.Cast(newProposal.PieceRef) - if err != nil { - return deals.MinerDeal{}, err - } - - return deals.MinerDeal{ - Proposal: newProposal, - ProposalCid: proposalNd.Cid(), - Client: clientID, - State: state, - Ref: ref, - }, nil -} - -func TestClientRequestValidation(t *testing.T) { - ds := dss.MutexWrap(datastore.NewMapDatastore()) - state := statestore.New(namespace.Wrap(ds, datastore.NewKey("/deals/client"))) - - crv := deals.NewClientRequestValidator(state) - minerID := peer.ID("fakepeerid") - block := blockGenerator.Next() - t.Run("ValidatePush fails", func(t *testing.T) { - if !xerrors.Is(crv.ValidatePush(minerID, wrongDTType{}, block.Cid(), nil), deals.ErrNoPushAccepted) { - t.Fatal("Push should fail for the client request validator for storage deals") - } - }) - t.Run("ValidatePull fails deal not found", func(t *testing.T) { - proposal, err := uniqueStorageDealProposal() - if err != nil { - t.Fatal("error creating proposal") - } - proposalNd, err := cborutil.AsIpld(&proposal) - if 
err != nil { - t.Fatal("error serializing proposal") - } - pieceRef, err := cid.Cast(proposal.PieceRef) - if err != nil { - t.Fatal("unable to construct piece cid") - } - if !xerrors.Is(crv.ValidatePull(minerID, &deals.StorageDataTransferVoucher{proposalNd.Cid(), 1}, pieceRef, nil), deals.ErrNoDeal) { - t.Fatal("Pull should fail if there is no deal stored") - } - }) - t.Run("ValidatePull fails wrong client", func(t *testing.T) { - otherMiner := peer.ID("otherminer") - clientDeal, err := newClientDeal(otherMiner, api.DealAccepted) - if err != nil { - t.Fatal("error creating client deal") - } - if err := state.Begin(clientDeal.ProposalCid, &clientDeal); err != nil { - t.Fatal("deal tracking failed") - } - pieceRef, err := cid.Cast(clientDeal.Proposal.PieceRef) - if err != nil { - t.Fatal("unable to construct piece cid") - } - if !xerrors.Is(crv.ValidatePull(minerID, &deals.StorageDataTransferVoucher{clientDeal.ProposalCid, 1}, pieceRef, nil), deals.ErrWrongPeer) { - t.Fatal("Pull should fail if miner address is incorrect") - } - }) - t.Run("ValidatePull fails wrong piece ref", func(t *testing.T) { - clientDeal, err := newClientDeal(minerID, api.DealAccepted) - if err != nil { - t.Fatal("error creating client deal") - } - if err := state.Begin(clientDeal.ProposalCid, &clientDeal); err != nil { - t.Fatal("deal tracking failed") - } - if !xerrors.Is(crv.ValidatePull(minerID, &deals.StorageDataTransferVoucher{clientDeal.ProposalCid, 1}, blockGenerator.Next().Cid(), nil), deals.ErrWrongPiece) { - t.Fatal("Pull should fail if piece ref is incorrect") - } - }) - t.Run("ValidatePull fails wrong deal state", func(t *testing.T) { - clientDeal, err := newClientDeal(minerID, api.DealComplete) - if err != nil { - t.Fatal("error creating client deal") - } - if err := state.Begin(clientDeal.ProposalCid, &clientDeal); err != nil { - t.Fatal("deal tracking failed") - } - pieceRef, err := cid.Cast(clientDeal.Proposal.PieceRef) - if err != nil { - t.Fatal("unable to construct piece 
cid") - } - if !xerrors.Is(crv.ValidatePull(minerID, &deals.StorageDataTransferVoucher{clientDeal.ProposalCid, 1}, pieceRef, nil), deals.ErrInacceptableDealState) { - t.Fatal("Pull should fail if deal is in a state that cannot be data transferred") - } - }) - t.Run("ValidatePull succeeds", func(t *testing.T) { - clientDeal, err := newClientDeal(minerID, api.DealAccepted) - if err != nil { - t.Fatal("error creating client deal") - } - if err := state.Begin(clientDeal.ProposalCid, &clientDeal); err != nil { - t.Fatal("deal tracking failed") - } - pieceRef, err := cid.Cast(clientDeal.Proposal.PieceRef) - if err != nil { - t.Fatal("unable to construct piece cid") - } - if crv.ValidatePull(minerID, &deals.StorageDataTransferVoucher{clientDeal.ProposalCid, 1}, pieceRef, nil) != nil { - t.Fatal("Pull should should succeed when all parameters are correct") - } - }) -} - -func TestProviderRequestValidation(t *testing.T) { - ds := dss.MutexWrap(datastore.NewMapDatastore()) - state := statestore.New(namespace.Wrap(ds, datastore.NewKey("/deals/client"))) - - mrv := deals.NewProviderRequestValidator(state) - clientID := peer.ID("fakepeerid") - block := blockGenerator.Next() - t.Run("ValidatePull fails", func(t *testing.T) { - if !xerrors.Is(mrv.ValidatePull(clientID, wrongDTType{}, block.Cid(), nil), deals.ErrNoPullAccepted) { - t.Fatal("Pull should fail for the provider request validator for storage deals") - } - }) - - t.Run("ValidatePush fails deal not found", func(t *testing.T) { - proposal, err := uniqueStorageDealProposal() - if err != nil { - t.Fatal("error creating proposal") - } - proposalNd, err := cborutil.AsIpld(&proposal) - if err != nil { - t.Fatal("error serializing proposal") - } - pieceRef, err := cid.Cast(proposal.PieceRef) - if err != nil { - t.Fatal("unable to construct piece cid") - } - if !xerrors.Is(mrv.ValidatePush(clientID, &deals.StorageDataTransferVoucher{proposalNd.Cid(), 1}, pieceRef, nil), deals.ErrNoDeal) { - t.Fatal("Push should fail if there is 
no deal stored") - } - }) - t.Run("ValidatePush fails wrong miner", func(t *testing.T) { - otherClient := peer.ID("otherclient") - minerDeal, err := newMinerDeal(otherClient, api.DealAccepted) - if err != nil { - t.Fatal("error creating client deal") - } - if err := state.Begin(minerDeal.ProposalCid, &minerDeal); err != nil { - t.Fatal("deal tracking failed") - } - pieceRef, err := cid.Cast(minerDeal.Proposal.PieceRef) - if err != nil { - t.Fatal("unable to construct piece cid") - } - if !xerrors.Is(mrv.ValidatePush(clientID, &deals.StorageDataTransferVoucher{minerDeal.ProposalCid, 1}, pieceRef, nil), deals.ErrWrongPeer) { - t.Fatal("Push should fail if miner address is incorrect") - } - }) - t.Run("ValidatePush fails wrong piece ref", func(t *testing.T) { - minerDeal, err := newMinerDeal(clientID, api.DealAccepted) - if err != nil { - t.Fatal("error creating client deal") - } - if err := state.Begin(minerDeal.ProposalCid, &minerDeal); err != nil { - t.Fatal("deal tracking failed") - } - if !xerrors.Is(mrv.ValidatePush(clientID, &deals.StorageDataTransferVoucher{minerDeal.ProposalCid, 1}, blockGenerator.Next().Cid(), nil), deals.ErrWrongPiece) { - t.Fatal("Push should fail if piece ref is incorrect") - } - }) - t.Run("ValidatePush fails wrong deal state", func(t *testing.T) { - minerDeal, err := newMinerDeal(clientID, api.DealComplete) - if err != nil { - t.Fatal("error creating client deal") - } - if err := state.Begin(minerDeal.ProposalCid, &minerDeal); err != nil { - t.Fatal("deal tracking failed") - } - pieceRef, err := cid.Cast(minerDeal.Proposal.PieceRef) - if err != nil { - t.Fatal("unable to construct piece cid") - } - if !xerrors.Is(mrv.ValidatePush(clientID, &deals.StorageDataTransferVoucher{minerDeal.ProposalCid, 1}, pieceRef, nil), deals.ErrInacceptableDealState) { - t.Fatal("Push should fail if deal is in a state that cannot be data transferred") - } - }) - t.Run("ValidatePush succeeds", func(t *testing.T) { - minerDeal, err := newMinerDeal(clientID, 
api.DealAccepted) - if err != nil { - t.Fatal("error creating client deal") - } - if err := state.Begin(minerDeal.ProposalCid, &minerDeal); err != nil { - t.Fatal("deal tracking failed") - } - pieceRef, err := cid.Cast(minerDeal.Proposal.PieceRef) - if err != nil { - t.Fatal("unable to construct piece cid") - } - if mrv.ValidatePush(clientID, &deals.StorageDataTransferVoucher{minerDeal.ProposalCid, 1}, pieceRef, nil) != nil { - t.Fatal("Push should should succeed when all parameters are correct") - } - }) -} diff --git a/chain/deals/types.go b/chain/deals/types.go deleted file mode 100644 index 8a368e80d..000000000 --- a/chain/deals/types.go +++ /dev/null @@ -1,114 +0,0 @@ -package deals - -import ( - "bytes" - "errors" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-cbor-util" - "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/chain/actors" - "github.com/filecoin-project/lotus/chain/types" - "github.com/ipfs/go-cid" -) - -var ( - // ErrWrongVoucherType means the voucher was not the correct type can validate against - ErrWrongVoucherType = errors.New("cannot validate voucher type.") - - // ErrNoPushAccepted just means clients do not accept pushes for storage deals - ErrNoPushAccepted = errors.New("client should not receive data for a storage deal.") - - // ErrNoPullAccepted just means providers do not accept pulls for storage deals - ErrNoPullAccepted = errors.New("provider should not send data for a storage deal.") - - // ErrNoDeal means no active deal was found for this vouchers proposal cid - ErrNoDeal = errors.New("no deal found for this proposal.") - - // ErrWrongPeer means that the other peer for this data transfer request does not match - // the other peer for the deal - ErrWrongPeer = errors.New("data Transfer peer id and Deal peer id do not match.") - - // ErrWrongPiece means that the pieceref for this data transfer request does not match - // the one specified in the deal - ErrWrongPiece = 
errors.New("base CID for deal does not match CID for piece.") - - // ErrInacceptableDealState means the deal for this transfer is not in a deal state - // where transfer can be performed - ErrInacceptableDealState = errors.New("deal is not a in a state where deals are accepted.") - - // DataTransferStates are the states in which it would make sense to actually start a data transfer - DataTransferStates = []api.DealState{api.DealAccepted, api.DealUnknown} -) - -const DealProtocolID = "/fil/storage/mk/1.0.1" -const AskProtocolID = "/fil/storage/ask/1.0.1" - -type Proposal struct { - DealProposal *actors.StorageDealProposal - - Piece cid.Cid // Used for retrieving from the client -} - -type Response struct { - State api.DealState - - // DealProposalRejected - Message string - Proposal cid.Cid - - // DealAccepted - StorageDealSubmission *types.SignedMessage -} - -// TODO: Do we actually need this to be signed? -type SignedResponse struct { - Response Response - - Signature *types.Signature -} - -func (r *SignedResponse) Verify(addr address.Address) error { - b, err := cborutil.Dump(&r.Response) - if err != nil { - return err - } - - return r.Signature.Verify(addr, b) -} - -type AskRequest struct { - Miner address.Address -} - -type AskResponse struct { - Ask *types.SignedStorageAsk -} - -// StorageDataTransferVoucher is the voucher type for data transfers -// used by the storage market -type StorageDataTransferVoucher struct { - Proposal cid.Cid - DealID uint64 -} - -// ToBytes converts the StorageDataTransferVoucher to raw bytes -func (dv *StorageDataTransferVoucher) ToBytes() ([]byte, error) { - var buf bytes.Buffer - err := dv.MarshalCBOR(&buf) - if err != nil { - return nil, err - } - return buf.Bytes(), nil -} - -// FromBytes converts the StorageDataTransferVoucher to raw bytes -func (dv *StorageDataTransferVoucher) FromBytes(raw []byte) error { - r := bytes.NewReader(raw) - return dv.UnmarshalCBOR(r) -} - -// Identifier is the unique string identifier for a 
StorageDataTransferVoucher -func (dv *StorageDataTransferVoucher) Identifier() string { - return "StorageDataTransferVoucher" -} diff --git a/chain/events/events.go b/chain/events/events.go index 683dab5db..e35e91366 100644 --- a/chain/events/events.go +++ b/chain/events/events.go @@ -5,8 +5,9 @@ import ( "sync" "time" + "github.com/filecoin-project/go-state-types/abi" "github.com/ipfs/go-cid" - logging "github.com/ipfs/go-log" + logging "github.com/ipfs/go-log/v2" "golang.org/x/xerrors" "github.com/filecoin-project/go-address" @@ -18,8 +19,8 @@ import ( var log = logging.Logger("events") -// `curH`-`ts.Height` = `confidence` -type HeightHandler func(ctx context.Context, ts *types.TipSet, curH uint64) error +// HeightHandler `curH`-`ts.Height` = `confidence` +type HeightHandler func(ctx context.Context, ts *types.TipSet, curH abi.ChainEpoch) error type RevertHandler func(ctx context.Context, ts *types.TipSet) error type heightHandler struct { @@ -30,17 +31,19 @@ type heightHandler struct { revert RevertHandler } -type eventApi interface { - ChainNotify(context.Context) (<-chan []*store.HeadChange, error) +type eventAPI interface { + ChainNotify(context.Context) (<-chan []*api.HeadChange, error) ChainGetBlockMessages(context.Context, cid.Cid) (*api.BlockMessages, error) - ChainGetTipSetByHeight(context.Context, uint64, *types.TipSet) (*types.TipSet, error) - StateGetReceipt(context.Context, cid.Cid, *types.TipSet) (*types.MessageReceipt, error) + ChainGetTipSetByHeight(context.Context, abi.ChainEpoch, types.TipSetKey) (*types.TipSet, error) + ChainHead(context.Context) (*types.TipSet, error) + StateGetReceipt(context.Context, cid.Cid, types.TipSetKey) (*types.MessageReceipt, error) + ChainGetTipSet(context.Context, types.TipSetKey) (*types.TipSet, error) - StateGetActor(ctx context.Context, actor address.Address, ts *types.TipSet) (*types.Actor, error) // optional / for CalledMsg + StateGetActor(ctx context.Context, actor address.Address, tsk types.TipSetKey) 
(*types.Actor, error) // optional / for CalledMsg } type Events struct { - api eventApi + api eventAPI tsc *tipSetCache lk sync.Mutex @@ -49,13 +52,13 @@ type Events struct { readyOnce sync.Once heightEvents - calledEvents + *hcEvents } -func NewEvents(ctx context.Context, api eventApi) *Events { +func NewEvents(ctx context.Context, api eventAPI) *Events { gcConfidence := 2 * build.ForkLengthThreshold - tsc := newTSCache(gcConfidence, api.ChainGetTipSetByHeight) + tsc := newTSCache(gcConfidence, api) e := &Events{ api: api, @@ -65,25 +68,14 @@ func NewEvents(ctx context.Context, api eventApi) *Events { heightEvents: heightEvents{ tsc: tsc, ctx: ctx, - gcConfidence: uint64(gcConfidence), + gcConfidence: gcConfidence, heightTriggers: map[uint64]*heightHandler{}, - htTriggerHeights: map[uint64][]uint64{}, - htHeights: map[uint64][]uint64{}, + htTriggerHeights: map[abi.ChainEpoch][]uint64{}, + htHeights: map[abi.ChainEpoch][]uint64{}, }, - calledEvents: calledEvents{ - cs: api, - tsc: tsc, - ctx: ctx, - gcConfidence: uint64(gcConfidence), - - confQueue: map[triggerH]map[msgH][]*queuedEvent{}, - revertQueue: map[msgH][]triggerH{}, - triggers: map[triggerId]*callHandler{}, - matchers: map[triggerId][]MatchFunc{}, - timeouts: map[uint64]map[triggerId]int{}, - }, + hcEvents: newHCEvents(ctx, api, tsc, uint64(gcConfidence)), } e.ready.Add(1) @@ -108,7 +100,7 @@ func (e *Events) listenHeadChanges(ctx context.Context) { log.Warnf("not restarting listenHeadChanges: context error: %s", ctx.Err()) return } - time.Sleep(time.Second) + build.Clock.Sleep(time.Second) log.Info("restarting listenHeadChanges") } } @@ -141,6 +133,8 @@ func (e *Events) listenHeadChangesOnce(ctx context.Context) error { } e.readyOnce.Do(func() { + e.lastTs = cur[0].Val + e.ready.Done() }) @@ -160,6 +154,11 @@ func (e *Events) listenHeadChangesOnce(ctx context.Context) error { if err := e.headChange(rev, app); err != nil { log.Warnf("headChange failed: %s", err) } + + // sync with fake chainstore (for 
tests) + if fcs, ok := e.api.(interface{ notifDone() }); ok { + fcs.notifDone() + } } return nil @@ -177,5 +176,5 @@ func (e *Events) headChange(rev, app []*types.TipSet) error { return err } - return e.headChangeCalled(rev, app) + return e.processHeadChangeEvent(rev, app) } diff --git a/chain/events/events_called.go b/chain/events/events_called.go index 106251b81..753206093 100644 --- a/chain/events/events_called.go +++ b/chain/events/events_called.go @@ -5,69 +5,79 @@ import ( "math" "sync" + "github.com/filecoin-project/go-state-types/abi" "github.com/ipfs/go-cid" "golang.org/x/xerrors" - "github.com/filecoin-project/lotus/chain/store" "github.com/filecoin-project/lotus/chain/types" ) -const NoTimeout = math.MaxUint64 +const NoTimeout = math.MaxInt64 +const NoHeight = abi.ChainEpoch(-1) -type triggerId = uint64 +type triggerID = uint64 // msgH is the block height at which a message was present / event has happened -type msgH = uint64 +type msgH = abi.ChainEpoch // triggerH is the block height at which the listener will be notified about the // message (msgH+confidence) -type triggerH = uint64 +type triggerH = abi.ChainEpoch -// `ts` is the tipset, in which the `msg` is included. +type eventData interface{} + +// EventHandler arguments: +// `prevTs` is the previous tipset, eg the "from" tipset for a state change. +// `ts` is the event tipset, eg the tipset in which the `msg` is included. // `curH`-`ts.Height` = `confidence` -type CalledHandler func(msg *types.Message, rec *types.MessageReceipt, ts *types.TipSet, curH uint64) (more bool, err error) +type EventHandler func(data eventData, prevTs, ts *types.TipSet, curH abi.ChainEpoch) (more bool, err error) // CheckFunc is used for atomicity guarantees. 
If the condition the callbacks // wait for has already happened in tipset `ts` // // If `done` is true, timeout won't be triggered -// If `more` is false, no messages will be sent to CalledHandler (RevertHandler +// If `more` is false, no messages will be sent to EventHandler (RevertHandler // may still be called) type CheckFunc func(ts *types.TipSet) (done bool, more bool, err error) -type MatchFunc func(msg *types.Message) (bool, error) - -type callHandler struct { +// Keep track of information for an event handler +type handlerInfo struct { confidence int - timeout uint64 + timeout abi.ChainEpoch disabled bool // TODO: GC after gcConfidence reached - handle CalledHandler + handle EventHandler revert RevertHandler } +// When a change occurs, a queuedEvent is created and put into a queue +// until the required confidence is reached type queuedEvent struct { - trigger triggerId + trigger triggerID - h uint64 - msg *types.Message + prevH abi.ChainEpoch + h abi.ChainEpoch + data eventData called bool } -type calledEvents struct { - cs eventApi +// Manages chain head change events, which may be forward (new tipset added to +// chain) or backward (chain branch discarded in favour of heavier branch) +type hcEvents struct { + cs eventAPI tsc *tipSetCache ctx context.Context gcConfidence uint64 + lastTs *types.TipSet + lk sync.Mutex - ctr triggerId + ctr triggerID - triggers map[triggerId]*callHandler - matchers map[triggerId][]MatchFunc + triggers map[triggerID]*handlerInfo // maps block heights to events // [triggerH][msgH][event] @@ -76,27 +86,81 @@ type calledEvents struct { // [msgH][triggerH] revertQueue map[msgH][]triggerH - // [timeoutH+confidence][triggerId]{calls} - timeouts map[uint64]map[triggerId]int + // [timeoutH+confidence][triggerID]{calls} + timeouts map[abi.ChainEpoch]map[triggerID]int + + messageEvents + watcherEvents } -func (e *calledEvents) headChangeCalled(rev, app []*types.TipSet) error { +func newHCEvents(ctx context.Context, cs eventAPI, tsc 
*tipSetCache, gcConfidence uint64) *hcEvents { + e := hcEvents{ + ctx: ctx, + cs: cs, + tsc: tsc, + gcConfidence: gcConfidence, + + confQueue: map[triggerH]map[msgH][]*queuedEvent{}, + revertQueue: map[msgH][]triggerH{}, + triggers: map[triggerID]*handlerInfo{}, + timeouts: map[abi.ChainEpoch]map[triggerID]int{}, + } + + e.messageEvents = newMessageEvents(ctx, &e, cs) + e.watcherEvents = newWatcherEvents(ctx, &e, cs) + + return &e +} + +// Called when there is a change to the head with tipsets to be +// reverted / applied +func (e *hcEvents) processHeadChangeEvent(rev, app []*types.TipSet) error { + e.lk.Lock() + defer e.lk.Unlock() + for _, ts := range rev { e.handleReverts(ts) + e.lastTs = ts } for _, ts := range app { - // called triggers + // Check if the head change caused any state changes that we were + // waiting for + stateChanges := e.watcherEvents.checkStateChanges(e.lastTs, ts) - e.checkNewCalls(ts) - e.applyWithConfidence(ts) - e.applyTimeouts(ts) + // Queue up calls until there have been enough blocks to reach + // confidence on the state changes + for tid, data := range stateChanges { + e.queueForConfidence(tid, data, e.lastTs, ts) + } + + // Check if the head change included any new message calls + newCalls, err := e.messageEvents.checkNewCalls(ts) + if err != nil { + return err + } + + // Queue up calls until there have been enough blocks to reach + // confidence on the message calls + for tid, data := range newCalls { + e.queueForConfidence(tid, data, nil, ts) + } + + for at := e.lastTs.Height(); at <= ts.Height(); at++ { + // Apply any queued events and timeouts that were targeted at the + // current chain height + e.applyWithConfidence(ts, at) + e.applyTimeouts(ts) + } + + // Update the latest known tipset + e.lastTs = ts } return nil } -func (e *calledEvents) handleReverts(ts *types.TipSet) { +func (e *hcEvents) handleReverts(ts *types.TipSet) { reverts, ok := e.revertQueue[ts.Height()] if !ok { return // nothing to do @@ -112,7 +176,7 @@ func 
(e *calledEvents) handleReverts(ts *types.TipSet) { trigger := e.triggers[event.trigger] if err := trigger.revert(e.ctx, ts); err != nil { - log.Errorf("reverting chain trigger (call %s.%d() @H %d, called @ %d) failed: %s", event.msg.To, event.msg.Method, ts.Height(), triggerH, err) + log.Errorf("reverting chain trigger (@H %d, triggered @ %d) failed: %s", ts.Height(), triggerH, err) } } delete(e.confQueue[triggerH], ts.Height()) @@ -120,66 +184,46 @@ func (e *calledEvents) handleReverts(ts *types.TipSet) { delete(e.revertQueue, ts.Height()) } -func (e *calledEvents) checkNewCalls(ts *types.TipSet) { - e.messagesForTs(ts, func(msg *types.Message) { - // TODO: provide receipts +// Queue up events until the chain has reached a height that reflects the +// desired confidence +func (e *hcEvents) queueForConfidence(trigID uint64, data eventData, prevTs, ts *types.TipSet) { + trigger := e.triggers[trigID] - for tid, matchFns := range e.matchers { - var matched bool - for _, matchFn := range matchFns { - ok, err := matchFn(msg) - if err != nil { - log.Warnf("event matcher failed: %s") - continue - } - matched = ok + prevH := NoHeight + if prevTs != nil { + prevH = prevTs.Height() + } + appliedH := ts.Height() - if matched { - break - } - } - - if matched { - e.queueForConfidence(tid, msg, ts) - break - } - } - }) -} - -func (e *calledEvents) queueForConfidence(triggerId uint64, msg *types.Message, ts *types.TipSet) { - trigger := e.triggers[triggerId] - - // messages are not applied in the tipset they are included in - appliedH := ts.Height() + 1 - - triggerH := appliedH + uint64(trigger.confidence) + triggerH := appliedH + abi.ChainEpoch(trigger.confidence) byOrigH, ok := e.confQueue[triggerH] if !ok { - byOrigH = map[uint64][]*queuedEvent{} + byOrigH = map[abi.ChainEpoch][]*queuedEvent{} e.confQueue[triggerH] = byOrigH } byOrigH[appliedH] = append(byOrigH[appliedH], &queuedEvent{ - trigger: triggerId, + trigger: trigID, + prevH: prevH, h: appliedH, - msg: msg, + data: 
data, }) e.revertQueue[appliedH] = append(e.revertQueue[appliedH], triggerH) } -func (e *calledEvents) applyWithConfidence(ts *types.TipSet) { - byOrigH, ok := e.confQueue[ts.Height()] +// Apply any events that were waiting for this chain height for confidence +func (e *hcEvents) applyWithConfidence(ts *types.TipSet, height abi.ChainEpoch) { + byOrigH, ok := e.confQueue[height] if !ok { - return // no triggers at thin height + return // no triggers at this height } for origH, events := range byOrigH { triggerTs, err := e.tsc.get(origH) if err != nil { - log.Errorf("events: applyWithConfidence didn't find tipset for event; wanted %d; current %d", origH, ts.Height()) + log.Errorf("events: applyWithConfidence didn't find tipset for event; wanted %d; current %d", origH, height) } for _, event := range events { @@ -192,15 +236,20 @@ func (e *calledEvents) applyWithConfidence(ts *types.TipSet) { continue } - rec, err := e.cs.StateGetReceipt(e.ctx, event.msg.Cid(), ts) - if err != nil { - log.Error(err) - return + // Previous tipset - this is relevant for example in a state change + // from one tipset to another + var prevTs *types.TipSet + if event.prevH != NoHeight { + prevTs, err = e.tsc.get(event.prevH) + if err != nil { + log.Errorf("events: applyWithConfidence didn't find tipset for previous event; wanted %d; current %d", event.prevH, height) + continue + } } - more, err := trigger.handle(event.msg, rec, triggerTs, ts.Height()) + more, err := trigger.handle(event.data, prevTs, triggerTs, height) if err != nil { - log.Errorf("chain trigger (call %s.%d() @H %d, called @ %d) failed: %s", event.msg.To, event.msg.Method, origH, ts.Height(), err) + log.Errorf("chain trigger (@H %d, triggered @ %d) failed: %s", origH, height, err) continue // don't revert failed calls } @@ -216,24 +265,25 @@ func (e *calledEvents) applyWithConfidence(ts *types.TipSet) { } } -func (e *calledEvents) applyTimeouts(ts *types.TipSet) { +// Apply any timeouts that expire at this height +func (e 
*hcEvents) applyTimeouts(ts *types.TipSet) { triggers, ok := e.timeouts[ts.Height()] if !ok { return // nothing to do } - for triggerId, calls := range triggers { + for triggerID, calls := range triggers { if calls > 0 { continue // don't timeout if the method was called } - trigger := e.triggers[triggerId] + trigger := e.triggers[triggerID] if trigger.disabled { continue } - timeoutTs, err := e.tsc.get(ts.Height() - uint64(trigger.confidence)) + timeoutTs, err := e.tsc.get(ts.Height() - abi.ChainEpoch(trigger.confidence)) if err != nil { - log.Errorf("events: applyTimeouts didn't find tipset for event; wanted %d; current %d", ts.Height()-uint64(trigger.confidence), ts.Height()) + log.Errorf("events: applyTimeouts didn't find tipset for event; wanted %d; current %d", ts.Height()-abi.ChainEpoch(trigger.confidence), ts.Height()) } more, err := trigger.handle(nil, nil, timeoutTs, ts.Height()) @@ -246,12 +296,232 @@ func (e *calledEvents) applyTimeouts(ts *types.TipSet) { } } -func (e *calledEvents) messagesForTs(ts *types.TipSet, consume func(*types.Message)) { +// Listen for an event +// - CheckFunc: immediately checks if the event already occurred +// - EventHandler: called when the event has occurred, after confidence tipsets +// - RevertHandler: called if the chain head changes causing the event to revert +// - confidence: wait this many tipsets before calling EventHandler +// - timeout: at this chain height, timeout on waiting for this event +func (e *hcEvents) onHeadChanged(check CheckFunc, hnd EventHandler, rev RevertHandler, confidence int, timeout abi.ChainEpoch) (triggerID, error) { + e.lk.Lock() + defer e.lk.Unlock() + + // Check if the event has already occurred + ts, err := e.tsc.best() + if err != nil { + return 0, xerrors.Errorf("error getting best tipset: %w", err) + } + done, more, err := check(ts) + if err != nil { + return 0, xerrors.Errorf("called check error (h: %d): %w", ts.Height(), err) + } + if done { + timeout = NoTimeout + } + + // Create a 
trigger for the event + id := e.ctr + e.ctr++ + + e.triggers[id] = &handlerInfo{ + confidence: confidence, + timeout: timeout + abi.ChainEpoch(confidence), + + disabled: !more, + + handle: hnd, + revert: rev, + } + + // If there's a timeout, set up a timeout check at that height + if timeout != NoTimeout { + if e.timeouts[timeout+abi.ChainEpoch(confidence)] == nil { + e.timeouts[timeout+abi.ChainEpoch(confidence)] = map[uint64]int{} + } + e.timeouts[timeout+abi.ChainEpoch(confidence)][id] = 0 + } + + return id, nil +} + +// headChangeAPI is used to allow the composed event APIs to call back to hcEvents +// to listen for changes +type headChangeAPI interface { + onHeadChanged(check CheckFunc, hnd EventHandler, rev RevertHandler, confidence int, timeout abi.ChainEpoch) (triggerID, error) +} + +// watcherEvents watches for a state change +type watcherEvents struct { + ctx context.Context + cs eventAPI + hcAPI headChangeAPI + + lk sync.RWMutex + matchers map[triggerID]StateMatchFunc +} + +func newWatcherEvents(ctx context.Context, hcAPI headChangeAPI, cs eventAPI) watcherEvents { + return watcherEvents{ + ctx: ctx, + cs: cs, + hcAPI: hcAPI, + matchers: make(map[triggerID]StateMatchFunc), + } +} + +// Run each of the matchers against the previous and current state to see if +// there's a change +func (we *watcherEvents) checkStateChanges(oldState, newState *types.TipSet) map[triggerID]eventData { + we.lk.RLock() + defer we.lk.RUnlock() + + res := make(map[triggerID]eventData) + for tid, matchFn := range we.matchers { + ok, data, err := matchFn(oldState, newState) + if err != nil { + log.Errorf("event diff fn failed: %s", err) + continue + } + + if ok { + res[tid] = data + } + } + return res +} + +// StateChange represents a change in state +type StateChange interface{} + +// StateChangeHandler arguments: +// `oldTs` is the state "from" tipset +// `newTs` is the state "to" tipset +// `states` is the change in state +// `curH`-`ts.Height` = `confidence` +type 
StateChangeHandler func(oldTs, newTs *types.TipSet, states StateChange, curH abi.ChainEpoch) (more bool, err error) + +type StateMatchFunc func(oldTs, newTs *types.TipSet) (bool, StateChange, error) + +// StateChanged registers a callback which is triggered when a specified state +// change occurs or a timeout is reached. +// +// * `CheckFunc` callback is invoked immediately with a recent tipset, it +// returns two booleans - `done`, and `more`. +// +// * `done` should be true when some on-chain state change we are waiting +// for has happened. When `done` is set to true, timeout trigger is disabled. +// +// * `more` should be false when we don't want to receive new notifications +// through StateChangeHandler. Note that notifications may still be delivered to +// RevertHandler +// +// * `StateChangeHandler` is called when the specified state change was observed +// on-chain, and a confidence threshold was reached, or the specified `timeout` +// height was reached with no state change observed. When this callback is +// invoked on a timeout, `oldState` and `newState` are set to nil. +// This callback returns a boolean specifying whether further notifications +// should be sent, like `more` return param from `CheckFunc` above. +// +// * `RevertHandler` is called after apply handler, when we drop the tipset +// containing the message. The tipset passed as the argument is the tipset +// that is being dropped. Note that the event dropped may be re-applied +// in a different tipset in small amount of time. +// +// * `StateMatchFunc` is called against each tipset state. 
If there is a match, +// the state change is queued up until the confidence interval has elapsed (and +// `StateChangeHandler` is called) +func (we *watcherEvents) StateChanged(check CheckFunc, scHnd StateChangeHandler, rev RevertHandler, confidence int, timeout abi.ChainEpoch, mf StateMatchFunc) error { + hnd := func(data eventData, prevTs, ts *types.TipSet, height abi.ChainEpoch) (bool, error) { + states, ok := data.(StateChange) + if data != nil && !ok { + panic("expected StateChange") + } + + return scHnd(prevTs, ts, states, height) + } + + id, err := we.hcAPI.onHeadChanged(check, hnd, rev, confidence, timeout) + if err != nil { + return err + } + + we.lk.Lock() + defer we.lk.Unlock() + we.matchers[id] = mf + + return nil +} + +// messageEvents watches for message calls to actors +type messageEvents struct { + ctx context.Context + cs eventAPI + hcAPI headChangeAPI + + lk sync.RWMutex + matchers map[triggerID][]MsgMatchFunc +} + +func newMessageEvents(ctx context.Context, hcAPI headChangeAPI, cs eventAPI) messageEvents { + return messageEvents{ + ctx: ctx, + cs: cs, + hcAPI: hcAPI, + matchers: map[triggerID][]MsgMatchFunc{}, + } +} + +// Check if there are any new actor calls +func (me *messageEvents) checkNewCalls(ts *types.TipSet) (map[triggerID]eventData, error) { + pts, err := me.cs.ChainGetTipSet(me.ctx, ts.Parents()) // we actually care about messages in the parent tipset here + if err != nil { + log.Errorf("getting parent tipset in checkNewCalls: %s", err) + return nil, err + } + + me.lk.RLock() + defer me.lk.RUnlock() + + res := make(map[triggerID]eventData) + me.messagesForTs(pts, func(msg *types.Message) { + // TODO: provide receipts + + for tid, matchFns := range me.matchers { + var matched bool + var once bool + for _, matchFn := range matchFns { + matchOne, ok, err := matchFn(msg) + if err != nil { + log.Errorf("event matcher failed: %s", err) + continue + } + matched = ok + once = matchOne + + if matched { + break + } + } + + if matched { + 
res[tid] = msg + if once { + break + } + } + } + }) + + return res, nil +} + +// Get the messages in a tipset +func (me *messageEvents) messagesForTs(ts *types.TipSet, consume func(*types.Message)) { seen := map[cid.Cid]struct{}{} for _, tsb := range ts.Blocks() { - msgs, err := e.cs.ChainGetBlockMessages(context.TODO(), tsb.Cid()) + msgs, err := me.cs.ChainGetBlockMessages(context.TODO(), tsb.Cid()) if err != nil { log.Errorf("messagesForTs MessagesForBlock failed (ts.H=%d, Bcid:%s, B.Mcid:%s): %s", ts.Height(), tsb.Cid(), tsb.Messages, err) // this is quite bad, but probably better than missing all the other updates @@ -280,20 +550,27 @@ func (e *calledEvents) messagesForTs(ts *types.TipSet, consume func(*types.Messa } } -// Called registers a callbacks which are triggered when a specified method is +// MsgHandler arguments: +// `ts` is the tipset, in which the `msg` is included. +// `curH`-`ts.Height` = `confidence` +type MsgHandler func(msg *types.Message, rec *types.MessageReceipt, ts *types.TipSet, curH abi.ChainEpoch) (more bool, err error) + +type MsgMatchFunc func(msg *types.Message) (matchOnce bool, matched bool, err error) + +// Called registers a callback which is triggered when a specified method is // called on an actor, or a timeout is reached. // // * `CheckFunc` callback is invoked immediately with a recent tipset, it // returns two booleans - `done`, and `more`. // -// * `done` should be true when some on-chain action we are waiting for has -// happened. When `done` is set to true, timeout trigger is disabled. +// * `done` should be true when some on-chain action we are waiting for has +// happened. When `done` is set to true, timeout trigger is disabled. // -// * `more` should be false when we don't want to receive new notifications -// through CalledHandler. Note that notifications may still be delivered to -// RevertHandler +// * `more` should be false when we don't want to receive new notifications +// through MsgHandler. 
Note that notifications may still be delivered to +// RevertHandler // -// * `CalledHandler` is called when the specified event was observed on-chain, +// * `MsgHandler` is called when the specified event was observed on-chain, // and a confidence threshold was reached, or the specified `timeout` height // was reached with no events observed. When this callback is invoked on a // timeout, `msg` is set to nil. This callback returns a boolean specifying @@ -304,44 +581,38 @@ func (e *calledEvents) messagesForTs(ts *types.TipSet, consume func(*types.Messa // containing the message. The tipset passed as the argument is the tipset // that is being dropped. Note that the message dropped may be re-applied // in a different tipset in small amount of time. -func (e *calledEvents) Called(check CheckFunc, hnd CalledHandler, rev RevertHandler, confidence int, timeout uint64, mf MatchFunc) error { - e.lk.Lock() - defer e.lk.Unlock() - - ts := e.tsc.best() - done, more, err := check(ts) - if err != nil { - return xerrors.Errorf("called check error (h: %d): %w", ts.Height(), err) - } - if done { - timeout = NoTimeout - } - - id := e.ctr - e.ctr++ - - e.triggers[id] = &callHandler{ - confidence: confidence, - timeout: timeout + uint64(confidence), - - disabled: !more, - - handle: hnd, - revert: rev, - } - - e.matchers[id] = append(e.matchers[id], mf) - - if timeout != NoTimeout { - if e.timeouts[timeout+uint64(confidence)] == nil { - e.timeouts[timeout+uint64(confidence)] = map[uint64]int{} +// +// * `MsgMatchFunc` is called against each message. 
If there is a match, the +// message is queued up until the confidence interval has elapsed (and +// `MsgHandler` is called) +func (me *messageEvents) Called(check CheckFunc, msgHnd MsgHandler, rev RevertHandler, confidence int, timeout abi.ChainEpoch, mf MsgMatchFunc) error { + hnd := func(data eventData, prevTs, ts *types.TipSet, height abi.ChainEpoch) (bool, error) { + msg, ok := data.(*types.Message) + if data != nil && !ok { + panic("expected msg") } - e.timeouts[timeout+uint64(confidence)][id] = 0 + + rec, err := me.cs.StateGetReceipt(me.ctx, msg.Cid(), ts.Key()) + if err != nil { + return false, err + } + + return msgHnd(msg, rec, ts, height) } + id, err := me.hcAPI.onHeadChanged(check, hnd, rev, confidence, timeout) + if err != nil { + return err + } + + me.lk.Lock() + defer me.lk.Unlock() + me.matchers[id] = append(me.matchers[id], mf) + return nil } -func (e *calledEvents) CalledMsg(ctx context.Context, hnd CalledHandler, rev RevertHandler, confidence int, timeout uint64, msg store.ChainMsg) error { - return e.Called(e.CheckMsg(ctx, msg, hnd), hnd, rev, confidence, timeout, e.MatchMsg(msg.VMMessage())) +// Convenience function for checking and matching messages +func (me *messageEvents) CalledMsg(ctx context.Context, hnd MsgHandler, rev RevertHandler, confidence int, timeout abi.ChainEpoch, msg types.ChainMsg) error { + return me.Called(me.CheckMsg(ctx, msg, hnd), hnd, rev, confidence, timeout, me.MatchMsg(msg.VMMessage())) } diff --git a/chain/events/events_height.go b/chain/events/events_height.go index a407d167a..c8dd905d9 100644 --- a/chain/events/events_height.go +++ b/chain/events/events_height.go @@ -4,7 +4,9 @@ import ( "context" "sync" + "github.com/filecoin-project/go-state-types/abi" "go.opencensus.io/trace" + "golang.org/x/xerrors" "github.com/filecoin-project/lotus/chain/types" ) @@ -12,14 +14,14 @@ import ( type heightEvents struct { lk sync.Mutex tsc *tipSetCache - gcConfidence uint64 + gcConfidence abi.ChainEpoch - ctr triggerId + ctr 
triggerID - heightTriggers map[triggerId]*heightHandler + heightTriggers map[triggerID]*heightHandler - htTriggerHeights map[triggerH][]triggerId - htHeights map[msgH][]triggerId + htTriggerHeights map[triggerH][]triggerID + htHeights map[msgH][]triggerID ctx context.Context } @@ -31,15 +33,20 @@ func (e *heightEvents) headChangeAt(rev, app []*types.TipSet) error { span.AddAttributes(trace.Int64Attribute("reverts", int64(len(rev)))) span.AddAttributes(trace.Int64Attribute("applies", int64(len(app)))) + e.lk.Lock() + defer e.lk.Unlock() for _, ts := range rev { // TODO: log error if h below gcconfidence // revert height-based triggers - revert := func(h uint64, ts *types.TipSet) { + revert := func(h abi.ChainEpoch, ts *types.TipSet) { for _, tid := range e.htHeights[h] { ctx, span := trace.StartSpan(ctx, "events.HeightRevert") - err := e.heightTriggers[tid].revert(ctx, ts) + rev := e.heightTriggers[tid].revert + e.lk.Unlock() + err := rev(ctx, ts) + e.lk.Lock() e.heightTriggers[tid].called = false span.End() @@ -80,15 +87,14 @@ func (e *heightEvents) headChangeAt(rev, app []*types.TipSet) error { // height triggers - apply := func(h uint64, ts *types.TipSet) error { + apply := func(h abi.ChainEpoch, ts *types.TipSet) error { for _, tid := range e.htTriggerHeights[h] { hnd := e.heightTriggers[tid] if hnd.called { return nil } - hnd.called = true - triggerH := h - uint64(hnd.confidence) + triggerH := h - abi.ChainEpoch(hnd.confidence) incTs, err := e.tsc.getNonNull(triggerH) if err != nil { @@ -97,8 +103,11 @@ func (e *heightEvents) headChangeAt(rev, app []*types.TipSet) error { ctx, span := trace.StartSpan(ctx, "events.HeightApply") span.AddAttributes(trace.BoolAttribute("immediate", false)) - - err = hnd.handle(ctx, incTs, h) + handle := hnd.handle + e.lk.Unlock() + err = handle(ctx, incTs, h) + e.lk.Lock() + hnd.called = true span.End() if err != nil { @@ -135,17 +144,20 @@ func (e *heightEvents) headChangeAt(rev, app []*types.TipSet) error { } // ChainAt invokes 
the specified `HeightHandler` when the chain reaches the -// specified height+confidence threshold. If the chain is rolled-back under the -// specified height, `RevertHandler` will be called. +// specified height+confidence threshold. If the chain is rolled-back under the +// specified height, `RevertHandler` will be called. // // ts passed to handlers is the tipset at the specified, or above, if lower tipsets were null -func (e *heightEvents) ChainAt(hnd HeightHandler, rev RevertHandler, confidence int, h uint64) error { - +func (e *heightEvents) ChainAt(hnd HeightHandler, rev RevertHandler, confidence int, h abi.ChainEpoch) error { e.lk.Lock() // Tricky locking, check your locks if you modify this function! - bestH := e.tsc.best().Height() + best, err := e.tsc.best() + if err != nil { + return xerrors.Errorf("error getting best tipset: %w", err) + } - if bestH >= h+uint64(confidence) { + bestH := best.Height() + if bestH >= h+abi.ChainEpoch(confidence) { ts, err := e.tsc.getNonNull(h) if err != nil { log.Warnf("events.ChainAt: calling HandleFunc with nil tipset, not found in cache: %s", err) @@ -163,16 +175,20 @@ func (e *heightEvents) ChainAt(hnd HeightHandler, rev RevertHandler, confidence } e.lk.Lock() - bestH = e.tsc.best().Height() + best, err = e.tsc.best() + if err != nil { + return xerrors.Errorf("error getting best tipset: %w", err) + } + bestH = best.Height() } defer e.lk.Unlock() - if bestH >= h+uint64(confidence)+e.gcConfidence { + if bestH >= h+abi.ChainEpoch(confidence)+e.gcConfidence { return nil } - triggerAt := h + uint64(confidence) + triggerAt := h + abi.ChainEpoch(confidence) id := e.ctr e.ctr++ diff --git a/chain/events/events_test.go b/chain/events/events_test.go index a18c916e2..0e4fd34b2 100644 --- a/chain/events/events_test.go +++ b/chain/events/events_test.go @@ -3,18 +3,20 @@ package events import ( "context" "fmt" + "sync" "testing" - "time" - - "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/chain/store" 
"github.com/ipfs/go-cid" "github.com/multiformats/go-multihash" "github.com/stretchr/testify/require" "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/crypto" + + "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/chain/store" "github.com/filecoin-project/lotus/chain/types" ) @@ -31,28 +33,40 @@ type fakeMsg struct { type fakeCS struct { t *testing.T - h uint64 + h abi.ChainEpoch tsc *tipSetCache msgs map[cid.Cid]fakeMsg blkMsgs map[cid.Cid]cid.Cid + sync sync.Mutex + + tipsets map[types.TipSetKey]*types.TipSet + sub func(rev, app []*types.TipSet) } -func (fcs *fakeCS) StateGetReceipt(context.Context, cid.Cid, *types.TipSet) (*types.MessageReceipt, error) { +func (fcs *fakeCS) ChainHead(ctx context.Context) (*types.TipSet, error) { + panic("implement me") +} + +func (fcs *fakeCS) ChainGetTipSet(ctx context.Context, key types.TipSetKey) (*types.TipSet, error) { + return fcs.tipsets[key], nil +} + +func (fcs *fakeCS) StateGetReceipt(context.Context, cid.Cid, types.TipSetKey) (*types.MessageReceipt, error) { return nil, nil } -func (fcs *fakeCS) StateGetActor(ctx context.Context, actor address.Address, ts *types.TipSet) (*types.Actor, error) { +func (fcs *fakeCS) StateGetActor(ctx context.Context, actor address.Address, tsk types.TipSetKey) (*types.Actor, error) { panic("Not Implemented") } -func (fcs *fakeCS) ChainGetTipSetByHeight(context.Context, uint64, *types.TipSet) (*types.TipSet, error) { +func (fcs *fakeCS) ChainGetTipSetByHeight(context.Context, abi.ChainEpoch, types.TipSetKey) (*types.TipSet, error) { panic("Not Implemented") } -func makeTs(t *testing.T, h uint64, msgcid cid.Cid) *types.TipSet { +func (fcs *fakeCS) makeTs(t *testing.T, parents []cid.Cid, h abi.ChainEpoch, msgcid cid.Cid) *types.TipSet { a, _ := address.NewFromString("t00") b, _ := address.NewFromString("t02") var ts, err = 
types.NewTipSet([]*types.BlockHeader{ @@ -60,50 +74,63 @@ func makeTs(t *testing.T, h uint64, msgcid cid.Cid) *types.TipSet { Height: h, Miner: a, + Parents: parents, + Ticket: &types.Ticket{VRFProof: []byte{byte(h % 2)}}, ParentStateRoot: dummyCid, Messages: msgcid, ParentMessageReceipts: dummyCid, - BlockSig: &types.Signature{Type: types.KTBLS}, - BLSAggregate: types.Signature{Type: types.KTBLS}, + BlockSig: &crypto.Signature{Type: crypto.SigTypeBLS}, + BLSAggregate: &crypto.Signature{Type: crypto.SigTypeBLS}, }, { Height: h, Miner: b, + Parents: parents, + Ticket: &types.Ticket{VRFProof: []byte{byte((h + 1) % 2)}}, ParentStateRoot: dummyCid, Messages: msgcid, ParentMessageReceipts: dummyCid, - BlockSig: &types.Signature{Type: types.KTBLS}, - BLSAggregate: types.Signature{Type: types.KTBLS}, + BlockSig: &crypto.Signature{Type: crypto.SigTypeBLS}, + BLSAggregate: &crypto.Signature{Type: crypto.SigTypeBLS}, }, }) + if fcs.tipsets == nil { + fcs.tipsets = map[types.TipSetKey]*types.TipSet{} + } + fcs.tipsets[ts.Key()] = ts + require.NoError(t, err) return ts } -func (fcs *fakeCS) ChainNotify(context.Context) (<-chan []*store.HeadChange, error) { - out := make(chan []*store.HeadChange, 1) - out <- []*store.HeadChange{{Type: store.HCCurrent, Val: fcs.tsc.best()}} +func (fcs *fakeCS) ChainNotify(context.Context) (<-chan []*api.HeadChange, error) { + out := make(chan []*api.HeadChange, 1) + best, err := fcs.tsc.best() + if err != nil { + return nil, err + } + out <- []*api.HeadChange{{Type: store.HCCurrent, Val: best}} fcs.sub = func(rev, app []*types.TipSet) { - notif := make([]*store.HeadChange, len(rev)+len(app)) + notif := make([]*api.HeadChange, len(rev)+len(app)) for i, r := range rev { - notif[i] = &store.HeadChange{ + notif[i] = &api.HeadChange{ Type: store.HCRevert, Val: r, } } for i, r := range app { - notif[i+len(rev)] = &store.HeadChange{ + notif[i+len(rev)] = &api.HeadChange{ Type: store.HCApply, Val: r, } @@ -155,7 +182,8 @@ func (fcs *fakeCS) advance(rev, 
app int, msgs map[int]cid.Cid, nulls ...int) { / var revs []*types.TipSet for i := 0; i < rev; i++ { - ts := fcs.tsc.best() + ts, err := fcs.tsc.best() + require.NoError(fcs.t, err) if _, ok := nullm[int(ts.Height())]; !ok { revs = append(revs, ts) @@ -177,7 +205,9 @@ func (fcs *fakeCS) advance(rev, app int, msgs map[int]cid.Cid, nulls ...int) { / continue } - ts := makeTs(fcs.t, fcs.h, mc) + best, err := fcs.tsc.best() + require.NoError(fcs.t, err) + ts := fcs.makeTs(fcs.t, best.Key().Cids(), fcs.h, mc) require.NoError(fcs.t, fcs.tsc.add(ts)) if hasMsgs { @@ -187,11 +217,19 @@ func (fcs *fakeCS) advance(rev, app int, msgs map[int]cid.Cid, nulls ...int) { / apps = append(apps, ts) } + fcs.sync.Lock() + fcs.sub(revs, apps) - time.Sleep(100 * time.Millisecond) // TODO: :c + + fcs.sync.Lock() + fcs.sync.Unlock() //nolint:staticcheck } -var _ eventApi = &fakeCS{} +func (fcs *fakeCS) notifDone() { + fcs.sync.Unlock() +} + +var _ eventAPI = &fakeCS{} func TestAt(t *testing.T) { fcs := &fakeCS{ @@ -199,14 +237,14 @@ func TestAt(t *testing.T) { h: 1, tsc: newTSCache(2*build.ForkLengthThreshold, nil), } - require.NoError(t, fcs.tsc.add(makeTs(t, 1, dummyCid))) + require.NoError(t, fcs.tsc.add(fcs.makeTs(t, nil, 1, dummyCid))) events := NewEvents(context.Background(), fcs) var applied bool var reverted bool - err := events.ChainAt(func(_ context.Context, ts *types.TipSet, curH uint64) error { + err := events.ChainAt(func(_ context.Context, ts *types.TipSet, curH abi.ChainEpoch) error { require.Equal(t, 5, int(ts.Height())) require.Equal(t, 8, int(curH)) applied = true @@ -264,14 +302,14 @@ func TestAtDoubleTrigger(t *testing.T) { h: 1, tsc: newTSCache(2*build.ForkLengthThreshold, nil), } - require.NoError(t, fcs.tsc.add(makeTs(t, 1, dummyCid))) + require.NoError(t, fcs.tsc.add(fcs.makeTs(t, nil, 1, dummyCid))) events := NewEvents(context.Background(), fcs) var applied bool var reverted bool - err := events.ChainAt(func(_ context.Context, ts *types.TipSet, curH uint64) error 
{ + err := events.ChainAt(func(_ context.Context, ts *types.TipSet, curH abi.ChainEpoch) error { require.Equal(t, 5, int(ts.Height())) require.Equal(t, 8, int(curH)) applied = true @@ -306,15 +344,15 @@ func TestAtNullTrigger(t *testing.T) { h: 1, tsc: newTSCache(2*build.ForkLengthThreshold, nil), } - require.NoError(t, fcs.tsc.add(makeTs(t, 1, dummyCid))) + require.NoError(t, fcs.tsc.add(fcs.makeTs(t, nil, 1, dummyCid))) events := NewEvents(context.Background(), fcs) var applied bool var reverted bool - err := events.ChainAt(func(_ context.Context, ts *types.TipSet, curH uint64) error { - require.Equal(t, uint64(6), ts.Height()) + err := events.ChainAt(func(_ context.Context, ts *types.TipSet, curH abi.ChainEpoch) error { + require.Equal(t, abi.ChainEpoch(6), ts.Height()) require.Equal(t, 8, int(curH)) applied = true return nil @@ -340,14 +378,14 @@ func TestAtNullConf(t *testing.T) { h: 1, tsc: newTSCache(2*build.ForkLengthThreshold, nil), } - require.NoError(t, fcs.tsc.add(makeTs(t, 1, dummyCid))) + require.NoError(t, fcs.tsc.add(fcs.makeTs(t, nil, 1, dummyCid))) events := NewEvents(context.Background(), fcs) var applied bool var reverted bool - err := events.ChainAt(func(_ context.Context, ts *types.TipSet, curH uint64) error { + err := events.ChainAt(func(_ context.Context, ts *types.TipSet, curH abi.ChainEpoch) error { require.Equal(t, 5, int(ts.Height())) require.Equal(t, 8, int(curH)) applied = true @@ -379,7 +417,7 @@ func TestAtStart(t *testing.T) { h: 1, tsc: newTSCache(2*build.ForkLengthThreshold, nil), } - require.NoError(t, fcs.tsc.add(makeTs(t, 1, dummyCid))) + require.NoError(t, fcs.tsc.add(fcs.makeTs(t, nil, 1, dummyCid))) events := NewEvents(context.Background(), fcs) @@ -388,7 +426,7 @@ func TestAtStart(t *testing.T) { var applied bool var reverted bool - err := events.ChainAt(func(_ context.Context, ts *types.TipSet, curH uint64) error { + err := events.ChainAt(func(_ context.Context, ts *types.TipSet, curH abi.ChainEpoch) error { 
require.Equal(t, 5, int(ts.Height())) require.Equal(t, 8, int(curH)) applied = true @@ -413,7 +451,7 @@ func TestAtStartConfidence(t *testing.T) { h: 1, tsc: newTSCache(2*build.ForkLengthThreshold, nil), } - require.NoError(t, fcs.tsc.add(makeTs(t, 1, dummyCid))) + require.NoError(t, fcs.tsc.add(fcs.makeTs(t, nil, 1, dummyCid))) events := NewEvents(context.Background(), fcs) @@ -422,7 +460,7 @@ func TestAtStartConfidence(t *testing.T) { var applied bool var reverted bool - err := events.ChainAt(func(_ context.Context, ts *types.TipSet, curH uint64) error { + err := events.ChainAt(func(_ context.Context, ts *types.TipSet, curH abi.ChainEpoch) error { require.Equal(t, 5, int(ts.Height())) require.Equal(t, 11, int(curH)) applied = true @@ -443,15 +481,15 @@ func TestAtChained(t *testing.T) { h: 1, tsc: newTSCache(2*build.ForkLengthThreshold, nil), } - require.NoError(t, fcs.tsc.add(makeTs(t, 1, dummyCid))) + require.NoError(t, fcs.tsc.add(fcs.makeTs(t, nil, 1, dummyCid))) events := NewEvents(context.Background(), fcs) var applied bool var reverted bool - err := events.ChainAt(func(_ context.Context, ts *types.TipSet, curH uint64) error { - return events.ChainAt(func(_ context.Context, ts *types.TipSet, curH uint64) error { + err := events.ChainAt(func(_ context.Context, ts *types.TipSet, curH abi.ChainEpoch) error { + return events.ChainAt(func(_ context.Context, ts *types.TipSet, curH abi.ChainEpoch) error { require.Equal(t, 10, int(ts.Height())) applied = true return nil @@ -477,7 +515,7 @@ func TestAtChainedConfidence(t *testing.T) { h: 1, tsc: newTSCache(2*build.ForkLengthThreshold, nil), } - require.NoError(t, fcs.tsc.add(makeTs(t, 1, dummyCid))) + require.NoError(t, fcs.tsc.add(fcs.makeTs(t, nil, 1, dummyCid))) events := NewEvents(context.Background(), fcs) @@ -486,8 +524,8 @@ func TestAtChainedConfidence(t *testing.T) { var applied bool var reverted bool - err := events.ChainAt(func(_ context.Context, ts *types.TipSet, curH uint64) error { - return 
events.ChainAt(func(_ context.Context, ts *types.TipSet, curH uint64) error { + err := events.ChainAt(func(_ context.Context, ts *types.TipSet, curH abi.ChainEpoch) error { + return events.ChainAt(func(_ context.Context, ts *types.TipSet, curH abi.ChainEpoch) error { require.Equal(t, 10, int(ts.Height())) applied = true return nil @@ -511,7 +549,7 @@ func TestAtChainedConfidenceNull(t *testing.T) { h: 1, tsc: newTSCache(2*build.ForkLengthThreshold, nil), } - require.NoError(t, fcs.tsc.add(makeTs(t, 1, dummyCid))) + require.NoError(t, fcs.tsc.add(fcs.makeTs(t, nil, 1, dummyCid))) events := NewEvents(context.Background(), fcs) @@ -520,7 +558,7 @@ func TestAtChainedConfidenceNull(t *testing.T) { var applied bool var reverted bool - err := events.ChainAt(func(_ context.Context, ts *types.TipSet, curH uint64) error { + err := events.ChainAt(func(_ context.Context, ts *types.TipSet, curH abi.ChainEpoch) error { applied = true require.Equal(t, 6, int(ts.Height())) return nil @@ -534,9 +572,9 @@ func TestAtChainedConfidenceNull(t *testing.T) { require.Equal(t, false, reverted) } -func matchAddrMethod(to address.Address, m uint64) func(msg *types.Message) (bool, error) { - return func(msg *types.Message) (bool, error) { - return to == msg.To && m == msg.Method, nil +func matchAddrMethod(to address.Address, m abi.MethodNum) func(msg *types.Message) (matchOnce bool, matched bool, err error) { + return func(msg *types.Message) (matchOnce bool, matched bool, err error) { + return true, to == msg.To && m == msg.Method, nil } } @@ -549,7 +587,7 @@ func TestCalled(t *testing.T) { blkMsgs: map[cid.Cid]cid.Cid{}, tsc: newTSCache(2*build.ForkLengthThreshold, nil), } - require.NoError(t, fcs.tsc.add(makeTs(t, 1, dummyCid))) + require.NoError(t, fcs.tsc.add(fcs.makeTs(t, nil, 1, dummyCid))) events := NewEvents(context.Background(), fcs) @@ -560,11 +598,11 @@ func TestCalled(t *testing.T) { var applied, reverted bool var appliedMsg *types.Message var appliedTs *types.TipSet - var 
appliedH uint64 + var appliedH abi.ChainEpoch err = events.Called(func(ts *types.TipSet) (d bool, m bool, e error) { return false, true, nil - }, func(msg *types.Message, rec *types.MessageReceipt, ts *types.TipSet, curH uint64) (bool, error) { + }, func(msg *types.Message, rec *types.MessageReceipt, ts *types.TipSet, curH abi.ChainEpoch) (bool, error) { require.Equal(t, false, applied) applied = true appliedMsg = msg @@ -610,12 +648,12 @@ func TestCalled(t *testing.T) { require.Equal(t, false, applied) require.Equal(t, false, reverted) - require.Equal(t, uint64(7), appliedTs.Height()) + require.Equal(t, abi.ChainEpoch(7), appliedTs.Height()) require.Equal(t, "bafkqaaa", appliedTs.Blocks()[0].Messages.String()) - require.Equal(t, uint64(10), appliedH) + require.Equal(t, abi.ChainEpoch(10), appliedH) require.Equal(t, t0123, appliedMsg.To) require.Equal(t, uint64(1), appliedMsg.Nonce) - require.Equal(t, uint64(5), appliedMsg.Method) + require.Equal(t, abi.MethodNum(5), appliedMsg.Method) // revert some blocks, keep the message @@ -625,7 +663,7 @@ func TestCalled(t *testing.T) { // revert the message - fcs.advance(2, 1, nil) // H=7, we reverted ts with the msg + fcs.advance(2, 1, nil) // H=7, we reverted ts with the msg execution, but not the msg itself require.Equal(t, false, applied) require.Equal(t, true, reverted) @@ -639,24 +677,31 @@ func TestCalled(t *testing.T) { }, }) - fcs.advance(0, 5, map[int]cid.Cid{ // (confidence=3) + fcs.advance(0, 3, map[int]cid.Cid{ // (n2msg confidence=1) 0: n2msg, }) + require.Equal(t, true, applied) // msg from H=7, which had reverted execution + require.Equal(t, false, reverted) + require.Equal(t, abi.ChainEpoch(10), appliedH) + applied = false + + fcs.advance(0, 2, nil) // (confidence=3) + require.Equal(t, true, applied) require.Equal(t, false, reverted) applied = false - require.Equal(t, uint64(9), appliedTs.Height()) + require.Equal(t, abi.ChainEpoch(9), appliedTs.Height()) require.Equal(t, "bafkqaaa", 
appliedTs.Blocks()[0].Messages.String()) - require.Equal(t, uint64(12), appliedH) + require.Equal(t, abi.ChainEpoch(12), appliedH) require.Equal(t, t0123, appliedMsg.To) require.Equal(t, uint64(2), appliedMsg.Nonce) - require.Equal(t, uint64(5), appliedMsg.Method) + require.Equal(t, abi.MethodNum(5), appliedMsg.Method) // revert and apply at different height - fcs.advance(4, 6, map[int]cid.Cid{ // (confidence=3) + fcs.advance(8, 6, map[int]cid.Cid{ // (confidence=3) 1: n2msg, }) @@ -668,12 +713,12 @@ func TestCalled(t *testing.T) { reverted = false applied = false - require.Equal(t, uint64(11), appliedTs.Height()) + require.Equal(t, abi.ChainEpoch(7), appliedTs.Height()) require.Equal(t, "bafkqaaa", appliedTs.Blocks()[0].Messages.String()) - require.Equal(t, uint64(14), appliedH) + require.Equal(t, abi.ChainEpoch(10), appliedH) require.Equal(t, t0123, appliedMsg.To) require.Equal(t, uint64(2), appliedMsg.Nonce) - require.Equal(t, uint64(5), appliedMsg.Method) + require.Equal(t, abi.MethodNum(5), appliedMsg.Method) // call method again @@ -694,7 +739,7 @@ func TestCalled(t *testing.T) { }), }) - fcs.advance(1, 4, nil) // H=19, but message reverted + fcs.advance(2, 5, nil) // H=19, but message reverted require.Equal(t, false, applied) require.Equal(t, false, reverted) @@ -754,7 +799,7 @@ func TestCalledTimeout(t *testing.T) { blkMsgs: map[cid.Cid]cid.Cid{}, tsc: newTSCache(2*build.ForkLengthThreshold, nil), } - require.NoError(t, fcs.tsc.add(makeTs(t, 1, dummyCid))) + require.NoError(t, fcs.tsc.add(fcs.makeTs(t, nil, 1, dummyCid))) events := NewEvents(context.Background(), fcs) @@ -765,11 +810,11 @@ func TestCalledTimeout(t *testing.T) { err = events.Called(func(ts *types.TipSet) (d bool, m bool, e error) { return false, true, nil - }, func(msg *types.Message, rec *types.MessageReceipt, ts *types.TipSet, curH uint64) (bool, error) { + }, func(msg *types.Message, rec *types.MessageReceipt, ts *types.TipSet, curH abi.ChainEpoch) (bool, error) { called = true 
require.Nil(t, msg) - require.Equal(t, uint64(20), ts.Height()) - require.Equal(t, uint64(23), curH) + require.Equal(t, abi.ChainEpoch(20), ts.Height()) + require.Equal(t, abi.ChainEpoch(23), curH) return false, nil }, func(_ context.Context, ts *types.TipSet) error { t.Fatal("revert on timeout") @@ -794,17 +839,17 @@ func TestCalledTimeout(t *testing.T) { blkMsgs: map[cid.Cid]cid.Cid{}, tsc: newTSCache(2*build.ForkLengthThreshold, nil), } - require.NoError(t, fcs.tsc.add(makeTs(t, 1, dummyCid))) + require.NoError(t, fcs.tsc.add(fcs.makeTs(t, nil, 1, dummyCid))) events = NewEvents(context.Background(), fcs) err = events.Called(func(ts *types.TipSet) (d bool, m bool, e error) { return true, true, nil - }, func(msg *types.Message, rec *types.MessageReceipt, ts *types.TipSet, curH uint64) (bool, error) { + }, func(msg *types.Message, rec *types.MessageReceipt, ts *types.TipSet, curH abi.ChainEpoch) (bool, error) { called = true require.Nil(t, msg) - require.Equal(t, uint64(20), ts.Height()) - require.Equal(t, uint64(23), curH) + require.Equal(t, abi.ChainEpoch(20), ts.Height()) + require.Equal(t, abi.ChainEpoch(23), curH) return false, nil }, func(_ context.Context, ts *types.TipSet) error { t.Fatal("revert on timeout") @@ -828,7 +873,7 @@ func TestCalledOrder(t *testing.T) { blkMsgs: map[cid.Cid]cid.Cid{}, tsc: newTSCache(2*build.ForkLengthThreshold, nil), } - require.NoError(t, fcs.tsc.add(makeTs(t, 1, dummyCid))) + require.NoError(t, fcs.tsc.add(fcs.makeTs(t, nil, 1, dummyCid))) events := NewEvents(context.Background(), fcs) @@ -839,14 +884,14 @@ func TestCalledOrder(t *testing.T) { err = events.Called(func(ts *types.TipSet) (d bool, m bool, e error) { return false, true, nil - }, func(msg *types.Message, rec *types.MessageReceipt, ts *types.TipSet, curH uint64) (bool, error) { + }, func(msg *types.Message, rec *types.MessageReceipt, ts *types.TipSet, curH abi.ChainEpoch) (bool, error) { switch at { case 0: require.Equal(t, uint64(1), msg.Nonce) - require.Equal(t, 
uint64(4), ts.Height()) + require.Equal(t, abi.ChainEpoch(4), ts.Height()) case 1: require.Equal(t, uint64(2), msg.Nonce) - require.Equal(t, uint64(5), ts.Height()) + require.Equal(t, abi.ChainEpoch(5), ts.Height()) default: t.Fatal("apply should only get called twice, at: ", at) } @@ -855,9 +900,9 @@ func TestCalledOrder(t *testing.T) { }, func(_ context.Context, ts *types.TipSet) error { switch at { case 2: - require.Equal(t, uint64(5), ts.Height()) + require.Equal(t, abi.ChainEpoch(5), ts.Height()) case 3: - require.Equal(t, uint64(4), ts.Height()) + require.Equal(t, abi.ChainEpoch(4), ts.Height()) default: t.Fatal("revert should only get called twice, at: ", at) } @@ -881,3 +926,400 @@ func TestCalledOrder(t *testing.T) { fcs.advance(9, 1, nil) } + +func TestCalledNull(t *testing.T) { + fcs := &fakeCS{ + t: t, + h: 1, + + msgs: map[cid.Cid]fakeMsg{}, + blkMsgs: map[cid.Cid]cid.Cid{}, + tsc: newTSCache(2*build.ForkLengthThreshold, nil), + } + require.NoError(t, fcs.tsc.add(fcs.makeTs(t, nil, 1, dummyCid))) + + events := NewEvents(context.Background(), fcs) + + t0123, err := address.NewFromString("t0123") + require.NoError(t, err) + + more := true + var applied, reverted bool + + err = events.Called(func(ts *types.TipSet) (d bool, m bool, e error) { + return false, true, nil + }, func(msg *types.Message, rec *types.MessageReceipt, ts *types.TipSet, curH abi.ChainEpoch) (bool, error) { + require.Equal(t, false, applied) + applied = true + return more, nil + }, func(_ context.Context, ts *types.TipSet) error { + reverted = true + return nil + }, 3, 20, matchAddrMethod(t0123, 5)) + require.NoError(t, err) + + // create few blocks to make sure nothing get's randomly called + + fcs.advance(0, 4, nil) // H=5 + require.Equal(t, false, applied) + require.Equal(t, false, reverted) + + // create blocks with message (but below confidence threshold) + + fcs.advance(0, 3, map[int]cid.Cid{ // msg at H=6; H=8 (confidence=2) + 0: fcs.fakeMsgs(fakeMsg{ + bmsgs: []*types.Message{ 
+ {To: t0123, From: t0123, Method: 5, Nonce: 1}, + }, + }), + }) + + require.Equal(t, false, applied) + require.Equal(t, false, reverted) + + // create additional blocks so we are above confidence threshold, but with null tipset at the height + // of application + + fcs.advance(0, 3, nil, 10) // H=11 (confidence=3, apply) + + require.Equal(t, true, applied) + require.Equal(t, false, reverted) + applied = false + + fcs.advance(5, 1, nil, 10) + + require.Equal(t, false, applied) + require.Equal(t, true, reverted) +} + +func TestRemoveTriggersOnMessage(t *testing.T) { + fcs := &fakeCS{ + t: t, + h: 1, + + msgs: map[cid.Cid]fakeMsg{}, + blkMsgs: map[cid.Cid]cid.Cid{}, + tsc: newTSCache(2*build.ForkLengthThreshold, nil), + } + require.NoError(t, fcs.tsc.add(fcs.makeTs(t, nil, 1, dummyCid))) + + events := NewEvents(context.Background(), fcs) + + t0123, err := address.NewFromString("t0123") + require.NoError(t, err) + + more := true + var applied, reverted bool + + err = events.Called(func(ts *types.TipSet) (d bool, m bool, e error) { + return false, true, nil + }, func(msg *types.Message, rec *types.MessageReceipt, ts *types.TipSet, curH abi.ChainEpoch) (bool, error) { + require.Equal(t, false, applied) + applied = true + return more, nil + }, func(_ context.Context, ts *types.TipSet) error { + reverted = true + return nil + }, 3, 20, matchAddrMethod(t0123, 5)) + require.NoError(t, err) + + // create few blocks to make sure nothing get's randomly called + + fcs.advance(0, 4, nil) // H=5 + require.Equal(t, false, applied) + require.Equal(t, false, reverted) + + // create blocks with message (but below confidence threshold) + + fcs.advance(0, 3, map[int]cid.Cid{ // msg occurs at H=5, applied at H=6; H=8 (confidence=2) + 0: fcs.fakeMsgs(fakeMsg{ + bmsgs: []*types.Message{ + {To: t0123, From: t0123, Method: 5, Nonce: 1}, + }, + }), + }) + + require.Equal(t, false, applied) + require.Equal(t, false, reverted) + + // revert applied TS & message TS + fcs.advance(3, 1, nil) // 
H=6 (tipset message applied in reverted, AND message reverted) + require.Equal(t, false, applied) + require.Equal(t, false, reverted) + + // create additional blocks so we are above confidence threshold, but message not applied + // as it was reverted + fcs.advance(0, 5, nil) // H=11 (confidence=3, apply) + require.Equal(t, false, applied) + require.Equal(t, false, reverted) + + // create blocks with message again (but below confidence threshold) + + fcs.advance(0, 3, map[int]cid.Cid{ // msg occurs at H=12, applied at H=13; H=15 (confidence=2) + 0: fcs.fakeMsgs(fakeMsg{ + bmsgs: []*types.Message{ + {To: t0123, From: t0123, Method: 5, Nonce: 2}, + }, + }), + }) + require.Equal(t, false, applied) + require.Equal(t, false, reverted) + + // revert applied height TS, but don't remove message trigger + fcs.advance(2, 1, nil) // H=13 (tipset message applied in reverted, by tipset with message not reverted) + require.Equal(t, false, applied) + require.Equal(t, false, reverted) + + // create additional blocks so we are above confidence threshold + fcs.advance(0, 4, nil) // H=18 (confidence=3, apply) + + require.Equal(t, true, applied) + require.Equal(t, false, reverted) +} + +type testStateChange struct { + from string + to string +} + +func TestStateChanged(t *testing.T) { + fcs := &fakeCS{ + t: t, + h: 1, + + msgs: map[cid.Cid]fakeMsg{}, + blkMsgs: map[cid.Cid]cid.Cid{}, + tsc: newTSCache(2*build.ForkLengthThreshold, nil), + } + require.NoError(t, fcs.tsc.add(fcs.makeTs(t, nil, 1, dummyCid))) + + events := NewEvents(context.Background(), fcs) + + more := true + var applied, reverted bool + var appliedData StateChange + var appliedOldTs *types.TipSet + var appliedNewTs *types.TipSet + var appliedH abi.ChainEpoch + var matchData StateChange + + confidence := 3 + timeout := abi.ChainEpoch(20) + + err := events.StateChanged(func(ts *types.TipSet) (d bool, m bool, e error) { + return false, true, nil + }, func(oldTs, newTs *types.TipSet, data StateChange, curH abi.ChainEpoch) 
(bool, error) { + require.Equal(t, false, applied) + applied = true + appliedData = data + appliedOldTs = oldTs + appliedNewTs = newTs + appliedH = curH + return more, nil + }, func(_ context.Context, ts *types.TipSet) error { + reverted = true + return nil + }, confidence, timeout, func(oldTs, newTs *types.TipSet) (bool, StateChange, error) { + if matchData == nil { + return false, matchData, nil + } + + d := matchData + matchData = nil + return true, d, nil + }) + require.NoError(t, err) + + // create few blocks to make sure nothing get's randomly called + + fcs.advance(0, 4, nil) // H=5 + require.Equal(t, false, applied) + require.Equal(t, false, reverted) + + // create state change (but below confidence threshold) + matchData = testStateChange{from: "a", to: "b"} + fcs.advance(0, 3, nil) + + require.Equal(t, false, applied) + require.Equal(t, false, reverted) + + // create additional block so we are above confidence threshold + + fcs.advance(0, 2, nil) // H=10 (confidence=3, apply) + + require.Equal(t, true, applied) + require.Equal(t, false, reverted) + applied = false + + // dip below confidence (should not apply again) + fcs.advance(2, 2, nil) // H=10 (confidence=3, apply) + + require.Equal(t, false, applied) + require.Equal(t, false, reverted) + + // Change happens from 5 -> 6 + require.Equal(t, abi.ChainEpoch(5), appliedOldTs.Height()) + require.Equal(t, abi.ChainEpoch(6), appliedNewTs.Height()) + + // Actually applied (with confidence) at 9 + require.Equal(t, abi.ChainEpoch(9), appliedH) + + // Make sure the state change was correctly passed through + rcvd := appliedData.(testStateChange) + require.Equal(t, "a", rcvd.from) + require.Equal(t, "b", rcvd.to) +} + +func TestStateChangedRevert(t *testing.T) { + fcs := &fakeCS{ + t: t, + h: 1, + + msgs: map[cid.Cid]fakeMsg{}, + blkMsgs: map[cid.Cid]cid.Cid{}, + tsc: newTSCache(2*build.ForkLengthThreshold, nil), + } + require.NoError(t, fcs.tsc.add(fcs.makeTs(t, nil, 1, dummyCid))) + + events := 
NewEvents(context.Background(), fcs) + + more := true + var applied, reverted bool + var matchData StateChange + + confidence := 1 + timeout := abi.ChainEpoch(20) + + err := events.StateChanged(func(ts *types.TipSet) (d bool, m bool, e error) { + return false, true, nil + }, func(oldTs, newTs *types.TipSet, data StateChange, curH abi.ChainEpoch) (bool, error) { + require.Equal(t, false, applied) + applied = true + return more, nil + }, func(_ context.Context, ts *types.TipSet) error { + reverted = true + return nil + }, confidence, timeout, func(oldTs, newTs *types.TipSet) (bool, StateChange, error) { + if matchData == nil { + return false, matchData, nil + } + + d := matchData + matchData = nil + return true, d, nil + }) + require.NoError(t, err) + + fcs.advance(0, 2, nil) // H=3 + + // Make a state change from TS at height 3 to TS at height 4 + matchData = testStateChange{from: "a", to: "b"} + fcs.advance(0, 1, nil) // H=4 + + // Haven't yet reached confidence + require.Equal(t, false, applied) + require.Equal(t, false, reverted) + + // Advance to reach confidence level + fcs.advance(0, 1, nil) // H=5 + + // Should now have called the handler + require.Equal(t, true, applied) + require.Equal(t, false, reverted) + applied = false + + // Advance 3 more TS + fcs.advance(0, 3, nil) // H=8 + + require.Equal(t, false, applied) + require.Equal(t, false, reverted) + + // Regress but not so far as to cause a revert + fcs.advance(3, 1, nil) // H=6 + + require.Equal(t, false, applied) + require.Equal(t, false, reverted) + + // Regress back to state where change happened + fcs.advance(3, 1, nil) // H=4 + + // Expect revert to have happened + require.Equal(t, false, applied) + require.Equal(t, true, reverted) +} + +func TestStateChangedTimeout(t *testing.T) { + fcs := &fakeCS{ + t: t, + h: 1, + + msgs: map[cid.Cid]fakeMsg{}, + blkMsgs: map[cid.Cid]cid.Cid{}, + tsc: newTSCache(2*build.ForkLengthThreshold, nil), + } + require.NoError(t, fcs.tsc.add(fcs.makeTs(t, nil, 1, 
dummyCid))) + + events := NewEvents(context.Background(), fcs) + + called := false + + err := events.StateChanged(func(ts *types.TipSet) (d bool, m bool, e error) { + return false, true, nil + }, func(oldTs, newTs *types.TipSet, data StateChange, curH abi.ChainEpoch) (bool, error) { + called = true + require.Nil(t, data) + require.Equal(t, abi.ChainEpoch(20), newTs.Height()) + require.Equal(t, abi.ChainEpoch(23), curH) + return false, nil + }, func(_ context.Context, ts *types.TipSet) error { + t.Fatal("revert on timeout") + return nil + }, 3, 20, func(oldTs, newTs *types.TipSet) (bool, StateChange, error) { + return false, nil, nil + }) + + require.NoError(t, err) + + fcs.advance(0, 21, nil) + require.False(t, called) + + fcs.advance(0, 5, nil) + require.True(t, called) + called = false + + // with check func reporting done + + fcs = &fakeCS{ + t: t, + h: 1, + + msgs: map[cid.Cid]fakeMsg{}, + blkMsgs: map[cid.Cid]cid.Cid{}, + tsc: newTSCache(2*build.ForkLengthThreshold, nil), + } + require.NoError(t, fcs.tsc.add(fcs.makeTs(t, nil, 1, dummyCid))) + + events = NewEvents(context.Background(), fcs) + + err = events.StateChanged(func(ts *types.TipSet) (d bool, m bool, e error) { + return true, true, nil + }, func(oldTs, newTs *types.TipSet, data StateChange, curH abi.ChainEpoch) (bool, error) { + called = true + require.Nil(t, data) + require.Equal(t, abi.ChainEpoch(20), newTs.Height()) + require.Equal(t, abi.ChainEpoch(23), curH) + return false, nil + }, func(_ context.Context, ts *types.TipSet) error { + t.Fatal("revert on timeout") + return nil + }, 3, 20, func(oldTs, newTs *types.TipSet) (bool, StateChange, error) { + return false, nil, nil + }) + require.NoError(t, err) + + fcs.advance(0, 21, nil) + require.False(t, called) + + fcs.advance(0, 5, nil) + require.False(t, called) +} diff --git a/chain/events/state/ctxstore.go b/chain/events/state/ctxstore.go new file mode 100644 index 000000000..12b45e425 --- /dev/null +++ b/chain/events/state/ctxstore.go @@ -0,0 
+1,25 @@ +package state + +import ( + "context" + + "github.com/ipfs/go-cid" + cbor "github.com/ipfs/go-ipld-cbor" +) + +type contextStore struct { + ctx context.Context + cst *cbor.BasicIpldStore +} + +func (cs *contextStore) Context() context.Context { + return cs.ctx +} + +func (cs *contextStore) Get(ctx context.Context, c cid.Cid, out interface{}) error { + return cs.cst.Get(ctx, c, out) +} + +func (cs *contextStore) Put(ctx context.Context, v interface{}) (cid.Cid, error) { + return cs.cst.Put(ctx, v) +} diff --git a/chain/events/state/predicates.go b/chain/events/state/predicates.go new file mode 100644 index 000000000..99b8480dc --- /dev/null +++ b/chain/events/state/predicates.go @@ -0,0 +1,597 @@ +package state + +import ( + "bytes" + "context" + + "github.com/filecoin-project/lotus/chain/actors/builtin/miner" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + cbor "github.com/ipfs/go-ipld-cbor" + typegen "github.com/whyrusleeping/cbor-gen" + + "github.com/filecoin-project/lotus/api/apibstore" + "github.com/filecoin-project/lotus/chain/actors/adt" + init_ "github.com/filecoin-project/lotus/chain/actors/builtin/init" + "github.com/filecoin-project/lotus/chain/actors/builtin/market" + "github.com/filecoin-project/lotus/chain/actors/builtin/paych" + "github.com/filecoin-project/lotus/chain/types" +) + +// UserData is the data returned from the DiffTipSetKeyFunc +type UserData interface{} + +// ChainAPI abstracts out calls made by this class to external APIs +type ChainAPI interface { + apibstore.ChainIO + StateGetActor(ctx context.Context, actor address.Address, tsk types.TipSetKey) (*types.Actor, error) +} + +// StatePredicates has common predicates for responding to state changes +type StatePredicates struct { + api ChainAPI + cst *cbor.BasicIpldStore +} + +func NewStatePredicates(api ChainAPI) *StatePredicates { + return &StatePredicates{ + api: api, + cst: 
cbor.NewCborStore(apibstore.NewAPIBlockstore(api)), + } +} + +// DiffTipSetKeyFunc check if there's a change form oldState to newState, and returns +// - changed: was there a change +// - user: user-defined data representing the state change +// - err +type DiffTipSetKeyFunc func(ctx context.Context, oldState, newState types.TipSetKey) (changed bool, user UserData, err error) + +type DiffActorStateFunc func(ctx context.Context, oldActorState *types.Actor, newActorState *types.Actor) (changed bool, user UserData, err error) + +// OnActorStateChanged calls diffStateFunc when the state changes for the given actor +func (sp *StatePredicates) OnActorStateChanged(addr address.Address, diffStateFunc DiffActorStateFunc) DiffTipSetKeyFunc { + return func(ctx context.Context, oldState, newState types.TipSetKey) (changed bool, user UserData, err error) { + oldActor, err := sp.api.StateGetActor(ctx, addr, oldState) + if err != nil { + return false, nil, err + } + newActor, err := sp.api.StateGetActor(ctx, addr, newState) + if err != nil { + return false, nil, err + } + + if oldActor.Head.Equals(newActor.Head) { + return false, nil, nil + } + return diffStateFunc(ctx, oldActor, newActor) + } +} + +type DiffStorageMarketStateFunc func(ctx context.Context, oldState market.State, newState market.State) (changed bool, user UserData, err error) + +// OnStorageMarketActorChanged calls diffStorageMarketState when the state changes for the market actor +func (sp *StatePredicates) OnStorageMarketActorChanged(diffStorageMarketState DiffStorageMarketStateFunc) DiffTipSetKeyFunc { + return sp.OnActorStateChanged(market.Address, func(ctx context.Context, oldActorState, newActorState *types.Actor) (changed bool, user UserData, err error) { + oldState, err := market.Load(adt.WrapStore(ctx, sp.cst), oldActorState) + if err != nil { + return false, nil, err + } + newState, err := market.Load(adt.WrapStore(ctx, sp.cst), newActorState) + if err != nil { + return false, nil, err + } + return 
diffStorageMarketState(ctx, oldState, newState) + }) +} + +type BalanceTables struct { + EscrowTable market.BalanceTable + LockedTable market.BalanceTable +} + +// DiffBalanceTablesFunc compares two balance tables +type DiffBalanceTablesFunc func(ctx context.Context, oldBalanceTable, newBalanceTable BalanceTables) (changed bool, user UserData, err error) + +// OnBalanceChanged runs when the escrow table for available balances changes +func (sp *StatePredicates) OnBalanceChanged(diffBalances DiffBalanceTablesFunc) DiffStorageMarketStateFunc { + return func(ctx context.Context, oldState market.State, newState market.State) (changed bool, user UserData, err error) { + bc, err := oldState.BalancesChanged(newState) + if err != nil { + return false, nil, err + } + + if !bc { + return false, nil, nil + } + + oldEscrowRoot, err := oldState.EscrowTable() + if err != nil { + return false, nil, err + } + + oldLockedRoot, err := oldState.LockedTable() + if err != nil { + return false, nil, err + } + + newEscrowRoot, err := newState.EscrowTable() + if err != nil { + return false, nil, err + } + + newLockedRoot, err := newState.LockedTable() + if err != nil { + return false, nil, err + } + + return diffBalances(ctx, BalanceTables{oldEscrowRoot, oldLockedRoot}, BalanceTables{newEscrowRoot, newLockedRoot}) + } +} + +type DiffDealStatesFunc func(ctx context.Context, oldDealStateRoot, newDealStateRoot market.DealStates) (changed bool, user UserData, err error) +type DiffDealProposalsFunc func(ctx context.Context, oldDealStateRoot, newDealStateRoot market.DealProposals) (changed bool, user UserData, err error) +type DiffAdtArraysFunc func(ctx context.Context, oldDealStateRoot, newDealStateRoot adt.Array) (changed bool, user UserData, err error) + +// OnDealStateChanged calls diffDealStates when the market deal state changes +func (sp *StatePredicates) OnDealStateChanged(diffDealStates DiffDealStatesFunc) DiffStorageMarketStateFunc { + return func(ctx context.Context, oldState 
market.State, newState market.State) (changed bool, user UserData, err error) { + sc, err := oldState.StatesChanged(newState) + if err != nil { + return false, nil, err + } + + if !sc { + return false, nil, nil + } + + oldRoot, err := oldState.States() + if err != nil { + return false, nil, err + } + newRoot, err := newState.States() + if err != nil { + return false, nil, err + } + + return diffDealStates(ctx, oldRoot, newRoot) + } +} + +// OnDealProposalChanged calls diffDealProps when the market proposal state changes +func (sp *StatePredicates) OnDealProposalChanged(diffDealProps DiffDealProposalsFunc) DiffStorageMarketStateFunc { + return func(ctx context.Context, oldState market.State, newState market.State) (changed bool, user UserData, err error) { + pc, err := oldState.ProposalsChanged(newState) + if err != nil { + return false, nil, err + } + + if !pc { + return false, nil, nil + } + + oldRoot, err := oldState.Proposals() + if err != nil { + return false, nil, err + } + newRoot, err := newState.Proposals() + if err != nil { + return false, nil, err + } + + return diffDealProps(ctx, oldRoot, newRoot) + } +} + +// OnDealProposalAmtChanged detects changes in the deal proposal AMT for all deal proposals and returns a MarketProposalsChanges structure containing: +// - Added Proposals +// - Modified Proposals +// - Removed Proposals +func (sp *StatePredicates) OnDealProposalAmtChanged() DiffDealProposalsFunc { + return func(ctx context.Context, oldDealProps, newDealProps market.DealProposals) (changed bool, user UserData, err error) { + proposalChanges, err := market.DiffDealProposals(oldDealProps, newDealProps) + if err != nil { + return false, nil, err + } + + if len(proposalChanges.Added)+len(proposalChanges.Removed) == 0 { + return false, nil, nil + } + + return true, proposalChanges, nil + } +} + +// OnDealStateAmtChanged detects changes in the deal state AMT for all deal states and returns a MarketDealStateChanges structure containing: +// - Added Deals 
+// - Modified Deals +// - Removed Deals +func (sp *StatePredicates) OnDealStateAmtChanged() DiffDealStatesFunc { + return func(ctx context.Context, oldDealStates, newDealStates market.DealStates) (changed bool, user UserData, err error) { + dealStateChanges, err := market.DiffDealStates(oldDealStates, newDealStates) + if err != nil { + return false, nil, err + } + + if len(dealStateChanges.Added)+len(dealStateChanges.Modified)+len(dealStateChanges.Removed) == 0 { + return false, nil, nil + } + + return true, dealStateChanges, nil + } +} + +// ChangedDeals is a set of changes to deal state +type ChangedDeals map[abi.DealID]market.DealStateChange + +// DealStateChangedForIDs detects changes in the deal state AMT for the given deal IDs +func (sp *StatePredicates) DealStateChangedForIDs(dealIds []abi.DealID) DiffDealStatesFunc { + return func(ctx context.Context, oldDealStates, newDealStates market.DealStates) (changed bool, user UserData, err error) { + changedDeals := make(ChangedDeals) + for _, dealID := range dealIds { + + // If the deal has been removed, we just set it to nil + oldDeal, oldFound, err := oldDealStates.Get(dealID) + if err != nil { + return false, nil, err + } + + newDeal, newFound, err := newDealStates.Get(dealID) + if err != nil { + return false, nil, err + } + + existenceChanged := oldFound != newFound + valueChanged := (oldFound && newFound) && *oldDeal != *newDeal + if existenceChanged || valueChanged { + changedDeals[dealID] = market.DealStateChange{ID: dealID, From: oldDeal, To: newDeal} + } + } + if len(changedDeals) > 0 { + return true, changedDeals, nil + } + return false, nil, nil + } +} + +// ChangedBalances is a set of changes to deal state +type ChangedBalances map[address.Address]BalanceChange + +// BalanceChange is a change in balance from -> to +type BalanceChange struct { + From abi.TokenAmount + To abi.TokenAmount +} + +// AvailableBalanceChangedForAddresses detects changes in the escrow table for the given addresses +func (sp 
*StatePredicates) AvailableBalanceChangedForAddresses(getAddrs func() []address.Address) DiffBalanceTablesFunc { + return func(ctx context.Context, oldBalances, newBalances BalanceTables) (changed bool, user UserData, err error) { + changedBalances := make(ChangedBalances) + addrs := getAddrs() + for _, addr := range addrs { + // If the deal has been removed, we just set it to nil + oldEscrowBalance, err := oldBalances.EscrowTable.Get(addr) + if err != nil { + return false, nil, err + } + + oldLockedBalance, err := oldBalances.LockedTable.Get(addr) + if err != nil { + return false, nil, err + } + + oldBalance := big.Sub(oldEscrowBalance, oldLockedBalance) + + newEscrowBalance, err := newBalances.EscrowTable.Get(addr) + if err != nil { + return false, nil, err + } + + newLockedBalance, err := newBalances.LockedTable.Get(addr) + if err != nil { + return false, nil, err + } + + newBalance := big.Sub(newEscrowBalance, newLockedBalance) + + if !oldBalance.Equals(newBalance) { + changedBalances[addr] = BalanceChange{oldBalance, newBalance} + } + } + if len(changedBalances) > 0 { + return true, changedBalances, nil + } + return false, nil, nil + } +} + +type DiffMinerActorStateFunc func(ctx context.Context, oldState miner.State, newState miner.State) (changed bool, user UserData, err error) + +func (sp *StatePredicates) OnInitActorChange(diffInitActorState DiffInitActorStateFunc) DiffTipSetKeyFunc { + return sp.OnActorStateChanged(init_.Address, func(ctx context.Context, oldActorState, newActorState *types.Actor) (changed bool, user UserData, err error) { + oldState, err := init_.Load(adt.WrapStore(ctx, sp.cst), oldActorState) + if err != nil { + return false, nil, err + } + newState, err := init_.Load(adt.WrapStore(ctx, sp.cst), newActorState) + if err != nil { + return false, nil, err + } + return diffInitActorState(ctx, oldState, newState) + }) + +} + +func (sp *StatePredicates) OnMinerActorChange(minerAddr address.Address, diffMinerActorState DiffMinerActorStateFunc) 
DiffTipSetKeyFunc { + return sp.OnActorStateChanged(minerAddr, func(ctx context.Context, oldActorState, newActorState *types.Actor) (changed bool, user UserData, err error) { + oldState, err := miner.Load(adt.WrapStore(ctx, sp.cst), oldActorState) + if err != nil { + return false, nil, err + } + newState, err := miner.Load(adt.WrapStore(ctx, sp.cst), newActorState) + if err != nil { + return false, nil, err + } + return diffMinerActorState(ctx, oldState, newState) + }) +} + +func (sp *StatePredicates) OnMinerSectorChange() DiffMinerActorStateFunc { + return func(ctx context.Context, oldState, newState miner.State) (changed bool, user UserData, err error) { + sectorChanges, err := miner.DiffSectors(oldState, newState) + if err != nil { + return false, nil, err + } + // nothing changed + if len(sectorChanges.Added)+len(sectorChanges.Extended)+len(sectorChanges.Removed) == 0 { + return false, nil, nil + } + + return true, sectorChanges, nil + } +} + +func (sp *StatePredicates) OnMinerPreCommitChange() DiffMinerActorStateFunc { + return func(ctx context.Context, oldState, newState miner.State) (changed bool, user UserData, err error) { + precommitChanges, err := miner.DiffPreCommits(oldState, newState) + if err != nil { + return false, nil, err + } + + if len(precommitChanges.Added)+len(precommitChanges.Removed) == 0 { + return false, nil, nil + } + + return true, precommitChanges, nil + } +} + +// DiffPaymentChannelStateFunc is function that compares two states for the payment channel +type DiffPaymentChannelStateFunc func(ctx context.Context, oldState paych.State, newState paych.State) (changed bool, user UserData, err error) + +// OnPaymentChannelActorChanged calls diffPaymentChannelState when the state changes for the the payment channel actor +func (sp *StatePredicates) OnPaymentChannelActorChanged(paychAddr address.Address, diffPaymentChannelState DiffPaymentChannelStateFunc) DiffTipSetKeyFunc { + return sp.OnActorStateChanged(paychAddr, func(ctx context.Context, 
oldActorState, newActorState *types.Actor) (changed bool, user UserData, err error) { + oldState, err := paych.Load(adt.WrapStore(ctx, sp.cst), oldActorState) + if err != nil { + return false, nil, err + } + newState, err := paych.Load(adt.WrapStore(ctx, sp.cst), newActorState) + if err != nil { + return false, nil, err + } + return diffPaymentChannelState(ctx, oldState, newState) + }) +} + +// PayChToSendChange is a difference in the amount to send on a payment channel when the money is collected +type PayChToSendChange struct { + OldToSend abi.TokenAmount + NewToSend abi.TokenAmount +} + +// OnToSendAmountChanges monitors changes on the total amount to send from one party to the other on a payment channel +func (sp *StatePredicates) OnToSendAmountChanges() DiffPaymentChannelStateFunc { + return func(ctx context.Context, oldState paych.State, newState paych.State) (changed bool, user UserData, err error) { + ots, err := oldState.ToSend() + if err != nil { + return false, nil, err + } + + nts, err := newState.ToSend() + if err != nil { + return false, nil, err + } + + if ots.Equals(nts) { + return false, nil, nil + } + return true, &PayChToSendChange{ + OldToSend: ots, + NewToSend: nts, + }, nil + } +} + +type AddressPair struct { + ID address.Address + PK address.Address +} + +type InitActorAddressChanges struct { + Added []AddressPair + Modified []AddressChange + Removed []AddressPair +} + +type AddressChange struct { + From AddressPair + To AddressPair +} + +type DiffInitActorStateFunc func(ctx context.Context, oldState init_.State, newState init_.State) (changed bool, user UserData, err error) + +func (i *InitActorAddressChanges) AsKey(key string) (abi.Keyer, error) { + addr, err := address.NewFromBytes([]byte(key)) + if err != nil { + return nil, err + } + return abi.AddrKey(addr), nil +} + +func (i *InitActorAddressChanges) Add(key string, val *typegen.Deferred) error { + pkAddr, err := address.NewFromBytes([]byte(key)) + if err != nil { + return err + } + id 
:= new(typegen.CborInt) + if err := id.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil { + return err + } + idAddr, err := address.NewIDAddress(uint64(*id)) + if err != nil { + return err + } + i.Added = append(i.Added, AddressPair{ + ID: idAddr, + PK: pkAddr, + }) + return nil +} + +func (i *InitActorAddressChanges) Modify(key string, from, to *typegen.Deferred) error { + pkAddr, err := address.NewFromBytes([]byte(key)) + if err != nil { + return err + } + + fromID := new(typegen.CborInt) + if err := fromID.UnmarshalCBOR(bytes.NewReader(from.Raw)); err != nil { + return err + } + fromIDAddr, err := address.NewIDAddress(uint64(*fromID)) + if err != nil { + return err + } + + toID := new(typegen.CborInt) + if err := toID.UnmarshalCBOR(bytes.NewReader(to.Raw)); err != nil { + return err + } + toIDAddr, err := address.NewIDAddress(uint64(*toID)) + if err != nil { + return err + } + + i.Modified = append(i.Modified, AddressChange{ + From: AddressPair{ + ID: fromIDAddr, + PK: pkAddr, + }, + To: AddressPair{ + ID: toIDAddr, + PK: pkAddr, + }, + }) + return nil +} + +func (i *InitActorAddressChanges) Remove(key string, val *typegen.Deferred) error { + pkAddr, err := address.NewFromBytes([]byte(key)) + if err != nil { + return err + } + id := new(typegen.CborInt) + if err := id.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil { + return err + } + idAddr, err := address.NewIDAddress(uint64(*id)) + if err != nil { + return err + } + i.Removed = append(i.Removed, AddressPair{ + ID: idAddr, + PK: pkAddr, + }) + return nil +} + +func (sp *StatePredicates) OnAddressMapChange() DiffInitActorStateFunc { + return func(ctx context.Context, oldState, newState init_.State) (changed bool, user UserData, err error) { + addressChanges := &InitActorAddressChanges{ + Added: []AddressPair{}, + Modified: []AddressChange{}, + Removed: []AddressPair{}, + } + + err = oldState.ForEachActor(func(oldId abi.ActorID, oldAddress address.Address) error { + oldIdAddress, err := 
address.NewIDAddress(uint64(oldId)) + if err != nil { + return err + } + + newIdAddress, found, err := newState.ResolveAddress(oldAddress) + if err != nil { + return err + } + + if !found { + addressChanges.Removed = append(addressChanges.Removed, AddressPair{ + ID: oldIdAddress, + PK: oldAddress, + }) + } + + if oldIdAddress != newIdAddress { + addressChanges.Modified = append(addressChanges.Modified, AddressChange{ + From: AddressPair{ + ID: oldIdAddress, + PK: oldAddress, + }, + To: AddressPair{ + ID: newIdAddress, + PK: oldAddress, + }, + }) + } + + return nil + }) + + if err != nil { + return false, nil, err + } + + err = newState.ForEachActor(func(newId abi.ActorID, newAddress address.Address) error { + newIdAddress, err := address.NewIDAddress(uint64(newId)) + if err != nil { + return err + } + + _, found, err := newState.ResolveAddress(newAddress) + if err != nil { + return err + } + + if !found { + addressChanges.Added = append(addressChanges.Added, AddressPair{ + ID: newIdAddress, + PK: newAddress, + }) + } + + return nil + }) + + if err != nil { + return false, nil, err + } + + if len(addressChanges.Added)+len(addressChanges.Removed)+len(addressChanges.Modified) == 0 { + return false, nil, nil + } + + return true, addressChanges, nil + } +} diff --git a/chain/events/state/predicates_test.go b/chain/events/state/predicates_test.go new file mode 100644 index 000000000..461ac4997 --- /dev/null +++ b/chain/events/state/predicates_test.go @@ -0,0 +1,632 @@ +package state + +import ( + "context" + "testing" + + "github.com/filecoin-project/lotus/chain/actors/builtin/miner" + + "github.com/filecoin-project/go-bitfield" + + "github.com/stretchr/testify/require" + "golang.org/x/xerrors" + + "github.com/ipfs/go-cid" + cbornode "github.com/ipfs/go-ipld-cbor" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/go-state-types/crypto" + 
"github.com/filecoin-project/lotus/chain/actors/builtin/market" + builtin0 "github.com/filecoin-project/specs-actors/actors/builtin" + market0 "github.com/filecoin-project/specs-actors/actors/builtin/market" + + miner0 "github.com/filecoin-project/specs-actors/actors/builtin/miner" + "github.com/filecoin-project/specs-actors/actors/util/adt" + tutils "github.com/filecoin-project/specs-actors/support/testing" + + "github.com/filecoin-project/lotus/chain/types" + bstore "github.com/filecoin-project/lotus/lib/blockstore" +) + +var dummyCid cid.Cid + +func init() { + dummyCid, _ = cid.Parse("bafkqaaa") +} + +type mockAPI struct { + ts map[types.TipSetKey]*types.Actor + bs bstore.Blockstore +} + +func newMockAPI(bs bstore.Blockstore) *mockAPI { + return &mockAPI{ + bs: bs, + ts: make(map[types.TipSetKey]*types.Actor), + } +} + +func (m mockAPI) ChainHasObj(ctx context.Context, c cid.Cid) (bool, error) { + return m.bs.Has(c) +} + +func (m mockAPI) ChainReadObj(ctx context.Context, c cid.Cid) ([]byte, error) { + blk, err := m.bs.Get(c) + if err != nil { + return nil, xerrors.Errorf("blockstore get: %w", err) + } + + return blk.RawData(), nil +} + +func (m mockAPI) StateGetActor(ctx context.Context, actor address.Address, tsk types.TipSetKey) (*types.Actor, error) { + return m.ts[tsk], nil +} + +func (m mockAPI) setActor(tsk types.TipSetKey, act *types.Actor) { + m.ts[tsk] = act +} + +func TestMarketPredicates(t *testing.T) { + ctx := context.Background() + bs := bstore.NewTemporarySync() + store := adt.WrapStore(ctx, cbornode.NewCborStore(bs)) + + oldDeal1 := &market0.DealState{ + SectorStartEpoch: 1, + LastUpdatedEpoch: 2, + SlashEpoch: 0, + } + oldDeal2 := &market0.DealState{ + SectorStartEpoch: 4, + LastUpdatedEpoch: 5, + SlashEpoch: 0, + } + oldDeals := map[abi.DealID]*market0.DealState{ + abi.DealID(1): oldDeal1, + abi.DealID(2): oldDeal2, + } + + oldProp1 := &market0.DealProposal{ + PieceCID: dummyCid, + PieceSize: 0, + VerifiedDeal: false, + Client: 
tutils.NewIDAddr(t, 1), + Provider: tutils.NewIDAddr(t, 1), + StartEpoch: 1, + EndEpoch: 2, + StoragePricePerEpoch: big.Zero(), + ProviderCollateral: big.Zero(), + ClientCollateral: big.Zero(), + } + oldProp2 := &market0.DealProposal{ + PieceCID: dummyCid, + PieceSize: 0, + VerifiedDeal: false, + Client: tutils.NewIDAddr(t, 1), + Provider: tutils.NewIDAddr(t, 1), + StartEpoch: 2, + EndEpoch: 3, + StoragePricePerEpoch: big.Zero(), + ProviderCollateral: big.Zero(), + ClientCollateral: big.Zero(), + } + oldProps := map[abi.DealID]*market0.DealProposal{ + abi.DealID(1): oldProp1, + abi.DealID(2): oldProp2, + } + + oldBalances := map[address.Address]balance{ + tutils.NewIDAddr(t, 1): {abi.NewTokenAmount(1000), abi.NewTokenAmount(1000)}, + tutils.NewIDAddr(t, 2): {abi.NewTokenAmount(2000), abi.NewTokenAmount(500)}, + tutils.NewIDAddr(t, 3): {abi.NewTokenAmount(3000), abi.NewTokenAmount(2000)}, + tutils.NewIDAddr(t, 5): {abi.NewTokenAmount(3000), abi.NewTokenAmount(1000)}, + } + + oldStateC := createMarketState(ctx, t, store, oldDeals, oldProps, oldBalances) + + newDeal1 := &market0.DealState{ + SectorStartEpoch: 1, + LastUpdatedEpoch: 3, + SlashEpoch: 0, + } + + // deal 2 removed + + // added + newDeal3 := &market0.DealState{ + SectorStartEpoch: 1, + LastUpdatedEpoch: 2, + SlashEpoch: 3, + } + newDeals := map[abi.DealID]*market0.DealState{ + abi.DealID(1): newDeal1, + // deal 2 was removed + abi.DealID(3): newDeal3, + } + + // added + newProp3 := &market0.DealProposal{ + PieceCID: dummyCid, + PieceSize: 0, + VerifiedDeal: false, + Client: tutils.NewIDAddr(t, 1), + Provider: tutils.NewIDAddr(t, 1), + StartEpoch: 4, + EndEpoch: 4, + StoragePricePerEpoch: big.Zero(), + ProviderCollateral: big.Zero(), + ClientCollateral: big.Zero(), + } + newProps := map[abi.DealID]*market0.DealProposal{ + abi.DealID(1): oldProp1, // 1 was persisted + // prop 2 was removed + abi.DealID(3): newProp3, // new + // NB: DealProposals cannot be modified, so don't test that case. 
+ } + newBalances := map[address.Address]balance{ + tutils.NewIDAddr(t, 1): {abi.NewTokenAmount(3000), abi.NewTokenAmount(0)}, + tutils.NewIDAddr(t, 2): {abi.NewTokenAmount(2000), abi.NewTokenAmount(500)}, + tutils.NewIDAddr(t, 4): {abi.NewTokenAmount(5000), abi.NewTokenAmount(0)}, + tutils.NewIDAddr(t, 5): {abi.NewTokenAmount(1000), abi.NewTokenAmount(3000)}, + } + + newStateC := createMarketState(ctx, t, store, newDeals, newProps, newBalances) + + minerAddr, err := address.NewFromString("t00") + require.NoError(t, err) + oldState, err := mockTipset(minerAddr, 1) + require.NoError(t, err) + newState, err := mockTipset(minerAddr, 2) + require.NoError(t, err) + + api := newMockAPI(bs) + api.setActor(oldState.Key(), &types.Actor{Code: builtin0.StorageMarketActorCodeID, Head: oldStateC}) + api.setActor(newState.Key(), &types.Actor{Code: builtin0.StorageMarketActorCodeID, Head: newStateC}) + + t.Run("deal ID predicate", func(t *testing.T) { + preds := NewStatePredicates(api) + + dealIds := []abi.DealID{abi.DealID(1), abi.DealID(2)} + diffIDFn := preds.OnStorageMarketActorChanged(preds.OnDealStateChanged(preds.DealStateChangedForIDs(dealIds))) + + // Diff a state against itself: expect no change + changed, _, err := diffIDFn(ctx, oldState.Key(), oldState.Key()) + require.NoError(t, err) + require.False(t, changed) + + // Diff old state against new state + changed, valIDs, err := diffIDFn(ctx, oldState.Key(), newState.Key()) + require.NoError(t, err) + require.True(t, changed) + + changedDealIDs, ok := valIDs.(ChangedDeals) + require.True(t, ok) + require.Len(t, changedDealIDs, 2) + require.Contains(t, changedDealIDs, abi.DealID(1)) + require.Contains(t, changedDealIDs, abi.DealID(2)) + deal1 := changedDealIDs[abi.DealID(1)] + if deal1.From.LastUpdatedEpoch != 2 || deal1.To.LastUpdatedEpoch != 3 { + t.Fatal("Unexpected change to LastUpdatedEpoch") + } + deal2 := changedDealIDs[abi.DealID(2)] + if deal2.From.LastUpdatedEpoch != 5 || deal2.To != nil { + t.Fatal("Expected 
To to be nil") + } + + // Diff with non-existent deal. + noDeal := []abi.DealID{4} + diffNoDealFn := preds.OnStorageMarketActorChanged(preds.OnDealStateChanged(preds.DealStateChangedForIDs(noDeal))) + changed, _, err = diffNoDealFn(ctx, oldState.Key(), newState.Key()) + require.NoError(t, err) + require.False(t, changed) + + // Test that OnActorStateChanged does not call the callback if the state has not changed + mockAddr, err := address.NewFromString("t01") + require.NoError(t, err) + actorDiffFn := preds.OnActorStateChanged(mockAddr, func(context.Context, *types.Actor, *types.Actor) (bool, UserData, error) { + t.Fatal("No state change so this should not be called") + return false, nil, nil + }) + changed, _, err = actorDiffFn(ctx, oldState.Key(), oldState.Key()) + require.NoError(t, err) + require.False(t, changed) + + // Test that OnDealStateChanged does not call the callback if the state has not changed + diffDealStateFn := preds.OnDealStateChanged(func(context.Context, market.DealStates, market.DealStates) (bool, UserData, error) { + t.Fatal("No state change so this should not be called") + return false, nil, nil + }) + marketState0 := createEmptyMarketState(t, store) + marketCid, err := store.Put(ctx, marketState0) + require.NoError(t, err) + marketState, err := market.Load(store, &types.Actor{ + Code: builtin0.StorageMarketActorCodeID, + Head: marketCid, + }) + require.NoError(t, err) + changed, _, err = diffDealStateFn(ctx, marketState, marketState) + require.NoError(t, err) + require.False(t, changed) + }) + + t.Run("deal state array predicate", func(t *testing.T) { + preds := NewStatePredicates(api) + diffArrFn := preds.OnStorageMarketActorChanged(preds.OnDealStateChanged(preds.OnDealStateAmtChanged())) + + changed, _, err := diffArrFn(ctx, oldState.Key(), oldState.Key()) + require.NoError(t, err) + require.False(t, changed) + + changed, valArr, err := diffArrFn(ctx, oldState.Key(), newState.Key()) + require.NoError(t, err) + require.True(t, changed) + + 
changedDeals, ok := valArr.(*market.DealStateChanges) + require.True(t, ok) + require.Len(t, changedDeals.Added, 1) + require.Equal(t, abi.DealID(3), changedDeals.Added[0].ID) + require.True(t, dealEquality(*newDeal3, changedDeals.Added[0].Deal)) + + require.Len(t, changedDeals.Removed, 1) + + require.Len(t, changedDeals.Modified, 1) + require.Equal(t, abi.DealID(1), changedDeals.Modified[0].ID) + require.True(t, dealEquality(*newDeal1, *changedDeals.Modified[0].To)) + require.True(t, dealEquality(*oldDeal1, *changedDeals.Modified[0].From)) + + require.Equal(t, abi.DealID(2), changedDeals.Removed[0].ID) + }) + + t.Run("deal proposal array predicate", func(t *testing.T) { + preds := NewStatePredicates(api) + diffArrFn := preds.OnStorageMarketActorChanged(preds.OnDealProposalChanged(preds.OnDealProposalAmtChanged())) + changed, _, err := diffArrFn(ctx, oldState.Key(), oldState.Key()) + require.NoError(t, err) + require.False(t, changed) + + changed, valArr, err := diffArrFn(ctx, oldState.Key(), newState.Key()) + require.NoError(t, err) + require.True(t, changed) + + changedProps, ok := valArr.(*market.DealProposalChanges) + require.True(t, ok) + require.Len(t, changedProps.Added, 1) + require.Equal(t, abi.DealID(3), changedProps.Added[0].ID) + + // proposals cannot be modified -- no modified testing + + require.Len(t, changedProps.Removed, 1) + require.Equal(t, abi.DealID(2), changedProps.Removed[0].ID) + }) + + t.Run("balances predicate", func(t *testing.T) { + preds := NewStatePredicates(api) + + getAddresses := func() []address.Address { + return []address.Address{tutils.NewIDAddr(t, 1), tutils.NewIDAddr(t, 2), tutils.NewIDAddr(t, 3), tutils.NewIDAddr(t, 4)} + } + diffBalancesFn := preds.OnStorageMarketActorChanged(preds.OnBalanceChanged(preds.AvailableBalanceChangedForAddresses(getAddresses))) + + // Diff a state against itself: expect no change + changed, _, err := diffBalancesFn(ctx, oldState.Key(), oldState.Key()) + require.NoError(t, err) + require.False(t, 
changed) + + // Diff old state against new state + changed, valIDs, err := diffBalancesFn(ctx, oldState.Key(), newState.Key()) + require.NoError(t, err) + require.True(t, changed) + + changedBalances, ok := valIDs.(ChangedBalances) + require.True(t, ok) + require.Len(t, changedBalances, 3) + require.Contains(t, changedBalances, tutils.NewIDAddr(t, 1)) + require.Contains(t, changedBalances, tutils.NewIDAddr(t, 3)) + require.Contains(t, changedBalances, tutils.NewIDAddr(t, 4)) + + balance1 := changedBalances[tutils.NewIDAddr(t, 1)] + if !balance1.From.Equals(abi.NewTokenAmount(1000)) || !balance1.To.Equals(abi.NewTokenAmount(3000)) { + t.Fatal("Unexpected change to balance") + } + balance3 := changedBalances[tutils.NewIDAddr(t, 3)] + if !balance3.From.Equals(abi.NewTokenAmount(3000)) || !balance3.To.Equals(abi.NewTokenAmount(0)) { + t.Fatal("Unexpected change to balance") + } + balance4 := changedBalances[tutils.NewIDAddr(t, 4)] + if !balance4.From.Equals(abi.NewTokenAmount(0)) || !balance4.To.Equals(abi.NewTokenAmount(5000)) { + t.Fatal("Unexpected change to balance") + } + + // Diff with non-existent address. 
+ getNoAddress := func() []address.Address { return []address.Address{tutils.NewIDAddr(t, 6)} } + diffNoAddressFn := preds.OnStorageMarketActorChanged(preds.OnBalanceChanged(preds.AvailableBalanceChangedForAddresses(getNoAddress))) + changed, _, err = diffNoAddressFn(ctx, oldState.Key(), newState.Key()) + require.NoError(t, err) + require.False(t, changed) + + // Test that OnBalanceChanged does not call the callback if the state has not changed + diffDealBalancesFn := preds.OnBalanceChanged(func(context.Context, BalanceTables, BalanceTables) (bool, UserData, error) { + t.Fatal("No state change so this should not be called") + return false, nil, nil + }) + marketState0 := createEmptyMarketState(t, store) + marketCid, err := store.Put(ctx, marketState0) + require.NoError(t, err) + marketState, err := market.Load(store, &types.Actor{ + Code: builtin0.StorageMarketActorCodeID, + Head: marketCid, + }) + require.NoError(t, err) + changed, _, err = diffDealBalancesFn(ctx, marketState, marketState) + require.NoError(t, err) + require.False(t, changed) + }) + +} + +func TestMinerSectorChange(t *testing.T) { + ctx := context.Background() + bs := bstore.NewTemporarySync() + store := adt.WrapStore(ctx, cbornode.NewCborStore(bs)) + + nextID := uint64(0) + nextIDAddrF := func() address.Address { + defer func() { nextID++ }() + return tutils.NewIDAddr(t, nextID) + } + + owner, worker := nextIDAddrF(), nextIDAddrF() + si0 := newSectorOnChainInfo(0, tutils.MakeCID("0", &miner0.SealedCIDPrefix), big.NewInt(0), abi.ChainEpoch(0), abi.ChainEpoch(10)) + si1 := newSectorOnChainInfo(1, tutils.MakeCID("1", &miner0.SealedCIDPrefix), big.NewInt(1), abi.ChainEpoch(1), abi.ChainEpoch(11)) + si2 := newSectorOnChainInfo(2, tutils.MakeCID("2", &miner0.SealedCIDPrefix), big.NewInt(2), abi.ChainEpoch(2), abi.ChainEpoch(11)) + oldMinerC := createMinerState(ctx, t, store, owner, worker, []miner.SectorOnChainInfo{si0, si1, si2}) + + si3 := newSectorOnChainInfo(3, tutils.MakeCID("3", 
&miner0.SealedCIDPrefix), big.NewInt(3), abi.ChainEpoch(3), abi.ChainEpoch(12)) + // 0 delete + // 1 extend + // 2 same + // 3 added + si1Ext := si1 + si1Ext.Expiration++ + newMinerC := createMinerState(ctx, t, store, owner, worker, []miner.SectorOnChainInfo{si1Ext, si2, si3}) + + minerAddr := nextIDAddrF() + oldState, err := mockTipset(minerAddr, 1) + require.NoError(t, err) + newState, err := mockTipset(minerAddr, 2) + require.NoError(t, err) + + api := newMockAPI(bs) + api.setActor(oldState.Key(), &types.Actor{Head: oldMinerC, Code: builtin0.StorageMinerActorCodeID}) + api.setActor(newState.Key(), &types.Actor{Head: newMinerC, Code: builtin0.StorageMinerActorCodeID}) + + preds := NewStatePredicates(api) + + minerDiffFn := preds.OnMinerActorChange(minerAddr, preds.OnMinerSectorChange()) + change, val, err := minerDiffFn(ctx, oldState.Key(), newState.Key()) + require.NoError(t, err) + require.True(t, change) + require.NotNil(t, val) + + sectorChanges, ok := val.(*miner.SectorChanges) + require.True(t, ok) + + require.Equal(t, len(sectorChanges.Added), 1) + require.Equal(t, 1, len(sectorChanges.Added)) + require.Equal(t, si3, sectorChanges.Added[0]) + + require.Equal(t, 1, len(sectorChanges.Removed)) + require.Equal(t, si0, sectorChanges.Removed[0]) + + require.Equal(t, 1, len(sectorChanges.Extended)) + require.Equal(t, si1, sectorChanges.Extended[0].From) + require.Equal(t, si1Ext, sectorChanges.Extended[0].To) + + change, val, err = minerDiffFn(ctx, oldState.Key(), oldState.Key()) + require.NoError(t, err) + require.False(t, change) + require.Nil(t, val) + + change, val, err = minerDiffFn(ctx, newState.Key(), oldState.Key()) + require.NoError(t, err) + require.True(t, change) + require.NotNil(t, val) + + sectorChanges, ok = val.(*miner.SectorChanges) + require.True(t, ok) + + require.Equal(t, 1, len(sectorChanges.Added)) + require.Equal(t, si0, sectorChanges.Added[0]) + + require.Equal(t, 1, len(sectorChanges.Removed)) + require.Equal(t, si3, 
sectorChanges.Removed[0]) + + require.Equal(t, 1, len(sectorChanges.Extended)) + require.Equal(t, si1, sectorChanges.Extended[0].To) + require.Equal(t, si1Ext, sectorChanges.Extended[0].From) +} + +func mockTipset(minerAddr address.Address, timestamp uint64) (*types.TipSet, error) { + return types.NewTipSet([]*types.BlockHeader{{ + Miner: minerAddr, + Height: 5, + ParentStateRoot: dummyCid, + Messages: dummyCid, + ParentMessageReceipts: dummyCid, + BlockSig: &crypto.Signature{Type: crypto.SigTypeBLS}, + BLSAggregate: &crypto.Signature{Type: crypto.SigTypeBLS}, + Timestamp: timestamp, + }}) +} + +type balance struct { + available abi.TokenAmount + locked abi.TokenAmount +} + +func createMarketState(ctx context.Context, t *testing.T, store adt.Store, deals map[abi.DealID]*market0.DealState, props map[abi.DealID]*market0.DealProposal, balances map[address.Address]balance) cid.Cid { + dealRootCid := createDealAMT(ctx, t, store, deals) + propRootCid := createProposalAMT(ctx, t, store, props) + balancesCids := createBalanceTable(ctx, t, store, balances) + state := createEmptyMarketState(t, store) + state.States = dealRootCid + state.Proposals = propRootCid + state.EscrowTable = balancesCids[0] + state.LockedTable = balancesCids[1] + + stateC, err := store.Put(ctx, state) + require.NoError(t, err) + return stateC +} + +func createEmptyMarketState(t *testing.T, store adt.Store) *market0.State { + emptyArrayCid, err := adt.MakeEmptyArray(store).Root() + require.NoError(t, err) + emptyMap, err := adt.MakeEmptyMap(store).Root() + require.NoError(t, err) + return market0.ConstructState(emptyArrayCid, emptyMap, emptyMap) +} + +func createDealAMT(ctx context.Context, t *testing.T, store adt.Store, deals map[abi.DealID]*market0.DealState) cid.Cid { + root := adt.MakeEmptyArray(store) + for dealID, dealState := range deals { + err := root.Set(uint64(dealID), dealState) + require.NoError(t, err) + } + rootCid, err := root.Root() + require.NoError(t, err) + return rootCid +} + +func 
createProposalAMT(ctx context.Context, t *testing.T, store adt.Store, props map[abi.DealID]*market0.DealProposal) cid.Cid { + root := adt.MakeEmptyArray(store) + for dealID, prop := range props { + err := root.Set(uint64(dealID), prop) + require.NoError(t, err) + } + rootCid, err := root.Root() + require.NoError(t, err) + return rootCid +} + +func createBalanceTable(ctx context.Context, t *testing.T, store adt.Store, balances map[address.Address]balance) [2]cid.Cid { + escrowMapRoot := adt.MakeEmptyMap(store) + escrowMapRootCid, err := escrowMapRoot.Root() + require.NoError(t, err) + escrowRoot, err := adt.AsBalanceTable(store, escrowMapRootCid) + require.NoError(t, err) + lockedMapRoot := adt.MakeEmptyMap(store) + lockedMapRootCid, err := lockedMapRoot.Root() + require.NoError(t, err) + lockedRoot, err := adt.AsBalanceTable(store, lockedMapRootCid) + require.NoError(t, err) + + for addr, balance := range balances { + err := escrowRoot.Add(addr, big.Add(balance.available, balance.locked)) + require.NoError(t, err) + err = lockedRoot.Add(addr, balance.locked) + require.NoError(t, err) + + } + escrowRootCid, err := escrowRoot.Root() + require.NoError(t, err) + lockedRootCid, err := lockedRoot.Root() + require.NoError(t, err) + return [2]cid.Cid{escrowRootCid, lockedRootCid} +} + +func createMinerState(ctx context.Context, t *testing.T, store adt.Store, owner, worker address.Address, sectors []miner.SectorOnChainInfo) cid.Cid { + rootCid := createSectorsAMT(ctx, t, store, sectors) + + state := createEmptyMinerState(ctx, t, store, owner, worker) + state.Sectors = rootCid + + stateC, err := store.Put(ctx, state) + require.NoError(t, err) + return stateC +} + +func createEmptyMinerState(ctx context.Context, t *testing.T, store adt.Store, owner, worker address.Address) *miner0.State { + emptyArrayCid, err := adt.MakeEmptyArray(store).Root() + require.NoError(t, err) + emptyMap, err := adt.MakeEmptyMap(store).Root() + require.NoError(t, err) + + emptyDeadline, err := 
store.Put(store.Context(), miner0.ConstructDeadline(emptyArrayCid)) + require.NoError(t, err) + + emptyVestingFunds := miner0.ConstructVestingFunds() + emptyVestingFundsCid, err := store.Put(store.Context(), emptyVestingFunds) + require.NoError(t, err) + + emptyDeadlines := miner0.ConstructDeadlines(emptyDeadline) + emptyDeadlinesCid, err := store.Put(store.Context(), emptyDeadlines) + require.NoError(t, err) + + minerInfo := emptyMap + + emptyBitfield := bitfield.NewFromSet(nil) + emptyBitfieldCid, err := store.Put(store.Context(), emptyBitfield) + require.NoError(t, err) + + state, err := miner0.ConstructState(minerInfo, 123, emptyBitfieldCid, emptyArrayCid, emptyMap, emptyDeadlinesCid, emptyVestingFundsCid) + require.NoError(t, err) + return state + +} + +func createSectorsAMT(ctx context.Context, t *testing.T, store adt.Store, sectors []miner.SectorOnChainInfo) cid.Cid { + root := adt.MakeEmptyArray(store) + for _, sector := range sectors { + sector := (miner0.SectorOnChainInfo)(sector) + err := root.Set(uint64(sector.SectorNumber), &sector) + require.NoError(t, err) + } + rootCid, err := root.Root() + require.NoError(t, err) + return rootCid +} + +// returns a unique SectorOnChainInfo with each invocation with SectorNumber set to `sectorNo`. 
+func newSectorOnChainInfo(sectorNo abi.SectorNumber, sealed cid.Cid, weight big.Int, activation, expiration abi.ChainEpoch) miner.SectorOnChainInfo { + info := newSectorPreCommitInfo(sectorNo, sealed, expiration) + return miner.SectorOnChainInfo{ + SectorNumber: info.SectorNumber, + SealProof: info.SealProof, + SealedCID: info.SealedCID, + DealIDs: info.DealIDs, + Expiration: info.Expiration, + + Activation: activation, + DealWeight: weight, + VerifiedDealWeight: weight, + InitialPledge: big.Zero(), + ExpectedDayReward: big.Zero(), + ExpectedStoragePledge: big.Zero(), + } +} + +const ( + sectorSealRandEpochValue = abi.ChainEpoch(1) +) + +// returns a unique SectorPreCommitInfo with each invocation with SectorNumber set to `sectorNo`. +func newSectorPreCommitInfo(sectorNo abi.SectorNumber, sealed cid.Cid, expiration abi.ChainEpoch) *miner0.SectorPreCommitInfo { + return &miner0.SectorPreCommitInfo{ + SealProof: abi.RegisteredSealProof_StackedDrg32GiBV1, + SectorNumber: sectorNo, + SealedCID: sealed, + SealRandEpoch: sectorSealRandEpochValue, + DealIDs: nil, + Expiration: expiration, + } +} + +func dealEquality(expected market0.DealState, actual market.DealState) bool { + return expected.LastUpdatedEpoch == actual.LastUpdatedEpoch && + expected.SectorStartEpoch == actual.SectorStartEpoch && + expected.SlashEpoch == actual.SlashEpoch +} diff --git a/chain/events/tscache.go b/chain/events/tscache.go index 2e58a3f5b..d47c71480 100644 --- a/chain/events/tscache.go +++ b/chain/events/tscache.go @@ -3,12 +3,16 @@ package events import ( "context" + "github.com/filecoin-project/go-state-types/abi" "golang.org/x/xerrors" "github.com/filecoin-project/lotus/chain/types" ) -type tsByHFunc func(context.Context, uint64, *types.TipSet) (*types.TipSet, error) +type tsCacheAPI interface { + ChainGetTipSetByHeight(context.Context, abi.ChainEpoch, types.TipSetKey) (*types.TipSet, error) + ChainHead(context.Context) (*types.TipSet, error) +} // tipSetCache implements a simple 
ring-buffer cache to keep track of recent // tipsets @@ -17,10 +21,10 @@ type tipSetCache struct { start int len int - storage tsByHFunc + storage tsCacheAPI } -func newTSCache(cap int, storage tsByHFunc) *tipSetCache { +func newTSCache(cap abi.ChainEpoch, storage tsCacheAPI) *tipSetCache { return &tipSetCache{ cache: make([]*types.TipSet, cap), start: 0, @@ -77,7 +81,7 @@ func (tsc *tipSetCache) revert(ts *types.TipSet) error { return nil } -func (tsc *tipSetCache) getNonNull(height uint64) (*types.TipSet, error) { +func (tsc *tipSetCache) getNonNull(height abi.ChainEpoch) (*types.TipSet, error) { for { ts, err := tsc.get(height) if err != nil { @@ -90,10 +94,10 @@ func (tsc *tipSetCache) getNonNull(height uint64) (*types.TipSet, error) { } } -func (tsc *tipSetCache) get(height uint64) (*types.TipSet, error) { +func (tsc *tipSetCache) get(height abi.ChainEpoch) (*types.TipSet, error) { if tsc.len == 0 { log.Warnf("tipSetCache.get: cache is empty, requesting from storage (h=%d)", height) - return tsc.storage(context.TODO(), height, nil) + return tsc.storage.ChainGetTipSetByHeight(context.TODO(), height, types.EmptyTSK) } headH := tsc.cache[tsc.start].Height() @@ -113,14 +117,18 @@ func (tsc *tipSetCache) get(height uint64) (*types.TipSet, error) { if height < tail.Height() { log.Warnf("tipSetCache.get: requested tipset not in cache, requesting from storage (h=%d; tail=%d)", height, tail.Height()) - return tsc.storage(context.TODO(), height, tail) + return tsc.storage.ChainGetTipSetByHeight(context.TODO(), height, tail.Key()) } return tsc.cache[normalModulo(tsc.start-int(headH-height), clen)], nil } -func (tsc *tipSetCache) best() *types.TipSet { - return tsc.cache[tsc.start] +func (tsc *tipSetCache) best() (*types.TipSet, error) { + best := tsc.cache[tsc.start] + if best == nil { + return tsc.storage.ChainHead(context.TODO()) + } + return best, nil } func normalModulo(n, m int) int { diff --git a/chain/events/tscache_test.go b/chain/events/tscache_test.go index 
12fdb6a94..ab6336f24 100644 --- a/chain/events/tscache_test.go +++ b/chain/events/tscache_test.go @@ -4,6 +4,8 @@ import ( "context" "testing" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/crypto" "github.com/stretchr/testify/require" "github.com/filecoin-project/go-address" @@ -11,12 +13,9 @@ import ( ) func TestTsCache(t *testing.T) { - tsc := newTSCache(50, func(context.Context, uint64, *types.TipSet) (*types.TipSet, error) { - t.Fatal("storage call") - return &types.TipSet{}, nil - }) + tsc := newTSCache(50, &tsCacheAPIFailOnStorageCall{t: t}) - h := uint64(75) + h := abi.ChainEpoch(75) a, _ := address.NewFromString("t00") @@ -27,8 +26,8 @@ func TestTsCache(t *testing.T) { ParentStateRoot: dummyCid, Messages: dummyCid, ParentMessageReceipts: dummyCid, - BlockSig: &types.Signature{Type: types.KTBLS}, - BLSAggregate: types.Signature{Type: types.KTBLS}, + BlockSig: &crypto.Signature{Type: crypto.SigTypeBLS}, + BLSAggregate: &crypto.Signature{Type: crypto.SigTypeBLS}, }}) if err != nil { t.Fatal(err) @@ -41,7 +40,12 @@ func TestTsCache(t *testing.T) { for i := 0; i < 9000; i++ { if i%90 > 60 { - if err := tsc.revert(tsc.best()); err != nil { + best, err := tsc.best() + if err != nil { + t.Fatal(err, "; i:", i) + return + } + if err := tsc.revert(best); err != nil { t.Fatal(err, "; i:", i) return } @@ -53,13 +57,23 @@ func TestTsCache(t *testing.T) { } -func TestTsCacheNulls(t *testing.T) { - tsc := newTSCache(50, func(context.Context, uint64, *types.TipSet) (*types.TipSet, error) { - t.Fatal("storage call") - return &types.TipSet{}, nil - }) +type tsCacheAPIFailOnStorageCall struct { + t *testing.T +} - h := uint64(75) +func (tc *tsCacheAPIFailOnStorageCall) ChainGetTipSetByHeight(ctx context.Context, epoch abi.ChainEpoch, key types.TipSetKey) (*types.TipSet, error) { + tc.t.Fatal("storage call") + return &types.TipSet{}, nil +} +func (tc *tsCacheAPIFailOnStorageCall) ChainHead(ctx context.Context) (*types.TipSet, 
error) { + tc.t.Fatal("storage call") + return &types.TipSet{}, nil +} + +func TestTsCacheNulls(t *testing.T) { + tsc := newTSCache(50, &tsCacheAPIFailOnStorageCall{t: t}) + + h := abi.ChainEpoch(75) a, _ := address.NewFromString("t00") add := func() { @@ -69,8 +83,8 @@ func TestTsCacheNulls(t *testing.T) { ParentStateRoot: dummyCid, Messages: dummyCid, ParentMessageReceipts: dummyCid, - BlockSig: &types.Signature{Type: types.KTBLS}, - BLSAggregate: types.Signature{Type: types.KTBLS}, + BlockSig: &crypto.Signature{Type: crypto.SigTypeBLS}, + BLSAggregate: &crypto.Signature{Type: crypto.SigTypeBLS}, }}) if err != nil { t.Fatal(err) @@ -89,7 +103,9 @@ func TestTsCacheNulls(t *testing.T) { add() add() - require.Equal(t, h-1, tsc.best().Height()) + best, err := tsc.best() + require.NoError(t, err) + require.Equal(t, h-1, best.Height()) ts, err := tsc.get(h - 1) require.NoError(t, err) @@ -107,9 +123,17 @@ func TestTsCacheNulls(t *testing.T) { require.NoError(t, err) require.Equal(t, h-8, ts.Height()) - require.NoError(t, tsc.revert(tsc.best())) - require.NoError(t, tsc.revert(tsc.best())) - require.Equal(t, h-8, tsc.best().Height()) + best, err = tsc.best() + require.NoError(t, err) + require.NoError(t, tsc.revert(best)) + + best, err = tsc.best() + require.NoError(t, err) + require.NoError(t, tsc.revert(best)) + + best, err = tsc.best() + require.NoError(t, err) + require.Equal(t, h-8, best.Height()) h += 50 add() @@ -118,3 +142,27 @@ func TestTsCacheNulls(t *testing.T) { require.NoError(t, err) require.Equal(t, h-1, ts.Height()) } + +type tsCacheAPIStorageCallCounter struct { + t *testing.T + chainGetTipSetByHeight int + chainHead int +} + +func (tc *tsCacheAPIStorageCallCounter) ChainGetTipSetByHeight(ctx context.Context, epoch abi.ChainEpoch, key types.TipSetKey) (*types.TipSet, error) { + tc.chainGetTipSetByHeight++ + return &types.TipSet{}, nil +} +func (tc *tsCacheAPIStorageCallCounter) ChainHead(ctx context.Context) (*types.TipSet, error) { + tc.chainHead++ + 
return &types.TipSet{}, nil +} + +func TestTsCacheEmpty(t *testing.T) { + // Calling best on an empty cache should just call out to the chain API + callCounter := &tsCacheAPIStorageCallCounter{t: t} + tsc := newTSCache(50, callCounter) + _, err := tsc.best() + require.NoError(t, err) + require.Equal(t, 1, callCounter.chainHead) +} diff --git a/chain/events/utils.go b/chain/events/utils.go index ba8083f9b..e50dbc6fe 100644 --- a/chain/events/utils.go +++ b/chain/events/utils.go @@ -5,15 +5,14 @@ import ( "golang.org/x/xerrors" - "github.com/filecoin-project/lotus/chain/store" "github.com/filecoin-project/lotus/chain/types" ) -func (e *calledEvents) CheckMsg(ctx context.Context, smsg store.ChainMsg, hnd CalledHandler) CheckFunc { +func (me *messageEvents) CheckMsg(ctx context.Context, smsg types.ChainMsg, hnd MsgHandler) CheckFunc { msg := smsg.VMMessage() return func(ts *types.TipSet) (done bool, more bool, err error) { - fa, err := e.cs.StateGetActor(ctx, msg.From, ts) + fa, err := me.cs.StateGetActor(ctx, msg.From, ts.Key()) if err != nil { return false, true, err } @@ -23,7 +22,7 @@ func (e *calledEvents) CheckMsg(ctx context.Context, smsg store.ChainMsg, hnd Ca return false, true, nil } - rec, err := e.cs.StateGetReceipt(ctx, smsg.VMMessage().Cid(), ts) + rec, err := me.cs.StateGetReceipt(ctx, smsg.VMMessage().Cid(), ts.Key()) if err != nil { return false, true, xerrors.Errorf("getting receipt in CheckMsg: %w", err) } @@ -34,12 +33,12 @@ func (e *calledEvents) CheckMsg(ctx context.Context, smsg store.ChainMsg, hnd Ca } } -func (e *calledEvents) MatchMsg(inmsg *types.Message) MatchFunc { - return func(msg *types.Message) (bool, error) { +func (me *messageEvents) MatchMsg(inmsg *types.Message) MsgMatchFunc { + return func(msg *types.Message) (matchOnce bool, matched bool, err error) { if msg.From == inmsg.From && msg.Nonce == inmsg.Nonce && !inmsg.Equals(msg) { - return false, xerrors.Errorf("matching msg %s from %s, nonce %d: got duplicate origin/nonce msg %s", 
inmsg.Cid(), inmsg.From, inmsg.Nonce, msg.Nonce) + return true, false, xerrors.Errorf("matching msg %s from %s, nonce %d: got duplicate origin/nonce msg %d", inmsg.Cid(), inmsg.From, inmsg.Nonce, msg.Nonce) } - return inmsg.Equals(msg), nil + return true, inmsg.Equals(msg), nil } } diff --git a/chain/exchange/cbor_gen.go b/chain/exchange/cbor_gen.go new file mode 100644 index 000000000..29b258081 --- /dev/null +++ b/chain/exchange/cbor_gen.go @@ -0,0 +1,644 @@ +// Code generated by github.com/whyrusleeping/cbor-gen. DO NOT EDIT. + +package exchange + +import ( + "fmt" + "io" + + types "github.com/filecoin-project/lotus/chain/types" + cid "github.com/ipfs/go-cid" + cbg "github.com/whyrusleeping/cbor-gen" + xerrors "golang.org/x/xerrors" +) + +var _ = xerrors.Errorf + +var lengthBufRequest = []byte{131} + +func (t *Request) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write(lengthBufRequest); err != nil { + return err + } + + scratch := make([]byte, 9) + + // t.Head ([]cid.Cid) (slice) + if len(t.Head) > cbg.MaxLength { + return xerrors.Errorf("Slice value in field t.Head was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.Head))); err != nil { + return err + } + for _, v := range t.Head { + if err := cbg.WriteCidBuf(scratch, w, v); err != nil { + return xerrors.Errorf("failed writing cid field t.Head: %w", err) + } + } + + // t.Length (uint64) (uint64) + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Length)); err != nil { + return err + } + + // t.Options (uint64) (uint64) + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Options)); err != nil { + return err + } + + return nil +} + +func (t *Request) UnmarshalCBOR(r io.Reader) error { + *t = Request{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { 
+ return err + } + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 3 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.Head ([]cid.Cid) (slice) + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + + if extra > cbg.MaxLength { + return fmt.Errorf("t.Head: array too large (%d)", extra) + } + + if maj != cbg.MajArray { + return fmt.Errorf("expected cbor array") + } + + if extra > 0 { + t.Head = make([]cid.Cid, extra) + } + + for i := 0; i < int(extra); i++ { + + c, err := cbg.ReadCid(br) + if err != nil { + return xerrors.Errorf("reading cid field t.Head failed: %w", err) + } + t.Head[i] = c + } + + // t.Length (uint64) (uint64) + + { + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.Length = uint64(extra) + + } + // t.Options (uint64) (uint64) + + { + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.Options = uint64(extra) + + } + return nil +} + +var lengthBufResponse = []byte{131} + +func (t *Response) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write(lengthBufResponse); err != nil { + return err + } + + scratch := make([]byte, 9) + + // t.Status (exchange.status) (uint64) + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Status)); err != nil { + return err + } + + // t.ErrorMessage (string) (string) + if len(t.ErrorMessage) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.ErrorMessage was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len(t.ErrorMessage))); err != nil { + return err + } + if _, err := io.WriteString(w, 
string(t.ErrorMessage)); err != nil { + return err + } + + // t.Chain ([]*exchange.BSTipSet) (slice) + if len(t.Chain) > cbg.MaxLength { + return xerrors.Errorf("Slice value in field t.Chain was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.Chain))); err != nil { + return err + } + for _, v := range t.Chain { + if err := v.MarshalCBOR(w); err != nil { + return err + } + } + return nil +} + +func (t *Response) UnmarshalCBOR(r io.Reader) error { + *t = Response{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 3 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.Status (exchange.status) (uint64) + + { + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.Status = status(extra) + + } + // t.ErrorMessage (string) (string) + + { + sval, err := cbg.ReadStringBuf(br, scratch) + if err != nil { + return err + } + + t.ErrorMessage = string(sval) + } + // t.Chain ([]*exchange.BSTipSet) (slice) + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + + if extra > cbg.MaxLength { + return fmt.Errorf("t.Chain: array too large (%d)", extra) + } + + if maj != cbg.MajArray { + return fmt.Errorf("expected cbor array") + } + + if extra > 0 { + t.Chain = make([]*BSTipSet, extra) + } + + for i := 0; i < int(extra); i++ { + + var v BSTipSet + if err := v.UnmarshalCBOR(br); err != nil { + return err + } + + t.Chain[i] = &v + } + + return nil +} + +var lengthBufCompactedMessages = []byte{132} + +func (t *CompactedMessages) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := 
w.Write(lengthBufCompactedMessages); err != nil { + return err + } + + scratch := make([]byte, 9) + + // t.Bls ([]*types.Message) (slice) + if len(t.Bls) > cbg.MaxLength { + return xerrors.Errorf("Slice value in field t.Bls was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.Bls))); err != nil { + return err + } + for _, v := range t.Bls { + if err := v.MarshalCBOR(w); err != nil { + return err + } + } + + // t.BlsIncludes ([][]uint64) (slice) + if len(t.BlsIncludes) > cbg.MaxLength { + return xerrors.Errorf("Slice value in field t.BlsIncludes was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.BlsIncludes))); err != nil { + return err + } + for _, v := range t.BlsIncludes { + if len(v) > cbg.MaxLength { + return xerrors.Errorf("Slice value in field v was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(v))); err != nil { + return err + } + for _, v := range v { + if err := cbg.CborWriteHeader(w, cbg.MajUnsignedInt, uint64(v)); err != nil { + return err + } + } + } + + // t.Secpk ([]*types.SignedMessage) (slice) + if len(t.Secpk) > cbg.MaxLength { + return xerrors.Errorf("Slice value in field t.Secpk was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.Secpk))); err != nil { + return err + } + for _, v := range t.Secpk { + if err := v.MarshalCBOR(w); err != nil { + return err + } + } + + // t.SecpkIncludes ([][]uint64) (slice) + if len(t.SecpkIncludes) > cbg.MaxLength { + return xerrors.Errorf("Slice value in field t.SecpkIncludes was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.SecpkIncludes))); err != nil { + return err + } + for _, v := range t.SecpkIncludes { + if len(v) > cbg.MaxLength { + return xerrors.Errorf("Slice value in field v was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, 
uint64(len(v))); err != nil { + return err + } + for _, v := range v { + if err := cbg.CborWriteHeader(w, cbg.MajUnsignedInt, uint64(v)); err != nil { + return err + } + } + } + return nil +} + +func (t *CompactedMessages) UnmarshalCBOR(r io.Reader) error { + *t = CompactedMessages{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 4 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.Bls ([]*types.Message) (slice) + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + + if extra > cbg.MaxLength { + return fmt.Errorf("t.Bls: array too large (%d)", extra) + } + + if maj != cbg.MajArray { + return fmt.Errorf("expected cbor array") + } + + if extra > 0 { + t.Bls = make([]*types.Message, extra) + } + + for i := 0; i < int(extra); i++ { + + var v types.Message + if err := v.UnmarshalCBOR(br); err != nil { + return err + } + + t.Bls[i] = &v + } + + // t.BlsIncludes ([][]uint64) (slice) + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + + if extra > cbg.MaxLength { + return fmt.Errorf("t.BlsIncludes: array too large (%d)", extra) + } + + if maj != cbg.MajArray { + return fmt.Errorf("expected cbor array") + } + + if extra > 0 { + t.BlsIncludes = make([][]uint64, extra) + } + + for i := 0; i < int(extra); i++ { + { + var maj byte + var extra uint64 + var err error + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + + if extra > cbg.MaxLength { + return fmt.Errorf("t.BlsIncludes[i]: array too large (%d)", extra) + } + + if maj != cbg.MajArray { + return fmt.Errorf("expected cbor array") + } + + if extra > 0 { + t.BlsIncludes[i] = make([]uint64, extra) + } + + for j := 0; j < int(extra); j++ { + + maj, val, err := 
cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return xerrors.Errorf("failed to read uint64 for t.BlsIncludes[i] slice: %w", err) + } + + if maj != cbg.MajUnsignedInt { + return xerrors.Errorf("value read for array t.BlsIncludes[i] was not a uint, instead got %d", maj) + } + + t.BlsIncludes[i][j] = uint64(val) + } + + } + } + + // t.Secpk ([]*types.SignedMessage) (slice) + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + + if extra > cbg.MaxLength { + return fmt.Errorf("t.Secpk: array too large (%d)", extra) + } + + if maj != cbg.MajArray { + return fmt.Errorf("expected cbor array") + } + + if extra > 0 { + t.Secpk = make([]*types.SignedMessage, extra) + } + + for i := 0; i < int(extra); i++ { + + var v types.SignedMessage + if err := v.UnmarshalCBOR(br); err != nil { + return err + } + + t.Secpk[i] = &v + } + + // t.SecpkIncludes ([][]uint64) (slice) + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + + if extra > cbg.MaxLength { + return fmt.Errorf("t.SecpkIncludes: array too large (%d)", extra) + } + + if maj != cbg.MajArray { + return fmt.Errorf("expected cbor array") + } + + if extra > 0 { + t.SecpkIncludes = make([][]uint64, extra) + } + + for i := 0; i < int(extra); i++ { + { + var maj byte + var extra uint64 + var err error + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + + if extra > cbg.MaxLength { + return fmt.Errorf("t.SecpkIncludes[i]: array too large (%d)", extra) + } + + if maj != cbg.MajArray { + return fmt.Errorf("expected cbor array") + } + + if extra > 0 { + t.SecpkIncludes[i] = make([]uint64, extra) + } + + for j := 0; j < int(extra); j++ { + + maj, val, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return xerrors.Errorf("failed to read uint64 for t.SecpkIncludes[i] slice: %w", err) + } + + if maj != cbg.MajUnsignedInt { + return xerrors.Errorf("value read for array t.SecpkIncludes[i] was not a 
uint, instead got %d", maj) + } + + t.SecpkIncludes[i][j] = uint64(val) + } + + } + } + + return nil +} + +var lengthBufBSTipSet = []byte{130} + +func (t *BSTipSet) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write(lengthBufBSTipSet); err != nil { + return err + } + + scratch := make([]byte, 9) + + // t.Blocks ([]*types.BlockHeader) (slice) + if len(t.Blocks) > cbg.MaxLength { + return xerrors.Errorf("Slice value in field t.Blocks was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.Blocks))); err != nil { + return err + } + for _, v := range t.Blocks { + if err := v.MarshalCBOR(w); err != nil { + return err + } + } + + // t.Messages (exchange.CompactedMessages) (struct) + if err := t.Messages.MarshalCBOR(w); err != nil { + return err + } + return nil +} + +func (t *BSTipSet) UnmarshalCBOR(r io.Reader) error { + *t = BSTipSet{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 2 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.Blocks ([]*types.BlockHeader) (slice) + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + + if extra > cbg.MaxLength { + return fmt.Errorf("t.Blocks: array too large (%d)", extra) + } + + if maj != cbg.MajArray { + return fmt.Errorf("expected cbor array") + } + + if extra > 0 { + t.Blocks = make([]*types.BlockHeader, extra) + } + + for i := 0; i < int(extra); i++ { + + var v types.BlockHeader + if err := v.UnmarshalCBOR(br); err != nil { + return err + } + + t.Blocks[i] = &v + } + + // t.Messages (exchange.CompactedMessages) (struct) + + { + + b, err := br.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := br.UnreadByte(); 
err != nil { + return err + } + t.Messages = new(CompactedMessages) + if err := t.Messages.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.Messages pointer: %w", err) + } + } + + } + return nil +} diff --git a/chain/exchange/client.go b/chain/exchange/client.go new file mode 100644 index 000000000..371c50aed --- /dev/null +++ b/chain/exchange/client.go @@ -0,0 +1,491 @@ +package exchange + +import ( + "bufio" + "context" + "fmt" + "math/rand" + "time" + + "github.com/libp2p/go-libp2p-core/helpers" + "github.com/libp2p/go-libp2p-core/host" + "github.com/libp2p/go-libp2p-core/network" + "github.com/libp2p/go-libp2p-core/peer" + + "go.opencensus.io/trace" + "go.uber.org/fx" + "golang.org/x/xerrors" + + cborutil "github.com/filecoin-project/go-cbor-util" + + "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/chain/store" + "github.com/filecoin-project/lotus/chain/types" + incrt "github.com/filecoin-project/lotus/lib/increadtimeout" + "github.com/filecoin-project/lotus/lib/peermgr" +) + +// client implements exchange.Client, using the libp2p ChainExchange protocol +// as the fetching mechanism. +type client struct { + // Connection manager used to contact the server. + // FIXME: We should have a reduced interface here, initialized + // just with our protocol ID, we shouldn't be able to open *any* + // connection. + host host.Host + + peerTracker *bsPeerTracker +} + +var _ Client = (*client)(nil) + +// NewClient creates a new libp2p-based exchange.Client that uses the libp2p +// ChainExhange protocol as the fetching mechanism. +func NewClient(lc fx.Lifecycle, host host.Host, pmgr peermgr.MaybePeerMgr) Client { + return &client{ + host: host, + peerTracker: newPeerTracker(lc, host, pmgr.Mgr), + } +} + +// Main logic of the client request service. The provided `Request` +// is sent to the `singlePeer` if one is indicated or to all available +// ones otherwise. 
The response is processed and validated according +// to the `Request` options. Either a `validatedResponse` is returned +// (which can be safely accessed), or an `error` that may represent +// either a response error status, a failed validation or an internal +// error. +// +// This is the internal single point of entry for all external-facing +// APIs, currently we have 3 very heterogeneous services exposed: +// * GetBlocks: Headers +// * GetFullTipSet: Headers | Messages +// * GetChainMessages: Messages +// This function handles all the different combinations of the available +// request options without disrupting external calls. In the future the +// consumers should be forced to use a more standardized service and +// adhere to a single API derived from this function. +func (c *client) doRequest( + ctx context.Context, + req *Request, + singlePeer *peer.ID, + // In the `GetChainMessages` case, we won't request the headers but we still + // need them to check the integrity of the `CompactedMessages` in the response + // so the tipset blocks need to be provided by the caller. + tipsets []*types.TipSet, +) (*validatedResponse, error) { + // Validate request. + if req.Length == 0 { + return nil, xerrors.Errorf("invalid request of length 0") + } + if req.Length > MaxRequestLength { + return nil, xerrors.Errorf("request length (%d) above maximum (%d)", + req.Length, MaxRequestLength) + } + if req.Options == 0 { + return nil, xerrors.Errorf("request with no options set") + } + + // Generate the list of peers to be queried, either the + // `singlePeer` indicated or all peers available (sorted + // by an internal peer tracker with some randomness injected). + var peers []peer.ID + if singlePeer != nil { + peers = []peer.ID{*singlePeer} + } else { + peers = c.getShuffledPeers() + if len(peers) == 0 { + return nil, xerrors.Errorf("no peers available") + } + } + + // Try the request for each peer in the list, + // return on the first successful response. 
+ // FIXME: Doing this serially isn't great, but fetching in parallel + // may not be a good idea either. Think about this more. + globalTime := build.Clock.Now() + // Global time used to track what is the expected time we will need to get + // a response if a client fails us. + for _, peer := range peers { + select { + case <-ctx.Done(): + return nil, xerrors.Errorf("context cancelled: %w", ctx.Err()) + default: + } + + // Send request, read response. + res, err := c.sendRequestToPeer(ctx, peer, req) + if err != nil { + if !xerrors.Is(err, network.ErrNoConn) { + log.Warnf("could not connect to peer %s: %s", + peer.String(), err) + } + continue + } + + // Process and validate response. + validRes, err := c.processResponse(req, res, tipsets) + if err != nil { + log.Warnf("processing peer %s response failed: %s", + peer.String(), err) + continue + } + + c.peerTracker.logGlobalSuccess(build.Clock.Since(globalTime)) + c.host.ConnManager().TagPeer(peer, "bsync", SuccessPeerTagValue) + return validRes, nil + } + + errString := "doRequest failed for all peers" + if singlePeer != nil { + errString = fmt.Sprintf("doRequest failed for single peer %s", *singlePeer) + } + return nil, xerrors.Errorf(errString) +} + +// Process and validate response. Check the status, the integrity of the +// information returned, and that it matches the request. Extract the information +// into a `validatedResponse` for the external-facing APIs to select what they +// need. +// +// We are conflating in the single error returned both status and validation +// errors. Peer penalization should happen here then, before returning, so +// we can apply the correct penalties depending on the cause of the error. +// FIXME: Add the `peer` as argument once we implement penalties. 
+func (c *client) processResponse(req *Request, res *Response, tipsets []*types.TipSet) (*validatedResponse, error) { + err := res.statusToError() + if err != nil { + return nil, xerrors.Errorf("status error: %s", err) + } + + options := parseOptions(req.Options) + if options.noOptionsSet() { + // Safety check: this shouldn't have been sent, and even if it did + // it should have been caught by the peer in its error status. + return nil, xerrors.Errorf("nothing was requested") + } + + // Verify that the chain segment returned is in the valid range. + // Note that the returned length might be less than requested. + resLength := len(res.Chain) + if resLength == 0 { + return nil, xerrors.Errorf("got no chain in successful response") + } + if resLength > int(req.Length) { + return nil, xerrors.Errorf("got longer response (%d) than requested (%d)", + resLength, req.Length) + } + if resLength < int(req.Length) && res.Status != Partial { + return nil, xerrors.Errorf("got less than requested without a proper status: %d", res.Status) + } + + validRes := &validatedResponse{} + if options.IncludeHeaders { + // Check for valid block sets and extract them into `TipSet`s. + validRes.tipsets = make([]*types.TipSet, resLength) + for i := 0; i < resLength; i++ { + if res.Chain[i] == nil { + return nil, xerrors.Errorf("response with nil tipset in pos %d", i) + } + for blockIdx, block := range res.Chain[i].Blocks { + if block == nil { + return nil, xerrors.Errorf("tipset with nil block in pos %d", blockIdx) + // FIXME: Maybe we should move this check to `NewTipSet`. + } + } + + validRes.tipsets[i], err = types.NewTipSet(res.Chain[i].Blocks) + if err != nil { + return nil, xerrors.Errorf("invalid tipset blocks at height (head - %d): %w", i, err) + } + } + + // Check that the returned head matches the one requested. 
+ if !types.CidArrsEqual(validRes.tipsets[0].Cids(), req.Head) { + return nil, xerrors.Errorf("returned chain head does not match request") + } + + // Check `TipSet`s are connected (valid chain). + for i := 0; i < len(validRes.tipsets)-1; i++ { + if validRes.tipsets[i].IsChildOf(validRes.tipsets[i+1]) == false { + return nil, fmt.Errorf("tipsets are not connected at height (head - %d)/(head - %d)", + i, i+1) + // FIXME: Maybe give more information here, like CIDs. + } + } + } + + if options.IncludeMessages { + validRes.messages = make([]*CompactedMessages, resLength) + for i := 0; i < resLength; i++ { + if res.Chain[i].Messages == nil { + return nil, xerrors.Errorf("no messages included for tipset at height (head - %d)", i) + } + validRes.messages[i] = res.Chain[i].Messages + } + + if options.IncludeHeaders { + // If the headers were also returned check that the compression + // indexes are valid before `toFullTipSets()` is called by the + // consumer. + err := c.validateCompressedIndices(res.Chain) + if err != nil { + return nil, err + } + } else { + // If we didn't request the headers they should have been provided + // by the caller. 
+ if len(tipsets) < len(res.Chain) { + return nil, xerrors.Errorf("not enought tipsets provided for message response validation, needed %d, have %d", len(res.Chain), len(tipsets)) + } + chain := make([]*BSTipSet, 0, resLength) + for i, resChain := range res.Chain { + next := &BSTipSet{ + Blocks: tipsets[i].Blocks(), + Messages: resChain.Messages, + } + chain = append(chain, next) + } + + err := c.validateCompressedIndices(chain) + if err != nil { + return nil, err + } + } + } + + return validRes, nil +} + +func (c *client) validateCompressedIndices(chain []*BSTipSet) error { + resLength := len(chain) + for tipsetIdx := 0; tipsetIdx < resLength; tipsetIdx++ { + msgs := chain[tipsetIdx].Messages + blocksNum := len(chain[tipsetIdx].Blocks) + + if len(msgs.BlsIncludes) != blocksNum { + return xerrors.Errorf("BlsIncludes (%d) does not match number of blocks (%d)", + len(msgs.BlsIncludes), blocksNum) + } + + if len(msgs.SecpkIncludes) != blocksNum { + return xerrors.Errorf("SecpkIncludes (%d) does not match number of blocks (%d)", + len(msgs.SecpkIncludes), blocksNum) + } + + for blockIdx := 0; blockIdx < blocksNum; blockIdx++ { + for _, mi := range msgs.BlsIncludes[blockIdx] { + if int(mi) >= len(msgs.Bls) { + return xerrors.Errorf("index in BlsIncludes (%d) exceeds number of messages (%d)", + mi, len(msgs.Bls)) + } + } + + for _, mi := range msgs.SecpkIncludes[blockIdx] { + if int(mi) >= len(msgs.Secpk) { + return xerrors.Errorf("index in SecpkIncludes (%d) exceeds number of messages (%d)", + mi, len(msgs.Secpk)) + } + } + } + } + + return nil +} + +// GetBlocks implements Client.GetBlocks(). Refer to the godocs there. 
+func (c *client) GetBlocks(ctx context.Context, tsk types.TipSetKey, count int) ([]*types.TipSet, error) { + ctx, span := trace.StartSpan(ctx, "bsync.GetBlocks") + defer span.End() + if span.IsRecordingEvents() { + span.AddAttributes( + trace.StringAttribute("tipset", fmt.Sprint(tsk.Cids())), + trace.Int64Attribute("count", int64(count)), + ) + } + + req := &Request{ + Head: tsk.Cids(), + Length: uint64(count), + Options: Headers, + } + + validRes, err := c.doRequest(ctx, req, nil, nil) + if err != nil { + return nil, err + } + + return validRes.tipsets, nil +} + +// GetFullTipSet implements Client.GetFullTipSet(). Refer to the godocs there. +func (c *client) GetFullTipSet(ctx context.Context, peer peer.ID, tsk types.TipSetKey) (*store.FullTipSet, error) { + // TODO: round robin through these peers on error + + req := &Request{ + Head: tsk.Cids(), + Length: 1, + Options: Headers | Messages, + } + + validRes, err := c.doRequest(ctx, req, &peer, nil) + if err != nil { + return nil, err + } + + return validRes.toFullTipSets()[0], nil + // If `doRequest` didn't fail we are guaranteed to have at least + // *one* tipset here, so it's safe to index directly. +} + +// GetChainMessages implements Client.GetChainMessages(). Refer to the godocs there. +func (c *client) GetChainMessages(ctx context.Context, tipsets []*types.TipSet) ([]*CompactedMessages, error) { + head := tipsets[0] + length := uint64(len(tipsets)) + + ctx, span := trace.StartSpan(ctx, "GetChainMessages") + if span.IsRecordingEvents() { + span.AddAttributes( + trace.StringAttribute("tipset", fmt.Sprint(head.Cids())), + trace.Int64Attribute("count", int64(length)), + ) + } + defer span.End() + + req := &Request{ + Head: head.Cids(), + Length: length, + Options: Messages, + } + + validRes, err := c.doRequest(ctx, req, nil, tipsets) + if err != nil { + return nil, err + } + + return validRes.messages, nil +} + +// Send a request to a peer. Write request in the stream and read the +// response back. 
We do not do any processing of the request/response +// here. +func (c *client) sendRequestToPeer(ctx context.Context, peer peer.ID, req *Request) (_ *Response, err error) { + // Trace code. + ctx, span := trace.StartSpan(ctx, "sendRequestToPeer") + defer span.End() + if span.IsRecordingEvents() { + span.AddAttributes( + trace.StringAttribute("peer", peer.Pretty()), + ) + } + defer func() { + if err != nil { + if span.IsRecordingEvents() { + span.SetStatus(trace.Status{ + Code: 5, + Message: err.Error(), + }) + } + } + }() + // -- TRACE -- + + supported, err := c.host.Peerstore().SupportsProtocols(peer, BlockSyncProtocolID, ChainExchangeProtocolID) + if err != nil { + c.RemovePeer(peer) + return nil, xerrors.Errorf("failed to get protocols for peer: %w", err) + } + if len(supported) == 0 || (supported[0] != BlockSyncProtocolID && supported[0] != ChainExchangeProtocolID) { + return nil, xerrors.Errorf("peer %s does not support protocols %s", + peer, []string{BlockSyncProtocolID, ChainExchangeProtocolID}) + } + + connectionStart := build.Clock.Now() + + // Open stream to peer. + stream, err := c.host.NewStream( + network.WithNoDial(ctx, "should already have connection"), + peer, + ChainExchangeProtocolID, BlockSyncProtocolID) + if err != nil { + c.RemovePeer(peer) + return nil, xerrors.Errorf("failed to open stream to peer: %w", err) + } + + defer func() { + // Note: this will become just stream.Close once we've completed the go-libp2p migration to + // go-libp2p-core 0.7.0 + go helpers.FullClose(stream) //nolint:errcheck + }() + + // Write request. + _ = stream.SetWriteDeadline(time.Now().Add(WriteReqDeadline)) + if err := cborutil.WriteCborRPC(stream, req); err != nil { + _ = stream.SetWriteDeadline(time.Time{}) + c.peerTracker.logFailure(peer, build.Clock.Since(connectionStart), req.Length) + // FIXME: Should we also remove peer here? 
+ return nil, err + } + _ = stream.SetWriteDeadline(time.Time{}) // clear deadline // FIXME: Needs + // its own API (https://github.com/libp2p/go-libp2p-core/issues/162). + + // Read response. + var res Response + err = cborutil.ReadCborRPC( + bufio.NewReader(incrt.New(stream, ReadResMinSpeed, ReadResDeadline)), + &res) + if err != nil { + c.peerTracker.logFailure(peer, build.Clock.Since(connectionStart), req.Length) + return nil, xerrors.Errorf("failed to read chainxchg response: %w", err) + } + + // FIXME: Move all this together at the top using a defer as done elsewhere. + // Maybe we need to declare `res` in the signature. + if span.IsRecordingEvents() { + span.AddAttributes( + trace.Int64Attribute("resp_status", int64(res.Status)), + trace.StringAttribute("msg", res.ErrorMessage), + trace.Int64Attribute("chain_len", int64(len(res.Chain))), + ) + } + + c.peerTracker.logSuccess(peer, build.Clock.Since(connectionStart), uint64(len(res.Chain))) + // FIXME: We should really log a success only after we validate the response. + // It might be a bit hard to do. + return &res, nil +} + +// AddPeer implements Client.AddPeer(). Refer to the godocs there. +func (c *client) AddPeer(p peer.ID) { + c.peerTracker.addPeer(p) +} + +// RemovePeer implements Client.RemovePeer(). Refer to the godocs there. +func (c *client) RemovePeer(p peer.ID) { + c.peerTracker.removePeer(p) +} + +// getShuffledPeers returns a preference-sorted set of peers (by latency +// and failure counting), shuffling the first few peers so we don't always +// pick the same peer. +// FIXME: Consider merging with `shufflePrefix()s`. 
+func (c *client) getShuffledPeers() []peer.ID { + peers := c.peerTracker.prefSortedPeers() + shufflePrefix(peers) + return peers +} + +func shufflePrefix(peers []peer.ID) { + prefix := ShufflePeersPrefix + if len(peers) < prefix { + prefix = len(peers) + } + + buf := make([]peer.ID, prefix) + perm := rand.Perm(prefix) + for i, v := range perm { + buf[i] = peers[v] + } + + copy(peers, buf) +} diff --git a/chain/exchange/doc.go b/chain/exchange/doc.go new file mode 100644 index 000000000..b20ee0c1f --- /dev/null +++ b/chain/exchange/doc.go @@ -0,0 +1,19 @@ +// Package exchange contains the ChainExchange server and client components. +// +// ChainExchange is the basic chain synchronization protocol of Filecoin. +// ChainExchange is an RPC-oriented protocol, with a single operation to +// request blocks for now. +// +// A request contains a start anchor block (referred to with a CID), and a +// amount of blocks requested beyond the anchor (including the anchor itself). +// +// A client can also pass options, encoded as a 64-bit bitfield. Lotus supports +// two options at the moment: +// +// - include block contents +// - include block messages +// +// The response will include a status code, an optional message, and the +// response payload in case of success. The payload is a slice of serialized +// tipsets. +package exchange diff --git a/chain/exchange/interfaces.go b/chain/exchange/interfaces.go new file mode 100644 index 000000000..acc0854da --- /dev/null +++ b/chain/exchange/interfaces.go @@ -0,0 +1,50 @@ +package exchange + +import ( + "context" + + "github.com/libp2p/go-libp2p-core/network" + "github.com/libp2p/go-libp2p-core/peer" + + "github.com/filecoin-project/lotus/chain/store" + "github.com/filecoin-project/lotus/chain/types" +) + +// Server is the responder side of the ChainExchange protocol. It accepts +// requests from clients and services them by returning the requested +// chain data. 
+type Server interface { + // HandleStream is the protocol handler to be registered on a libp2p + // protocol router. + // + // In the current version of the protocol, streams are single-use. The + // server will read a single Request, and will respond with a single + // Response. It will dispose of the stream straight after. + HandleStream(stream network.Stream) +} + +// Client is the requesting side of the ChainExchange protocol. It acts as +// a proxy for other components to request chain data from peers. It is chiefly +// used by the Syncer. +type Client interface { + // GetBlocks fetches block headers from the network, from the provided + // tipset *backwards*, returning as many tipsets as the count parameter, + // or less. + GetBlocks(ctx context.Context, tsk types.TipSetKey, count int) ([]*types.TipSet, error) + + // GetChainMessages fetches messages from the network, starting from the first provided tipset + // and returning messages from as many tipsets as requested or less. + GetChainMessages(ctx context.Context, tipsets []*types.TipSet) ([]*CompactedMessages, error) + + // GetFullTipSet fetches a full tipset from a given peer. If successful, + // the fetched object contains block headers and all messages in full form. + GetFullTipSet(ctx context.Context, peer peer.ID, tsk types.TipSetKey) (*store.FullTipSet, error) + + // AddPeer adds a peer to the pool of peers that the Client requests + // data from. + AddPeer(peer peer.ID) + + // RemovePeer removes a peer from the pool of peers that the Client + // requests data from. + RemovePeer(peer peer.ID) +} diff --git a/chain/exchange/peer_tracker.go b/chain/exchange/peer_tracker.go new file mode 100644 index 000000000..902baadce --- /dev/null +++ b/chain/exchange/peer_tracker.go @@ -0,0 +1,189 @@ +package exchange + +// FIXME: This needs to be reviewed. 
+ +import ( + "context" + "sort" + "sync" + "time" + + host "github.com/libp2p/go-libp2p-core/host" + "github.com/libp2p/go-libp2p-core/peer" + "go.uber.org/fx" + + "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/lib/peermgr" +) + +type peerStats struct { + successes int + failures int + firstSeen time.Time + averageTime time.Duration +} + +type bsPeerTracker struct { + lk sync.Mutex + + peers map[peer.ID]*peerStats + avgGlobalTime time.Duration + + pmgr *peermgr.PeerMgr +} + +func newPeerTracker(lc fx.Lifecycle, h host.Host, pmgr *peermgr.PeerMgr) *bsPeerTracker { + bsPt := &bsPeerTracker{ + peers: make(map[peer.ID]*peerStats), + pmgr: pmgr, + } + + sub, err := h.EventBus().Subscribe(new(peermgr.NewFilPeer)) + if err != nil { + panic(err) + } + + go func() { + for newPeer := range sub.Out() { + bsPt.addPeer(newPeer.(peermgr.NewFilPeer).Id) + } + }() + + lc.Append(fx.Hook{ + OnStop: func(ctx context.Context) error { + return sub.Close() + }, + }) + + return bsPt +} + +func (bpt *bsPeerTracker) addPeer(p peer.ID) { + bpt.lk.Lock() + defer bpt.lk.Unlock() + if _, ok := bpt.peers[p]; ok { + return + } + bpt.peers[p] = &peerStats{ + firstSeen: build.Clock.Now(), + } + +} + +const ( + // newPeerMul is how much better than average is the new peer assumed to be + // less than one to encourouge trying new peers + newPeerMul = 0.9 +) + +func (bpt *bsPeerTracker) prefSortedPeers() []peer.ID { + // TODO: this could probably be cached, but as long as its not too many peers, fine for now + bpt.lk.Lock() + defer bpt.lk.Unlock() + out := make([]peer.ID, 0, len(bpt.peers)) + for p := range bpt.peers { + out = append(out, p) + } + + // sort by 'expected cost' of requesting data from that peer + // additionally handle edge cases where not enough data is available + sort.Slice(out, func(i, j int) bool { + pi := bpt.peers[out[i]] + pj := bpt.peers[out[j]] + + var costI, costJ float64 + + getPeerInitLat := func(p peer.ID) float64 { + return 
float64(bpt.avgGlobalTime) * newPeerMul + } + + if pi.successes+pi.failures > 0 { + failRateI := float64(pi.failures) / float64(pi.failures+pi.successes) + costI = float64(pi.averageTime) + failRateI*float64(bpt.avgGlobalTime) + } else { + costI = getPeerInitLat(out[i]) + } + + if pj.successes+pj.failures > 0 { + failRateJ := float64(pj.failures) / float64(pj.failures+pj.successes) + costJ = float64(pj.averageTime) + failRateJ*float64(bpt.avgGlobalTime) + } else { + costJ = getPeerInitLat(out[j]) + } + + return costI < costJ + }) + + return out +} + +const ( + // xInvAlpha = (N+1)/2 + + localInvAlpha = 10 // 86% of the value is the last 19 + globalInvAlpha = 25 // 86% of the value is the last 49 +) + +func (bpt *bsPeerTracker) logGlobalSuccess(dur time.Duration) { + bpt.lk.Lock() + defer bpt.lk.Unlock() + + if bpt.avgGlobalTime == 0 { + bpt.avgGlobalTime = dur + return + } + delta := (dur - bpt.avgGlobalTime) / globalInvAlpha + bpt.avgGlobalTime += delta +} + +func logTime(pi *peerStats, dur time.Duration) { + if pi.averageTime == 0 { + pi.averageTime = dur + return + } + delta := (dur - pi.averageTime) / localInvAlpha + pi.averageTime += delta + +} + +func (bpt *bsPeerTracker) logSuccess(p peer.ID, dur time.Duration, reqSize uint64) { + bpt.lk.Lock() + defer bpt.lk.Unlock() + + var pi *peerStats + var ok bool + if pi, ok = bpt.peers[p]; !ok { + log.Warnw("log success called on peer not in tracker", "peerid", p.String()) + return + } + + pi.successes++ + if reqSize == 0 { + reqSize = 1 + } + logTime(pi, dur/time.Duration(reqSize)) +} + +func (bpt *bsPeerTracker) logFailure(p peer.ID, dur time.Duration, reqSize uint64) { + bpt.lk.Lock() + defer bpt.lk.Unlock() + + var pi *peerStats + var ok bool + if pi, ok = bpt.peers[p]; !ok { + log.Warn("log failure called on peer not in tracker", "peerid", p.String()) + return + } + + pi.failures++ + if reqSize == 0 { + reqSize = 1 + } + logTime(pi, dur/time.Duration(reqSize)) +} + +func (bpt *bsPeerTracker) removePeer(p 
peer.ID) { + bpt.lk.Lock() + defer bpt.lk.Unlock() + delete(bpt.peers, p) +} diff --git a/chain/exchange/protocol.go b/chain/exchange/protocol.go new file mode 100644 index 000000000..211479335 --- /dev/null +++ b/chain/exchange/protocol.go @@ -0,0 +1,207 @@ +package exchange + +import ( + "time" + + "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/chain/store" + + "github.com/ipfs/go-cid" + logging "github.com/ipfs/go-log" + "golang.org/x/xerrors" + + "github.com/filecoin-project/lotus/chain/types" +) + +var log = logging.Logger("chainxchg") + +const ( + // BlockSyncProtocolID is the protocol ID of the former blocksync protocol. + // Deprecated. + BlockSyncProtocolID = "/fil/sync/blk/0.0.1" + + // ChainExchangeProtocolID is the protocol ID of the chain exchange + // protocol. + ChainExchangeProtocolID = "/fil/chain/xchg/0.0.1" +) + +// FIXME: Bumped from original 800 to this to accommodate `syncFork()` +// use of `GetBlocks()`. It seems the expectation of that API is to +// fetch any amount of blocks leaving it to the internal logic here +// to partition and reassemble the requests if they go above the maximum. +// (Also as a consequence of this temporarily removing the `const` +// qualifier to avoid "const initializer [...] is not a constant" error.) +var MaxRequestLength = uint64(build.ForkLengthThreshold) + +const ( + // Extracted constants from the code. + // FIXME: Should be reviewed and confirmed. + SuccessPeerTagValue = 25 + WriteReqDeadline = 5 * time.Second + ReadResDeadline = WriteReqDeadline + ReadResMinSpeed = 50 << 10 + ShufflePeersPrefix = 16 + WriteResDeadline = 60 * time.Second +) + +// FIXME: Rename. Make private. +type Request struct { + // List of ordered CIDs comprising a `TipSetKey` from where to start + // fetching backwards. + // FIXME: Consider using `TipSetKey` now (introduced after the creation + // of this protocol) instead of converting back and forth. 
+ Head []cid.Cid + // Number of block sets to fetch from `Head` (inclusive, should always + // be in the range `[1, MaxRequestLength]`). + Length uint64 + // Request options, see `Options` type for more details. Compressed + // in a single `uint64` to save space. + Options uint64 +} + +// `Request` processed and validated to query the tipsets needed. +type validatedRequest struct { + head types.TipSetKey + length uint64 + options *parsedOptions +} + +// Request options. When fetching the chain segment we can fetch +// either block headers, messages, or both. +const ( + Headers = 1 << iota + Messages +) + +// Decompressed options into separate struct members for easy access +// during internal processing.. +type parsedOptions struct { + IncludeHeaders bool + IncludeMessages bool +} + +func (options *parsedOptions) noOptionsSet() bool { + return options.IncludeHeaders == false && + options.IncludeMessages == false +} + +func parseOptions(optfield uint64) *parsedOptions { + return &parsedOptions{ + IncludeHeaders: optfield&(uint64(Headers)) != 0, + IncludeMessages: optfield&(uint64(Messages)) != 0, + } +} + +// FIXME: Rename. Make private. +type Response struct { + Status status + // String that complements the error status when converting to an + // internal error (see `statusToError()`). + ErrorMessage string + + Chain []*BSTipSet +} + +type status uint64 + +const ( + Ok status = 0 + // We could not fetch all blocks requested (but at least we returned + // the `Head` requested). Not considered an error. + Partial = 101 + + // Errors + NotFound = 201 + GoAway = 202 + InternalError = 203 + BadRequest = 204 +) + +// Convert status to internal error. +func (res *Response) statusToError() error { + switch res.Status { + case Ok, Partial: + return nil + // FIXME: Consider if we want to not process `Partial` responses + // and return an error instead. 
+ case NotFound: + return xerrors.Errorf("not found") + case GoAway: + return xerrors.Errorf("not handling 'go away' chainxchg responses yet") + case InternalError: + return xerrors.Errorf("block sync peer errored: %s", res.ErrorMessage) + case BadRequest: + return xerrors.Errorf("block sync request invalid: %s", res.ErrorMessage) + default: + return xerrors.Errorf("unrecognized response code: %d", res.Status) + } +} + +// FIXME: Rename. +type BSTipSet struct { + // List of blocks belonging to a single tipset to which the + // `CompactedMessages` are linked. + Blocks []*types.BlockHeader + Messages *CompactedMessages +} + +// All messages of a single tipset compacted together instead +// of grouped by block to save space, since there are normally +// many repeated messages per tipset in different blocks. +// +// `BlsIncludes`/`SecpkIncludes` matches `Bls`/`Secpk` messages +// to blocks in the tipsets with the format: +// `BlsIncludes[BI][MI]` +// * BI: block index in the tipset. +// * MI: message index in `Bls` list +// +// FIXME: The logic to decompress this structure should belong +// to itself, not to the consumer. +type CompactedMessages struct { + Bls []*types.Message + BlsIncludes [][]uint64 + + Secpk []*types.SignedMessage + SecpkIncludes [][]uint64 +} + +// Response that has been validated according to the protocol +// and can be safely accessed. +type validatedResponse struct { + tipsets []*types.TipSet + // List of all messages per tipset (grouped by tipset, + // not by block, hence a single index like `tipsets`). + messages []*CompactedMessages +} + +// Decompress messages and form full tipsets with them. The headers +// need to have been requested as well. +func (res *validatedResponse) toFullTipSets() []*store.FullTipSet { + if len(res.tipsets) == 0 || len(res.tipsets) != len(res.messages) { + // This decompression can only be done if both headers and + // messages are returned in the response. 
(The second check + // is already implied by the guarantees of `validatedResponse`, + // added here just for completeness.) + return nil + } + ftsList := make([]*store.FullTipSet, len(res.tipsets)) + for tipsetIdx := range res.tipsets { + fts := &store.FullTipSet{} // FIXME: We should use the `NewFullTipSet` API. + msgs := res.messages[tipsetIdx] + for blockIdx, b := range res.tipsets[tipsetIdx].Blocks() { + fb := &types.FullBlock{ + Header: b, + } + for _, mi := range msgs.BlsIncludes[blockIdx] { + fb.BlsMessages = append(fb.BlsMessages, msgs.Bls[mi]) + } + for _, mi := range msgs.SecpkIncludes[blockIdx] { + fb.SecpkMessages = append(fb.SecpkMessages, msgs.Secpk[mi]) + } + + fts.Blocks = append(fts.Blocks, fb) + } + ftsList[tipsetIdx] = fts + } + return ftsList +} diff --git a/chain/exchange/server.go b/chain/exchange/server.go new file mode 100644 index 000000000..dcdb5b3a5 --- /dev/null +++ b/chain/exchange/server.go @@ -0,0 +1,250 @@ +package exchange + +import ( + "bufio" + "context" + "fmt" + "time" + + "go.opencensus.io/trace" + "golang.org/x/xerrors" + + cborutil "github.com/filecoin-project/go-cbor-util" + + "github.com/filecoin-project/lotus/chain/store" + "github.com/filecoin-project/lotus/chain/types" + + "github.com/ipfs/go-cid" + "github.com/libp2p/go-libp2p-core/helpers" + inet "github.com/libp2p/go-libp2p-core/network" +) + +// server implements exchange.Server. It services requests for the +// libp2p ChainExchange protocol. +type server struct { + cs *store.ChainStore +} + +var _ Server = (*server)(nil) + +// NewServer creates a new libp2p-based exchange.Server. It services requests +// for the libp2p ChainExchange protocol. +func NewServer(cs *store.ChainStore) Server { + return &server{ + cs: cs, + } +} + +// HandleStream implements Server.HandleStream. Refer to the godocs there. 
+func (s *server) HandleStream(stream inet.Stream) { + ctx, span := trace.StartSpan(context.Background(), "chainxchg.HandleStream") + defer span.End() + + // Note: this will become just stream.Close once we've completed the go-libp2p migration to + // go-libp2p-core 0.7.0 + defer helpers.FullClose(stream) //nolint:errcheck + + var req Request + if err := cborutil.ReadCborRPC(bufio.NewReader(stream), &req); err != nil { + log.Warnf("failed to read block sync request: %s", err) + return + } + log.Infow("block sync request", + "start", req.Head, "len", req.Length) + + resp, err := s.processRequest(ctx, &req) + if err != nil { + log.Warn("failed to process request: ", err) + return + } + + _ = stream.SetDeadline(time.Now().Add(WriteResDeadline)) + if err := cborutil.WriteCborRPC(stream, resp); err != nil { + _ = stream.SetDeadline(time.Time{}) + log.Warnw("failed to write back response for handle stream", + "err", err, "peer", stream.Conn().RemotePeer()) + return + } + _ = stream.SetDeadline(time.Time{}) +} + +// Validate and service the request. We return either a protocol +// response or an internal error. +func (s *server) processRequest(ctx context.Context, req *Request) (*Response, error) { + validReq, errResponse := validateRequest(ctx, req) + if errResponse != nil { + // The request did not pass validation, return the response + // indicating it. + return errResponse, nil + } + + return s.serviceRequest(ctx, validReq) +} + +// Validate request. We either return a `validatedRequest`, or an error +// `Response` indicating why we can't process it. We do not return any +// internal errors here, we just signal protocol ones. 
+func validateRequest(ctx context.Context, req *Request) (*validatedRequest, *Response) { + _, span := trace.StartSpan(ctx, "chainxchg.ValidateRequest") + defer span.End() + + validReq := validatedRequest{} + + validReq.options = parseOptions(req.Options) + if validReq.options.noOptionsSet() { + return nil, &Response{ + Status: BadRequest, + ErrorMessage: "no options set", + } + } + + validReq.length = req.Length + if validReq.length > MaxRequestLength { + return nil, &Response{ + Status: BadRequest, + ErrorMessage: fmt.Sprintf("request length over maximum allowed (%d)", + MaxRequestLength), + } + } + if validReq.length == 0 { + return nil, &Response{ + Status: BadRequest, + ErrorMessage: "invalid request length of zero", + } + } + + if len(req.Head) == 0 { + return nil, &Response{ + Status: BadRequest, + ErrorMessage: "no cids in request", + } + } + validReq.head = types.NewTipSetKey(req.Head...) + + // FIXME: Add as a defer at the start. + span.AddAttributes( + trace.BoolAttribute("blocks", validReq.options.IncludeHeaders), + trace.BoolAttribute("messages", validReq.options.IncludeMessages), + trace.Int64Attribute("reqlen", int64(validReq.length)), + ) + + return &validReq, nil +} + +func (s *server) serviceRequest(ctx context.Context, req *validatedRequest) (*Response, error) { + _, span := trace.StartSpan(ctx, "chainxchg.ServiceRequest") + defer span.End() + + chain, err := collectChainSegment(s.cs, req) + if err != nil { + log.Warn("block sync request: collectChainSegment failed: ", err) + return &Response{ + Status: InternalError, + ErrorMessage: err.Error(), + }, nil + } + + status := Ok + if len(chain) < int(req.length) { + status = Partial + } + + return &Response{ + Chain: chain, + Status: status, + }, nil +} + +func collectChainSegment(cs *store.ChainStore, req *validatedRequest) ([]*BSTipSet, error) { + var bstips []*BSTipSet + + cur := req.head + for { + var bst BSTipSet + ts, err := cs.LoadTipSet(cur) + if err != nil { + return nil, 
xerrors.Errorf("failed loading tipset %s: %w", cur, err) + } + + if req.options.IncludeHeaders { + bst.Blocks = ts.Blocks() + } + + if req.options.IncludeMessages { + bmsgs, bmincl, smsgs, smincl, err := gatherMessages(cs, ts) + if err != nil { + return nil, xerrors.Errorf("gather messages failed: %w", err) + } + + // FIXME: Pass the response to `gatherMessages()` and set all this there. + bst.Messages = &CompactedMessages{} + bst.Messages.Bls = bmsgs + bst.Messages.BlsIncludes = bmincl + bst.Messages.Secpk = smsgs + bst.Messages.SecpkIncludes = smincl + } + + bstips = append(bstips, &bst) + + // If we collected the length requested or if we reached the + // start (genesis), then stop. + if uint64(len(bstips)) >= req.length || ts.Height() == 0 { + return bstips, nil + } + + cur = ts.Parents() + } +} + +func gatherMessages(cs *store.ChainStore, ts *types.TipSet) ([]*types.Message, [][]uint64, []*types.SignedMessage, [][]uint64, error) { + blsmsgmap := make(map[cid.Cid]uint64) + secpkmsgmap := make(map[cid.Cid]uint64) + var secpkincl, blsincl [][]uint64 + + var blscids, secpkcids []cid.Cid + for _, block := range ts.Blocks() { + bc, sc, err := cs.ReadMsgMetaCids(block.Messages) + if err != nil { + return nil, nil, nil, nil, err + } + + // FIXME: DRY. Use `chain.Message` interface. 
+ bmi := make([]uint64, 0, len(bc)) + for _, m := range bc { + i, ok := blsmsgmap[m] + if !ok { + i = uint64(len(blscids)) + blscids = append(blscids, m) + blsmsgmap[m] = i + } + + bmi = append(bmi, i) + } + blsincl = append(blsincl, bmi) + + smi := make([]uint64, 0, len(sc)) + for _, m := range sc { + i, ok := secpkmsgmap[m] + if !ok { + i = uint64(len(secpkcids)) + secpkcids = append(secpkcids, m) + secpkmsgmap[m] = i + } + + smi = append(smi, i) + } + secpkincl = append(secpkincl, smi) + } + + blsmsgs, err := cs.LoadMessagesFromCids(blscids) + if err != nil { + return nil, nil, nil, nil, err + } + + secpkmsgs, err := cs.LoadSignedMessagesFromCids(secpkcids) + if err != nil { + return nil, nil, nil, nil, err + } + + return blsmsgs, blsincl, secpkmsgs, secpkincl, nil +} diff --git a/chain/gen/gen.go b/chain/gen/gen.go index d476a0937..d05165ab1 100644 --- a/chain/gen/gen.go +++ b/chain/gen/gen.go @@ -3,43 +3,54 @@ package gen import ( "bytes" "context" - "crypto/sha256" - "encoding/binary" "fmt" "io/ioutil" "sync/atomic" + "time" - ffi "github.com/filecoin-project/filecoin-ffi" + "github.com/filecoin-project/specs-actors/actors/runtime/proof" - sectorbuilder "github.com/filecoin-project/go-sectorbuilder" + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/go-state-types/crypto" + block "github.com/ipfs/go-block-format" "github.com/ipfs/go-blockservice" - "github.com/ipfs/go-car" + "github.com/ipfs/go-cid" offline "github.com/ipfs/go-ipfs-exchange-offline" + format "github.com/ipfs/go-ipld-format" + logging "github.com/ipfs/go-log/v2" "github.com/ipfs/go-merkledag" - peer "github.com/libp2p/go-libp2p-core/peer" + "github.com/ipld/go-car" "go.opencensus.io/trace" "golang.org/x/xerrors" - "github.com/filecoin-project/go-address" "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/build" + 
"github.com/filecoin-project/lotus/chain/actors/policy" + "github.com/filecoin-project/lotus/chain/beacon" + genesis2 "github.com/filecoin-project/lotus/chain/gen/genesis" "github.com/filecoin-project/lotus/chain/stmgr" "github.com/filecoin-project/lotus/chain/store" "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/chain/vm" "github.com/filecoin-project/lotus/chain/wallet" "github.com/filecoin-project/lotus/cmd/lotus-seed/seed" + "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper" "github.com/filecoin-project/lotus/genesis" + "github.com/filecoin-project/lotus/lib/blockstore" + "github.com/filecoin-project/lotus/lib/sigs" "github.com/filecoin-project/lotus/node/repo" - - block "github.com/ipfs/go-block-format" - "github.com/ipfs/go-cid" - blockstore "github.com/ipfs/go-ipfs-blockstore" - logging "github.com/ipfs/go-log" ) +const msgsPerBlock = 20 + +//nolint:deadcode,varcheck var log = logging.Logger("gen") -const msgsPerBlock = 20 +var ValidWpostForTesting = []proof.PoStProof{{ + ProofBytes: []byte("valid proof"), +}} type ChainGen struct { msgsPerBlock int @@ -48,16 +59,20 @@ type ChainGen struct { cs *store.ChainStore + beacon beacon.Schedule + sm *stmgr.StateManager genesis *types.BlockHeader CurTipset *store.FullTipSet - Timestamper func(*types.TipSet, uint64) uint64 + Timestamper func(*types.TipSet, abi.ChainEpoch) uint64 + + GetMessages func(*ChainGen) ([]*types.SignedMessage, error) w *wallet.Wallet - eppProvs map[address.Address]ElectionPoStProver + eppProvs map[address.Address]WinningPoStProver Miners []address.Address receivers []address.Address banker address.Address @@ -80,7 +95,35 @@ func (m mybs) Get(c cid.Cid) (block.Block, error) { return b, nil } -func NewGenerator() (*ChainGen, error) { +var rootkeyMultisig = genesis.MultisigMeta{ + Signers: []address.Address{remAccTestKey}, + Threshold: 1, + VestingDuration: 0, + VestingStart: 0, +} + +var DefaultVerifregRootkeyActor = genesis.Actor{ + Type: 
genesis.TMultisig, + Balance: big.NewInt(0), + Meta: rootkeyMultisig.ActorMeta(), +} + +var remAccTestKey, _ = address.NewFromString("t1ceb34gnsc6qk5dt6n7xg6ycwzasjhbxm3iylkiy") +var remAccMeta = genesis.MultisigMeta{ + Signers: []address.Address{remAccTestKey}, + Threshold: 1, +} + +var DefaultRemainderAccountActor = genesis.Actor{ + Type: genesis.TMultisig, + Balance: big.NewInt(0), + Meta: remAccMeta.ActorMeta(), +} + +func NewGeneratorWithSectors(numSectors int) (*ChainGen, error) { + // TODO: we really shouldn't modify a global variable here. + policy.SetSupportedProofTypes(abi.RegisteredSealProof_StackedDrg2KiBV1) + mr := repo.NewMemory(nil) lr, err := mr.Lock(repo.StorageMiner) if err != nil { @@ -92,12 +135,12 @@ func NewGenerator() (*ChainGen, error) { return nil, xerrors.Errorf("failed to get metadata datastore: %w", err) } - bds, err := lr.Datastore("/blocks") + bds, err := lr.Datastore("/chain") if err != nil { return nil, xerrors.Errorf("failed to get blocks datastore: %w", err) } - bs := mybs{blockstore.NewIdStore(blockstore.NewBlockstore(bds))} + bs := mybs{blockstore.NewBlockstore(bds)} ks, err := lr.KeyStore() if err != nil { @@ -109,77 +152,88 @@ func NewGenerator() (*ChainGen, error) { return nil, xerrors.Errorf("creating memrepo wallet failed: %w", err) } - banker, err := w.GenerateKey(types.KTSecp256k1) + banker, err := w.GenerateKey(crypto.SigTypeSecp256k1) if err != nil { return nil, xerrors.Errorf("failed to generate banker key: %w", err) } receievers := make([]address.Address, msgsPerBlock) for r := range receievers { - receievers[r], err = w.GenerateKey(types.KTBLS) + receievers[r], err = w.GenerateKey(crypto.SigTypeBLS) if err != nil { return nil, xerrors.Errorf("failed to generate receiver key: %w", err) } } - maddr1, err := address.NewFromString("t0300") - if err != nil { - return nil, err - } + maddr1 := genesis2.MinerAddress(0) m1temp, err := ioutil.TempDir("", "preseal") if err != nil { return nil, err } - genm1, err := 
seed.PreSeal(maddr1, 1024, 0, 1, m1temp, []byte("some randomness")) + genm1, k1, err := seed.PreSeal(maddr1, abi.RegisteredSealProof_StackedDrg2KiBV1, 0, numSectors, m1temp, []byte("some randomness"), nil, true) if err != nil { return nil, err } - maddr2, err := address.NewFromString("t0301") - if err != nil { - return nil, err - } + maddr2 := genesis2.MinerAddress(1) m2temp, err := ioutil.TempDir("", "preseal") if err != nil { return nil, err } - genm2, err := seed.PreSeal(maddr2, 1024, 0, 1, m2temp, []byte("some randomness")) + genm2, k2, err := seed.PreSeal(maddr2, abi.RegisteredSealProof_StackedDrg2KiBV1, 0, numSectors, m2temp, []byte("some randomness"), nil, true) if err != nil { return nil, err } - mk1, err := w.Import(&genm1.Key) + mk1, err := w.Import(k1) if err != nil { return nil, err } - mk2, err := w.Import(&genm2.Key) + mk2, err := w.Import(k2) if err != nil { return nil, err } - minercfg := &GenMinerCfg{ - PeerIDs: []peer.ID{"peerID1", "peerID2"}, - PreSeals: map[string]genesis.GenesisMiner{ - maddr1.String(): *genm1, - maddr2.String(): *genm2, + sys := vm.Syscalls(&genFakeVerifier{}) + + tpl := genesis.Template{ + Accounts: []genesis.Actor{ + { + Type: genesis.TAccount, + Balance: types.FromFil(20_000_000), + Meta: (&genesis.AccountMeta{Owner: mk1}).ActorMeta(), + }, + { + Type: genesis.TAccount, + Balance: types.FromFil(20_000_000), + Meta: (&genesis.AccountMeta{Owner: mk2}).ActorMeta(), + }, + { + Type: genesis.TAccount, + Balance: types.FromFil(50000), + Meta: (&genesis.AccountMeta{Owner: banker}).ActorMeta(), + }, }, - MinerAddrs: []address.Address{maddr1, maddr2}, + Miners: []genesis.Miner{ + *genm1, + *genm2, + }, + VerifregRootKey: DefaultVerifregRootkeyActor, + RemainderAccount: DefaultRemainderAccountActor, + NetworkName: "", + Timestamp: uint64(build.Clock.Now().Add(-500 * time.Duration(build.BlockDelaySecs) * time.Second).Unix()), } - genb, err := MakeGenesisBlock(bs, map[address.Address]types.BigInt{ - mk1: types.FromFil(40000), - mk2: 
types.FromFil(40000), - banker: types.FromFil(50000), - }, minercfg, 100000) + genb, err := genesis2.MakeGenesisBlock(context.TODO(), bs, sys, tpl) if err != nil { return nil, xerrors.Errorf("make genesis block failed: %w", err) } - cs := store.NewChainStore(bs, ds) + cs := store.NewChainStore(bs, ds, sys) genfb := &types.FullBlock{Header: genb.Genesis} gents := store.NewFullTipSet([]*types.FullBlock{genfb}) @@ -188,29 +242,35 @@ func NewGenerator() (*ChainGen, error) { return nil, xerrors.Errorf("set genesis failed: %w", err) } - if len(minercfg.MinerAddrs) == 0 { - return nil, xerrors.Errorf("MakeGenesisBlock failed to set miner address") - } - - mgen := make(map[address.Address]ElectionPoStProver) - for _, m := range minercfg.MinerAddrs { - mgen[m] = &eppProvider{} + mgen := make(map[address.Address]WinningPoStProver) + for i := range tpl.Miners { + mgen[genesis2.MinerAddress(uint64(i))] = &wppProvider{} } sm := stmgr.NewStateManager(cs) + miners := []address.Address{maddr1, maddr2} + + beac := beacon.Schedule{{Start: 0, Beacon: beacon.NewMockBeacon(time.Second)}} + //beac, err := drand.NewDrandBeacon(tpl.Timestamp, build.BlockDelaySecs) + //if err != nil { + //return nil, xerrors.Errorf("creating drand beacon: %w", err) + //} + gen := &ChainGen{ bs: bs, cs: cs, sm: sm, msgsPerBlock: msgsPerBlock, genesis: genb.Genesis, + beacon: beac, w: w, - Miners: minercfg.MinerAddrs, - eppProvs: mgen, - banker: banker, - receivers: receievers, + GetMessages: getRandomMessages, + Miners: miners, + eppProvs: mgen, + banker: banker, + receivers: receievers, CurTipset: gents, @@ -221,6 +281,22 @@ func NewGenerator() (*ChainGen, error) { return gen, nil } +func NewGenerator() (*ChainGen, error) { + return NewGeneratorWithSectors(1) +} + +func (cg *ChainGen) StateManager() *stmgr.StateManager { + return cg.sm +} + +func (cg *ChainGen) SetStateManager(sm *stmgr.StateManager) { + cg.sm = sm +} + +func (cg *ChainGen) ChainStore() *store.ChainStore { + return cg.cs +} + func (cg 
*ChainGen) Genesis() *types.BlockHeader { return cg.genesis } @@ -232,46 +308,77 @@ func (cg *ChainGen) GenesisCar() ([]byte, error) { out := new(bytes.Buffer) - if err := car.WriteCar(context.TODO(), dserv, []cid.Cid{cg.Genesis().Cid()}, out); err != nil { + if err := car.WriteCarWithWalker(context.TODO(), dserv, []cid.Cid{cg.Genesis().Cid()}, out, CarWalkFunc); err != nil { return nil, xerrors.Errorf("genesis car write car failed: %w", err) } return out.Bytes(), nil } -func (cg *ChainGen) nextBlockProof(ctx context.Context, pts *types.TipSet, m address.Address, round int64) (*types.EPostProof, *types.Ticket, error) { +func CarWalkFunc(nd format.Node) (out []*format.Link, err error) { + for _, link := range nd.Links() { + pref := link.Cid.Prefix() + if pref.Codec == cid.FilCommitmentSealed || pref.Codec == cid.FilCommitmentUnsealed { + continue + } + out = append(out, link) + } - lastTicket := pts.MinTicket() + return out, nil +} + +func (cg *ChainGen) nextBlockProof(ctx context.Context, pts *types.TipSet, m address.Address, round abi.ChainEpoch) ([]types.BeaconEntry, *types.ElectionProof, *types.Ticket, error) { + mc := &mca{w: cg.w, sm: cg.sm, pv: ffiwrapper.ProofVerifier, bcn: cg.beacon} + + mbi, err := mc.MinerGetBaseInfo(ctx, m, round, pts.Key()) + if err != nil { + return nil, nil, nil, xerrors.Errorf("get miner base info: %w", err) + } + + prev := mbi.PrevBeaconEntry + + entries, err := beacon.BeaconEntriesForBlock(ctx, cg.beacon, round, pts.Height(), prev) + if err != nil { + return nil, nil, nil, xerrors.Errorf("get beacon entries for block: %w", err) + } + + rbase := prev + if len(entries) > 0 { + rbase = entries[len(entries)-1] + } + + eproof, err := IsRoundWinner(ctx, pts, round, m, rbase, mbi, mc) + if err != nil { + return nil, nil, nil, xerrors.Errorf("checking round winner failed: %w", err) + } + + buf := new(bytes.Buffer) + if err := m.MarshalCBOR(buf); err != nil { + return nil, nil, nil, xerrors.Errorf("failed to cbor marshal address: %w", err) 
+ } + + if round > build.UpgradeSmokeHeight { + buf.Write(pts.MinTicket().VRFProof) + } + + ticketRand, err := store.DrawRandomness(rbase.Data, crypto.DomainSeparationTag_TicketProduction, round-build.TicketRandomnessLookback, buf.Bytes()) + if err != nil { + return nil, nil, nil, err + } st := pts.ParentState() worker, err := stmgr.GetMinerWorkerRaw(ctx, cg.sm, st, m) if err != nil { - return nil, nil, xerrors.Errorf("get miner worker: %w", err) + return nil, nil, nil, xerrors.Errorf("get miner worker: %w", err) } - vrfout, err := ComputeVRF(ctx, cg.w.Sign, worker, m, DSepTicket, lastTicket.VRFProof) + vrfout, err := ComputeVRF(ctx, cg.w.Sign, worker, ticketRand) if err != nil { - return nil, nil, xerrors.Errorf("compute VRF: %w", err) + return nil, nil, nil, xerrors.Errorf("compute VRF: %w", err) } - tick := &types.Ticket{ - VRFProof: vrfout, - } - - eproofin, err := IsRoundWinner(ctx, pts, round, m, cg.eppProvs[m], &mca{w: cg.w, sm: cg.sm}) - if err != nil { - return nil, nil, xerrors.Errorf("checking round winner failed: %w", err) - } - if eproofin == nil { - return nil, tick, nil - } - eproof, err := ComputeProof(ctx, cg.eppProvs[m], eproofin) - if err != nil { - return nil, nil, xerrors.Errorf("computing proof: %w", err) - } - - return eproof, tick, nil + return entries, eproof, &types.Ticket{VRFProof: vrfout}, nil } type MinedTipSet struct { @@ -289,23 +396,50 @@ func (cg *ChainGen) NextTipSet() (*MinedTipSet, error) { return mts, nil } -func (cg *ChainGen) NextTipSetFromMiners(base *types.TipSet, miners []address.Address) (*MinedTipSet, error) { - var blks []*types.FullBlock +func (cg *ChainGen) SetWinningPoStProver(m address.Address, wpp WinningPoStProver) { + cg.eppProvs[m] = wpp +} - msgs, err := cg.getRandomMessages() +func (cg *ChainGen) NextTipSetFromMiners(base *types.TipSet, miners []address.Address) (*MinedTipSet, error) { + ms, err := cg.GetMessages(cg) if err != nil { return nil, xerrors.Errorf("get random messages: %w", err) } - for round := 
int64(base.Height() + 1); len(blks) == 0; round++ { - for _, m := range miners { - proof, t, err := cg.nextBlockProof(context.TODO(), base, m, round) + msgs := make([][]*types.SignedMessage, len(miners)) + for i := range msgs { + msgs[i] = ms + } + + fts, err := cg.NextTipSetFromMinersWithMessages(base, miners, msgs) + if err != nil { + return nil, err + } + + return &MinedTipSet{ + TipSet: fts, + Messages: ms, + }, nil +} + +func (cg *ChainGen) NextTipSetFromMinersWithMessages(base *types.TipSet, miners []address.Address, msgs [][]*types.SignedMessage) (*store.FullTipSet, error) { + var blks []*types.FullBlock + + for round := base.Height() + 1; len(blks) == 0; round++ { + for mi, m := range miners { + bvals, et, ticket, err := cg.nextBlockProof(context.TODO(), base, m, round) if err != nil { return nil, xerrors.Errorf("next block proof: %w", err) } - if proof != nil { - fblk, err := cg.makeBlock(base, m, proof, t, uint64(round), msgs) + if et != nil { + // TODO: maybe think about passing in more real parameters to this? 
+ wpost, err := cg.eppProvs[m].ComputeProof(context.TODO(), nil, nil) + if err != nil { + return nil, err + } + + fblk, err := cg.makeBlock(base, m, ticket, et, bvals, round, wpost, msgs[mi]) if err != nil { return nil, xerrors.Errorf("making a block for next tipset failed: %w", err) } @@ -319,24 +453,31 @@ func (cg *ChainGen) NextTipSetFromMiners(base *types.TipSet, miners []address.Ad } } - fts := store.NewFullTipSet(blks) - - return &MinedTipSet{ - TipSet: fts, - Messages: msgs, - }, nil + return store.NewFullTipSet(blks), nil } -func (cg *ChainGen) makeBlock(parents *types.TipSet, m address.Address, eproof *types.EPostProof, ticket *types.Ticket, height uint64, msgs []*types.SignedMessage) (*types.FullBlock, error) { +func (cg *ChainGen) makeBlock(parents *types.TipSet, m address.Address, vrfticket *types.Ticket, + eticket *types.ElectionProof, bvals []types.BeaconEntry, height abi.ChainEpoch, + wpost []proof.PoStProof, msgs []*types.SignedMessage) (*types.FullBlock, error) { var ts uint64 if cg.Timestamper != nil { ts = cg.Timestamper(parents, height-parents.Height()) } else { - ts = parents.MinTimestamp() + ((height - parents.Height()) * build.BlockDelay) + ts = parents.MinTimestamp() + uint64(height-parents.Height())*build.BlockDelaySecs } - fblk, err := MinerCreateBlock(context.TODO(), cg.sm, cg.w, m, parents, ticket, eproof, msgs, height, ts) + fblk, err := MinerCreateBlock(context.TODO(), cg.sm, cg.w, &api.BlockTemplate{ + Miner: m, + Parents: parents.Key(), + Ticket: vrfticket, + Eproof: eticket, + BeaconValues: bvals, + Messages: msgs, + Epoch: height, + Timestamp: ts, + WinningPoStProof: wpost, + }) if err != nil { return nil, err } @@ -344,19 +485,31 @@ func (cg *ChainGen) makeBlock(parents *types.TipSet, m address.Address, eproof * return fblk, err } -// This function is awkward. 
It's used to deal with messages made when +// ResyncBankerNonce is used for dealing with messages made when // simulating forks func (cg *ChainGen) ResyncBankerNonce(ts *types.TipSet) error { - act, err := cg.sm.GetActor(cg.banker, ts) + st, err := cg.sm.ParentState(ts) + if err != nil { + return err + } + act, err := st.GetActor(cg.banker) if err != nil { return err } - cg.bankerNonce = act.Nonce + return nil } -func (cg *ChainGen) getRandomMessages() ([]*types.SignedMessage, error) { +func (cg *ChainGen) Banker() address.Address { + return cg.banker +} + +func (cg *ChainGen) Wallet() *wallet.Wallet { + return cg.w +} + +func getRandomMessages(cg *ChainGen) ([]*types.SignedMessage, error) { msgs := make([]*types.SignedMessage, cg.msgsPerBlock) for m := range msgs { msg := types.Message{ @@ -369,8 +522,9 @@ func (cg *ChainGen) getRandomMessages() ([]*types.SignedMessage, error) { Method: 0, - GasLimit: types.NewInt(10000), - GasPrice: types.NewInt(0), + GasLimit: 100_000_000, + GasFeeCap: types.NewInt(0), + GasPremium: types.NewInt(0), } sig, err := cg.w.Sign(context.TODO(), cg.banker, msg.Cid().Bytes()) @@ -395,240 +549,137 @@ func (cg *ChainGen) YieldRepo() (repo.Repo, error) { } type MiningCheckAPI interface { - ChainGetRandomness(context.Context, types.TipSetKey, int64) ([]byte, error) + ChainGetRandomnessFromBeacon(ctx context.Context, tsk types.TipSetKey, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) (abi.Randomness, error) + ChainGetRandomnessFromTickets(ctx context.Context, tsk types.TipSetKey, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) (abi.Randomness, error) - StateMinerPower(context.Context, address.Address, *types.TipSet) (api.MinerPower, error) + MinerGetBaseInfo(context.Context, address.Address, abi.ChainEpoch, types.TipSetKey) (*api.MiningBaseInfo, error) - StateMinerWorker(context.Context, address.Address, *types.TipSet) (address.Address, error) - - 
StateMinerSectorSize(context.Context, address.Address, *types.TipSet) (uint64, error) - - StateMinerProvingSet(context.Context, address.Address, *types.TipSet) ([]*api.ChainSectorInfo, error) - - WalletSign(context.Context, address.Address, []byte) (*types.Signature, error) + WalletSign(context.Context, address.Address, []byte) (*crypto.Signature, error) } type mca struct { - w *wallet.Wallet - sm *stmgr.StateManager + w *wallet.Wallet + sm *stmgr.StateManager + pv ffiwrapper.Verifier + bcn beacon.Schedule } -func (mca mca) ChainGetRandomness(ctx context.Context, pts types.TipSetKey, lb int64) ([]byte, error) { - return mca.sm.ChainStore().GetRandomness(ctx, pts.Cids(), int64(lb)) -} - -func (mca mca) StateMinerPower(ctx context.Context, maddr address.Address, ts *types.TipSet) (api.MinerPower, error) { - mpow, tpow, err := stmgr.GetPower(ctx, mca.sm, ts, maddr) +func (mca mca) ChainGetRandomnessFromTickets(ctx context.Context, tsk types.TipSetKey, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) (abi.Randomness, error) { + pts, err := mca.sm.ChainStore().LoadTipSet(tsk) if err != nil { - return api.MinerPower{}, err + return nil, xerrors.Errorf("loading tipset key: %w", err) } - return api.MinerPower{ - MinerPower: mpow, - TotalPower: tpow, - }, err + return mca.sm.ChainStore().GetChainRandomness(ctx, pts.Cids(), personalization, randEpoch, entropy) } -func (mca mca) StateMinerWorker(ctx context.Context, maddr address.Address, ts *types.TipSet) (address.Address, error) { - return stmgr.GetMinerWorkerRaw(ctx, mca.sm, ts.ParentState(), maddr) +func (mca mca) ChainGetRandomnessFromBeacon(ctx context.Context, tsk types.TipSetKey, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) (abi.Randomness, error) { + pts, err := mca.sm.ChainStore().LoadTipSet(tsk) + if err != nil { + return nil, xerrors.Errorf("loading tipset key: %w", err) + } + + return mca.sm.ChainStore().GetBeaconRandomness(ctx, 
pts.Cids(), personalization, randEpoch, entropy) } -func (mca mca) StateMinerSectorSize(ctx context.Context, maddr address.Address, ts *types.TipSet) (uint64, error) { - return stmgr.GetMinerSectorSize(ctx, mca.sm, ts, maddr) +func (mca mca) MinerGetBaseInfo(ctx context.Context, maddr address.Address, epoch abi.ChainEpoch, tsk types.TipSetKey) (*api.MiningBaseInfo, error) { + return stmgr.MinerGetBaseInfo(ctx, mca.sm, mca.bcn, tsk, epoch, maddr, mca.pv) } -func (mca mca) StateMinerProvingSet(ctx context.Context, maddr address.Address, ts *types.TipSet) ([]*api.ChainSectorInfo, error) { - return stmgr.GetMinerProvingSet(ctx, mca.sm, ts, maddr) -} - -func (mca mca) WalletSign(ctx context.Context, a address.Address, v []byte) (*types.Signature, error) { +func (mca mca) WalletSign(ctx context.Context, a address.Address, v []byte) (*crypto.Signature, error) { return mca.w.Sign(ctx, a, v) } -type ElectionPoStProver interface { - GenerateCandidates(context.Context, sectorbuilder.SortedPublicSectorInfo, []byte) ([]sectorbuilder.EPostCandidate, error) - ComputeProof(context.Context, sectorbuilder.SortedPublicSectorInfo, []byte, []sectorbuilder.EPostCandidate) ([]byte, error) +type WinningPoStProver interface { + GenerateCandidates(context.Context, abi.PoStRandomness, uint64) ([]uint64, error) + ComputeProof(context.Context, []proof.SectorInfo, abi.PoStRandomness) ([]proof.PoStProof, error) } -type eppProvider struct{} +type wppProvider struct{} -func (epp *eppProvider) GenerateCandidates(ctx context.Context, _ sectorbuilder.SortedPublicSectorInfo, eprand []byte) ([]sectorbuilder.EPostCandidate, error) { - return []sectorbuilder.EPostCandidate{ - { - SectorID: 1, - PartialTicket: [32]byte{}, - Ticket: [32]byte{}, - SectorChallengeIndex: 1, - }, - }, nil +func (wpp *wppProvider) GenerateCandidates(ctx context.Context, _ abi.PoStRandomness, _ uint64) ([]uint64, error) { + return []uint64{0}, nil } -func (epp *eppProvider) ComputeProof(ctx context.Context, _ 
sectorbuilder.SortedPublicSectorInfo, eprand []byte, winners []sectorbuilder.EPostCandidate) ([]byte, error) { - - return []byte("valid proof"), nil +func (wpp *wppProvider) ComputeProof(context.Context, []proof.SectorInfo, abi.PoStRandomness) ([]proof.PoStProof, error) { + return ValidWpostForTesting, nil } -type ProofInput struct { - sectors sectorbuilder.SortedPublicSectorInfo - hvrf []byte - winners []sectorbuilder.EPostCandidate - vrfout []byte -} +func IsRoundWinner(ctx context.Context, ts *types.TipSet, round abi.ChainEpoch, + miner address.Address, brand types.BeaconEntry, mbi *api.MiningBaseInfo, a MiningCheckAPI) (*types.ElectionProof, error) { -func IsRoundWinner(ctx context.Context, ts *types.TipSet, round int64, miner address.Address, epp ElectionPoStProver, a MiningCheckAPI) (*ProofInput, error) { - r, err := a.ChainGetRandomness(ctx, ts.Key(), round-build.EcRandomnessLookback) - if err != nil { - return nil, xerrors.Errorf("chain get randomness: %w", err) + buf := new(bytes.Buffer) + if err := miner.MarshalCBOR(buf); err != nil { + return nil, xerrors.Errorf("failed to cbor marshal address: %w", err) } - mworker, err := a.StateMinerWorker(ctx, miner, ts) + electionRand, err := store.DrawRandomness(brand.Data, crypto.DomainSeparationTag_ElectionProofProduction, round, buf.Bytes()) if err != nil { - return nil, xerrors.Errorf("failed to get miner worker: %w", err) + return nil, xerrors.Errorf("failed to draw randomness: %w", err) } - vrfout, err := ComputeVRF(ctx, a.WalletSign, mworker, miner, DSepElectionPost, r) + vrfout, err := ComputeVRF(ctx, a.WalletSign, mbi.WorkerKey, electionRand) if err != nil { return nil, xerrors.Errorf("failed to compute VRF: %w", err) } - pset, err := a.StateMinerProvingSet(ctx, miner, ts) - if err != nil { - return nil, xerrors.Errorf("failed to load proving set for miner: %w", err) - } - if len(pset) == 0 { + ep := &types.ElectionProof{VRFProof: vrfout} + j := ep.ComputeWinCount(mbi.MinerPower, mbi.NetworkPower) + 
ep.WinCount = j + if j < 1 { return nil, nil } - var sinfos []ffi.PublicSectorInfo - for _, s := range pset { - var commRa [32]byte - copy(commRa[:], s.CommR) - sinfos = append(sinfos, ffi.PublicSectorInfo{ - SectorID: s.SectorID, - CommR: commRa, - }) - } - sectors := sectorbuilder.NewSortedPublicSectorInfo(sinfos) - - hvrf := sha256.Sum256(vrfout) - candidates, err := epp.GenerateCandidates(ctx, sectors, hvrf[:]) - if err != nil { - return nil, xerrors.Errorf("failed to generate electionPoSt candidates: %w", err) - } - - pow, err := a.StateMinerPower(ctx, miner, ts) - if err != nil { - return nil, xerrors.Errorf("failed to check power: %w", err) - } - - ssize, err := a.StateMinerSectorSize(ctx, miner, ts) - if err != nil { - return nil, xerrors.Errorf("failed to look up miners sector size: %w", err) - } - - var winners []sectorbuilder.EPostCandidate - for _, c := range candidates { - if types.IsTicketWinner(c.PartialTicket[:], ssize, uint64(len(sinfos)), pow.TotalPower) { - winners = append(winners, c) - } - } - - // no winners, sad - if len(winners) == 0 { - return nil, nil - } - - return &ProofInput{ - sectors: sectors, - hvrf: hvrf[:], - winners: winners, - vrfout: vrfout, - }, nil + return ep, nil } -func ComputeProof(ctx context.Context, epp ElectionPoStProver, pi *ProofInput) (*types.EPostProof, error) { - proof, err := epp.ComputeProof(ctx, pi.sectors, pi.hvrf, pi.winners) - if err != nil { - return nil, xerrors.Errorf("failed to compute snark for election proof: %w", err) - } +type SignFunc func(context.Context, address.Address, []byte) (*crypto.Signature, error) - ept := types.EPostProof{ - Proof: proof, - PostRand: pi.vrfout, - } - for _, win := range pi.winners { - part := make([]byte, 32) - copy(part, win.PartialTicket[:]) - ept.Candidates = append(ept.Candidates, types.EPostTicket{ - Partial: part, - SectorID: win.SectorID, - ChallengeIndex: win.SectorChallengeIndex, - }) - } - - return &ept, nil -} - -type SignFunc func(context.Context, 
address.Address, []byte) (*types.Signature, error) - -const ( - DSepTicket = 1 - DSepElectionPost = 2 -) - -func hashVRFBase(personalization uint64, miner address.Address, input []byte) ([]byte, error) { - if miner.Protocol() != address.ID { - return nil, xerrors.Errorf("miner address for compute VRF must be an ID address") - } - - var persbuf [8]byte - binary.LittleEndian.PutUint64(persbuf[:], personalization) - - h := sha256.New() - h.Write(persbuf[:]) - h.Write([]byte{0}) - h.Write(input) - h.Write([]byte{0}) - h.Write(miner.Bytes()) - - return h.Sum(nil), nil -} - -func VerifyVRF(ctx context.Context, worker, miner address.Address, p uint64, input, vrfproof []byte) error { +func VerifyVRF(ctx context.Context, worker address.Address, vrfBase, vrfproof []byte) error { _, span := trace.StartSpan(ctx, "VerifyVRF") defer span.End() - vrfBase, err := hashVRFBase(p, miner, input) - if err != nil { - return xerrors.Errorf("computing vrf base failed: %w", err) - } - - sig := &types.Signature{ - Type: types.KTBLS, + sig := &crypto.Signature{ + Type: crypto.SigTypeBLS, Data: vrfproof, } - if err := sig.Verify(worker, vrfBase); err != nil { + if err := sigs.Verify(sig, worker, vrfBase); err != nil { return xerrors.Errorf("vrf was invalid: %w", err) } return nil } -func ComputeVRF(ctx context.Context, sign SignFunc, worker, miner address.Address, p uint64, input []byte) ([]byte, error) { - sigInput, err := hashVRFBase(p, miner, input) - if err != nil { - return nil, err - } - +func ComputeVRF(ctx context.Context, sign SignFunc, worker address.Address, sigInput []byte) ([]byte, error) { sig, err := sign(ctx, worker, sigInput) if err != nil { return nil, err } - if sig.Type != types.KTBLS { + if sig.Type != crypto.SigTypeBLS { return nil, fmt.Errorf("miner worker address was not a BLS key") } return sig.Data, nil } + +type genFakeVerifier struct{} + +var _ ffiwrapper.Verifier = (*genFakeVerifier)(nil) + +func (m genFakeVerifier) VerifySeal(svi proof.SealVerifyInfo) (bool, 
error) { + return true, nil +} + +func (m genFakeVerifier) VerifyWinningPoSt(ctx context.Context, info proof.WinningPoStVerifyInfo) (bool, error) { + panic("not supported") +} + +func (m genFakeVerifier) VerifyWindowPoSt(ctx context.Context, info proof.WindowPoStVerifyInfo) (bool, error) { + panic("not supported") +} + +func (m genFakeVerifier) GenerateWinningPoStSectorChallenge(ctx context.Context, proof abi.RegisteredPoStProof, id abi.ActorID, randomness abi.PoStRandomness, u uint64) ([]uint64, error) { + panic("not supported") +} diff --git a/chain/gen/gen_test.go b/chain/gen/gen_test.go index 260259869..8c38328d0 100644 --- a/chain/gen/gen_test.go +++ b/chain/gen/gen_test.go @@ -3,18 +3,23 @@ package gen import ( "testing" - "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/go-state-types/abi" + + "github.com/filecoin-project/lotus/chain/actors/policy" + _ "github.com/filecoin-project/lotus/lib/sigs/bls" + _ "github.com/filecoin-project/lotus/lib/sigs/secp" ) func init() { - build.SectorSizes = []uint64{1024} - build.MinimumMinerPower = 1024 + policy.SetSupportedProofTypes(abi.RegisteredSealProof_StackedDrg2KiBV1) + policy.SetConsensusMinerMinPower(abi.NewStoragePower(2048)) + policy.SetMinVerifiedDealSize(abi.NewStoragePower(256)) } -func testGeneration(t testing.TB, n int, msgs int) { - g, err := NewGenerator() +func testGeneration(t testing.TB, n int, msgs int, sectors int) { + g, err := NewGeneratorWithSectors(sectors) if err != nil { - t.Fatal(err) + t.Fatalf("%+v", err) } g.msgsPerBlock = msgs @@ -22,30 +27,31 @@ func testGeneration(t testing.TB, n int, msgs int) { for i := 0; i < n; i++ { mts, err := g.NextTipSet() if err != nil { - t.Fatalf("error at H:%d, %s", i, err) + t.Fatalf("error at H:%d, %+v", i, err) } _ = mts } } func TestChainGeneration(t *testing.T) { - testGeneration(t, 10, 20) + t.Run("10-20-1", func(t *testing.T) { testGeneration(t, 10, 20, 1) }) + t.Run("10-20-25", func(t *testing.T) { testGeneration(t, 10, 20, 25) 
}) } func BenchmarkChainGeneration(b *testing.B) { b.Run("0-messages", func(b *testing.B) { - testGeneration(b, b.N, 0) + testGeneration(b, b.N, 0, 1) }) b.Run("10-messages", func(b *testing.B) { - testGeneration(b, b.N, 10) + testGeneration(b, b.N, 10, 1) }) b.Run("100-messages", func(b *testing.B) { - testGeneration(b, b.N, 100) + testGeneration(b, b.N, 100, 1) }) b.Run("1000-messages", func(b *testing.B) { - testGeneration(b, b.N, 1000) + testGeneration(b, b.N, 1000, 1) }) } diff --git a/chain/gen/genesis/genblock.go b/chain/gen/genesis/genblock.go new file mode 100644 index 000000000..f26659cdf --- /dev/null +++ b/chain/gen/genesis/genblock.go @@ -0,0 +1,41 @@ +package genesis + +import ( + "encoding/hex" + + blocks "github.com/ipfs/go-block-format" + "github.com/ipfs/go-cid" + "github.com/multiformats/go-multihash" +) + +const genesisMultihashString = "1220107d821c25dc0735200249df94a8bebc9c8e489744f86a4ca8919e81f19dcd72" +const genesisBlockHex = "a5684461746574696d6573323031372d30352d30352030313a32373a3531674e6574776f726b6846696c65636f696e65546f6b656e6846696c65636f696e6c546f6b656e416d6f756e7473a36b546f74616c537570706c796d322c3030302c3030302c303030664d696e6572736d312c3430302c3030302c3030306c50726f746f636f6c4c616273a36b446576656c6f706d656e746b3330302c3030302c3030306b46756e6472616973696e676b3230302c3030302c3030306a466f756e646174696f6e6b3130302c3030302c303030674d657373616765784854686973206973207468652047656e6573697320426c6f636b206f66207468652046696c65636f696e20446563656e7472616c697a65642053746f72616765204e6574776f726b2e" + +var cidBuilder = cid.V1Builder{Codec: cid.DagCBOR, MhType: multihash.SHA2_256} + +func expectedCid() cid.Cid { + mh, err := multihash.FromHexString(genesisMultihashString) + if err != nil { + panic(err) + } + return cid.NewCidV1(cidBuilder.Codec, mh) +} + +func getGenesisBlock() (blocks.Block, error) { + genesisBlockData, err := hex.DecodeString(genesisBlockHex) + if err != nil { + return nil, err + } + + genesisCid, err := 
cidBuilder.Sum(genesisBlockData) + if err != nil { + return nil, err + } + + block, err := blocks.NewBlockWithCid(genesisBlockData, genesisCid) + if err != nil { + return nil, err + } + + return block, nil +} diff --git a/chain/gen/genesis/genesis.go b/chain/gen/genesis/genesis.go new file mode 100644 index 000000000..039e284cd --- /dev/null +++ b/chain/gen/genesis/genesis.go @@ -0,0 +1,580 @@ +package genesis + +import ( + "context" + "crypto/rand" + "encoding/json" + "fmt" + + "github.com/ipfs/go-cid" + "github.com/ipfs/go-datastore" + cbor "github.com/ipfs/go-ipld-cbor" + logging "github.com/ipfs/go-log/v2" + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-address" + + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/go-state-types/crypto" + builtin0 "github.com/filecoin-project/specs-actors/actors/builtin" + account0 "github.com/filecoin-project/specs-actors/actors/builtin/account" + multisig0 "github.com/filecoin-project/specs-actors/actors/builtin/multisig" + verifreg0 "github.com/filecoin-project/specs-actors/actors/builtin/verifreg" + adt0 "github.com/filecoin-project/specs-actors/actors/util/adt" + + "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/chain/actors/builtin" + "github.com/filecoin-project/lotus/chain/state" + "github.com/filecoin-project/lotus/chain/store" + "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/chain/vm" + "github.com/filecoin-project/lotus/genesis" + bstore "github.com/filecoin-project/lotus/lib/blockstore" + "github.com/filecoin-project/lotus/lib/sigs" +) + +const AccountStart = 100 +const MinerStart = 1000 +const MaxAccounts = MinerStart - AccountStart + +var log = logging.Logger("genesis") + +type GenesisBootstrap struct { + Genesis *types.BlockHeader +} + +/* +From a list of parameters, create a genesis block / initial state + +The process: +- Bootstrap state 
(MakeInitialStateTree) + - Create empty state + - Create system actor + - Make init actor + - Create accounts mappings + - Set NextID to MinerStart + - Setup Reward (1.4B fil) + - Setup Cron + - Create empty power actor + - Create empty market + - Create verified registry + - Setup burnt fund address + - Initialize account / msig balances +- Instantiate early vm with genesis syscalls + - Create miners + - Each: + - power.CreateMiner, set msg value to PowerBalance + - market.AddFunds with correct value + - market.PublishDeals for related sectors + - Set network power in the power actor to what we'll have after genesis creation + - Recreate reward actor state with the right power + - For each precommitted sector + - Get deal weight + - Calculate QA Power + - Remove fake power from the power actor + - Calculate pledge + - Precommit + - Confirm valid + +Data Types: + +PreSeal :{ + CommR CID + CommD CID + SectorID SectorNumber + Deal market.DealProposal # Start at 0, self-deal! +} + +Genesis: { + Accounts: [ # non-miner, non-singleton actors, max len = MaxAccounts + { + Type: "account" / "multisig", + Value: "attofil", + [Meta: {msig settings, account key..}] + },... + ], + Miners: [ + { + Owner, Worker Addr # ID + MarketBalance, PowerBalance TokenAmount + SectorSize uint64 + PreSeals []PreSeal + },... 
+ ], +} + +*/ + +func MakeInitialStateTree(ctx context.Context, bs bstore.Blockstore, template genesis.Template) (*state.StateTree, map[address.Address]address.Address, error) { + // Create empty state tree + + cst := cbor.NewCborStore(bs) + _, err := cst.Put(context.TODO(), []struct{}{}) + if err != nil { + return nil, nil, xerrors.Errorf("putting empty object: %w", err) + } + + state, err := state.NewStateTree(cst, builtin.Version0) + if err != nil { + return nil, nil, xerrors.Errorf("making new state tree: %w", err) + } + + // Create system actor + + sysact, err := SetupSystemActor(bs) + if err != nil { + return nil, nil, xerrors.Errorf("setup init actor: %w", err) + } + if err := state.SetActor(builtin0.SystemActorAddr, sysact); err != nil { + return nil, nil, xerrors.Errorf("set init actor: %w", err) + } + + // Create init actor + + idStart, initact, keyIDs, err := SetupInitActor(bs, template.NetworkName, template.Accounts, template.VerifregRootKey) + if err != nil { + return nil, nil, xerrors.Errorf("setup init actor: %w", err) + } + if err := state.SetActor(builtin0.InitActorAddr, initact); err != nil { + return nil, nil, xerrors.Errorf("set init actor: %w", err) + } + + // Setup reward + // RewardActor's state is overrwritten by SetupStorageMiners + rewact, err := SetupRewardActor(bs, big.Zero()) + if err != nil { + return nil, nil, xerrors.Errorf("setup init actor: %w", err) + } + + err = state.SetActor(builtin0.RewardActorAddr, rewact) + if err != nil { + return nil, nil, xerrors.Errorf("set network account actor: %w", err) + } + + // Setup cron + cronact, err := SetupCronActor(bs) + if err != nil { + return nil, nil, xerrors.Errorf("setup cron actor: %w", err) + } + if err := state.SetActor(builtin0.CronActorAddr, cronact); err != nil { + return nil, nil, xerrors.Errorf("set cron actor: %w", err) + } + + // Create empty power actor + spact, err := SetupStoragePowerActor(bs) + if err != nil { + return nil, nil, xerrors.Errorf("setup storage market actor: 
%w", err) + } + if err := state.SetActor(builtin0.StoragePowerActorAddr, spact); err != nil { + return nil, nil, xerrors.Errorf("set storage market actor: %w", err) + } + + // Create empty market actor + marketact, err := SetupStorageMarketActor(bs) + if err != nil { + return nil, nil, xerrors.Errorf("setup storage market actor: %w", err) + } + if err := state.SetActor(builtin0.StorageMarketActorAddr, marketact); err != nil { + return nil, nil, xerrors.Errorf("set market actor: %w", err) + } + + // Create verified registry + verifact, err := SetupVerifiedRegistryActor(bs) + if err != nil { + return nil, nil, xerrors.Errorf("setup storage market actor: %w", err) + } + if err := state.SetActor(builtin0.VerifiedRegistryActorAddr, verifact); err != nil { + return nil, nil, xerrors.Errorf("set market actor: %w", err) + } + + burntRoot, err := cst.Put(ctx, &account0.State{ + Address: builtin0.BurntFundsActorAddr, + }) + if err != nil { + return nil, nil, xerrors.Errorf("failed to setup burnt funds actor state: %w", err) + } + + // Setup burnt-funds + err = state.SetActor(builtin0.BurntFundsActorAddr, &types.Actor{ + Code: builtin0.AccountActorCodeID, + Balance: types.NewInt(0), + Head: burntRoot, + }) + if err != nil { + return nil, nil, xerrors.Errorf("set burnt funds account actor: %w", err) + } + + // Create accounts + for _, info := range template.Accounts { + + switch info.Type { + case genesis.TAccount: + if err := createAccountActor(ctx, cst, state, info, keyIDs); err != nil { + return nil, nil, xerrors.Errorf("failed to create account actor: %w", err) + } + + case genesis.TMultisig: + + ida, err := address.NewIDAddress(uint64(idStart)) + if err != nil { + return nil, nil, err + } + idStart++ + + if err := createMultisigAccount(ctx, bs, cst, state, ida, info, keyIDs); err != nil { + return nil, nil, err + } + default: + return nil, nil, xerrors.New("unsupported account type") + } + + } + + vregroot, err := address.NewIDAddress(80) + if err != nil { + return nil, 
nil, err + } + + if err = createMultisigAccount(ctx, bs, cst, state, vregroot, template.VerifregRootKey, keyIDs); err != nil { + return nil, nil, xerrors.Errorf("failed to set up verified registry signer: %w", err) + } + + // Setup the first verifier as ID-address 81 + // TODO: remove this + skBytes, err := sigs.Generate(crypto.SigTypeBLS) + if err != nil { + return nil, nil, xerrors.Errorf("creating random verifier secret key: %w", err) + } + + verifierPk, err := sigs.ToPublic(crypto.SigTypeBLS, skBytes) + if err != nil { + return nil, nil, xerrors.Errorf("creating random verifier public key: %w", err) + } + + verifierAd, err := address.NewBLSAddress(verifierPk) + if err != nil { + return nil, nil, xerrors.Errorf("creating random verifier address: %w", err) + } + + verifierId, err := address.NewIDAddress(81) + if err != nil { + return nil, nil, err + } + + verifierState, err := cst.Put(ctx, &account0.State{Address: verifierAd}) + if err != nil { + return nil, nil, err + } + + err = state.SetActor(verifierId, &types.Actor{ + Code: builtin0.AccountActorCodeID, + Balance: types.NewInt(0), + Head: verifierState, + }) + if err != nil { + return nil, nil, xerrors.Errorf("setting account from actmap: %w", err) + } + + totalFilAllocated := big.Zero() + + // flush as ForEach works on the HAMT + if _, err := state.Flush(ctx); err != nil { + return nil, nil, err + } + err = state.ForEach(func(addr address.Address, act *types.Actor) error { + totalFilAllocated = big.Add(totalFilAllocated, act.Balance) + return nil + }) + if err != nil { + return nil, nil, xerrors.Errorf("summing account balances in state tree: %w", err) + } + + totalFil := big.Mul(big.NewInt(int64(build.FilBase)), big.NewInt(int64(build.FilecoinPrecision))) + remainingFil := big.Sub(totalFil, totalFilAllocated) + if remainingFil.Sign() < 0 { + return nil, nil, xerrors.Errorf("somehow overallocated filecoin (allocated = %s)", types.FIL(totalFilAllocated)) + } + + remAccKey, err := address.NewIDAddress(90) + if 
err != nil { + return nil, nil, err + } + + template.RemainderAccount.Balance = remainingFil + + if err := createMultisigAccount(ctx, bs, cst, state, remAccKey, template.RemainderAccount, keyIDs); err != nil { + return nil, nil, xerrors.Errorf("failed to set up remainder account: %w", err) + } + + return state, keyIDs, nil +} + +func createAccountActor(ctx context.Context, cst cbor.IpldStore, state *state.StateTree, info genesis.Actor, keyIDs map[address.Address]address.Address) error { + var ainfo genesis.AccountMeta + if err := json.Unmarshal(info.Meta, &ainfo); err != nil { + return xerrors.Errorf("unmarshaling account meta: %w", err) + } + st, err := cst.Put(ctx, &account0.State{Address: ainfo.Owner}) + if err != nil { + return err + } + + ida, ok := keyIDs[ainfo.Owner] + if !ok { + return fmt.Errorf("no registered ID for account actor: %s", ainfo.Owner) + } + + err = state.SetActor(ida, &types.Actor{ + Code: builtin0.AccountActorCodeID, + Balance: info.Balance, + Head: st, + }) + if err != nil { + return xerrors.Errorf("setting account from actmap: %w", err) + } + return nil +} + +func createMultisigAccount(ctx context.Context, bs bstore.Blockstore, cst cbor.IpldStore, state *state.StateTree, ida address.Address, info genesis.Actor, keyIDs map[address.Address]address.Address) error { + if info.Type != genesis.TMultisig { + return fmt.Errorf("can only call createMultisigAccount with multisig Actor info") + } + var ainfo genesis.MultisigMeta + if err := json.Unmarshal(info.Meta, &ainfo); err != nil { + return xerrors.Errorf("unmarshaling account meta: %w", err) + } + pending, err := adt0.MakeEmptyMap(adt0.WrapStore(ctx, cst)).Root() + if err != nil { + return xerrors.Errorf("failed to create empty map: %v", err) + } + + var signers []address.Address + + for _, e := range ainfo.Signers { + idAddress, ok := keyIDs[e] + if !ok { + return fmt.Errorf("no registered key ID for signer: %s", e) + } + + // Check if actor already exists + _, err := state.GetActor(e) + if 
err == nil { + signers = append(signers, idAddress) + continue + } + + st, err := cst.Put(ctx, &account0.State{Address: e}) + if err != nil { + return err + } + err = state.SetActor(idAddress, &types.Actor{ + Code: builtin0.AccountActorCodeID, + Balance: types.NewInt(0), + Head: st, + }) + if err != nil { + return xerrors.Errorf("setting account from actmap: %w", err) + } + signers = append(signers, idAddress) + } + + st, err := cst.Put(ctx, &multisig0.State{ + Signers: signers, + NumApprovalsThreshold: uint64(ainfo.Threshold), + StartEpoch: abi.ChainEpoch(ainfo.VestingStart), + UnlockDuration: abi.ChainEpoch(ainfo.VestingDuration), + PendingTxns: pending, + InitialBalance: info.Balance, + }) + if err != nil { + return err + } + err = state.SetActor(ida, &types.Actor{ + Code: builtin0.MultisigActorCodeID, + Balance: info.Balance, + Head: st, + }) + if err != nil { + return xerrors.Errorf("setting account from actmap: %w", err) + } + return nil +} + +func VerifyPreSealedData(ctx context.Context, cs *store.ChainStore, stateroot cid.Cid, template genesis.Template, keyIDs map[address.Address]address.Address) (cid.Cid, error) { + verifNeeds := make(map[address.Address]abi.PaddedPieceSize) + var sum abi.PaddedPieceSize + + vmopt := vm.VMOpts{ + StateBase: stateroot, + Epoch: 0, + Rand: &fakeRand{}, + Bstore: cs.Blockstore(), + Syscalls: mkFakedSigSyscalls(cs.VMSys()), + CircSupplyCalc: nil, + NtwkVersion: genesisNetworkVersion, + BaseFee: types.NewInt(0), + } + vm, err := vm.NewVM(ctx, &vmopt) + if err != nil { + return cid.Undef, xerrors.Errorf("failed to create NewVM: %w", err) + } + + for mi, m := range template.Miners { + for si, s := range m.Sectors { + if s.Deal.Provider != m.ID { + return cid.Undef, xerrors.Errorf("Sector %d in miner %d in template had mismatch in provider and miner ID: %s != %s", si, mi, s.Deal.Provider, m.ID) + } + + amt := s.Deal.PieceSize + verifNeeds[keyIDs[s.Deal.Client]] += amt + sum += amt + } + } + + verifregRoot, err := 
address.NewIDAddress(80) + if err != nil { + return cid.Undef, err + } + + verifier, err := address.NewIDAddress(81) + if err != nil { + return cid.Undef, err + } + + _, err = doExecValue(ctx, vm, builtin0.VerifiedRegistryActorAddr, verifregRoot, types.NewInt(0), builtin0.MethodsVerifiedRegistry.AddVerifier, mustEnc(&verifreg0.AddVerifierParams{ + + Address: verifier, + Allowance: abi.NewStoragePower(int64(sum)), // eh, close enough + + })) + if err != nil { + return cid.Undef, xerrors.Errorf("failed to create verifier: %w", err) + } + + for c, amt := range verifNeeds { + _, err := doExecValue(ctx, vm, builtin0.VerifiedRegistryActorAddr, verifier, types.NewInt(0), builtin0.MethodsVerifiedRegistry.AddVerifiedClient, mustEnc(&verifreg0.AddVerifiedClientParams{ + Address: c, + Allowance: abi.NewStoragePower(int64(amt)), + })) + if err != nil { + return cid.Undef, xerrors.Errorf("failed to add verified client: %w", err) + } + } + + st, err := vm.Flush(ctx) + if err != nil { + return cid.Cid{}, xerrors.Errorf("vm flush: %w", err) + } + + return st, nil +} + +func MakeGenesisBlock(ctx context.Context, bs bstore.Blockstore, sys vm.SyscallBuilder, template genesis.Template) (*GenesisBootstrap, error) { + st, keyIDs, err := MakeInitialStateTree(ctx, bs, template) + if err != nil { + return nil, xerrors.Errorf("make initial state tree failed: %w", err) + } + + stateroot, err := st.Flush(ctx) + if err != nil { + return nil, xerrors.Errorf("flush state tree failed: %w", err) + } + + // temp chainstore + cs := store.NewChainStore(bs, datastore.NewMapDatastore(), sys) + + // Verify PreSealed Data + stateroot, err = VerifyPreSealedData(ctx, cs, stateroot, template, keyIDs) + if err != nil { + return nil, xerrors.Errorf("failed to verify presealed data: %w", err) + } + + stateroot, err = SetupStorageMiners(ctx, cs, stateroot, template.Miners) + if err != nil { + return nil, xerrors.Errorf("setup miners failed: %w", err) + } + + store := adt0.WrapStore(ctx, cbor.NewCborStore(bs)) + 
emptyroot, err := adt0.MakeEmptyArray(store).Root() + if err != nil { + return nil, xerrors.Errorf("amt build failed: %w", err) + } + + mm := &types.MsgMeta{ + BlsMessages: emptyroot, + SecpkMessages: emptyroot, + } + mmb, err := mm.ToStorageBlock() + if err != nil { + return nil, xerrors.Errorf("serializing msgmeta failed: %w", err) + } + if err := bs.Put(mmb); err != nil { + return nil, xerrors.Errorf("putting msgmeta block to blockstore: %w", err) + } + + log.Infof("Empty Genesis root: %s", emptyroot) + + tickBuf := make([]byte, 32) + _, _ = rand.Read(tickBuf) + genesisticket := &types.Ticket{ + VRFProof: tickBuf, + } + + filecoinGenesisCid, err := cid.Decode("bafyreiaqpwbbyjo4a42saasj36kkrpv4tsherf2e7bvezkert2a7dhonoi") + if err != nil { + return nil, xerrors.Errorf("failed to decode filecoin genesis block CID: %w", err) + } + + if !expectedCid().Equals(filecoinGenesisCid) { + return nil, xerrors.Errorf("expectedCid != filecoinGenesisCid") + } + + gblk, err := getGenesisBlock() + if err != nil { + return nil, xerrors.Errorf("failed to construct filecoin genesis block: %w", err) + } + + if !filecoinGenesisCid.Equals(gblk.Cid()) { + return nil, xerrors.Errorf("filecoinGenesisCid != gblk.Cid") + } + + if err := bs.Put(gblk); err != nil { + return nil, xerrors.Errorf("failed writing filecoin genesis block to blockstore: %w", err) + } + + b := &types.BlockHeader{ + Miner: builtin0.SystemActorAddr, + Ticket: genesisticket, + Parents: []cid.Cid{filecoinGenesisCid}, + Height: 0, + ParentWeight: types.NewInt(0), + ParentStateRoot: stateroot, + Messages: mmb.Cid(), + ParentMessageReceipts: emptyroot, + BLSAggregate: nil, + BlockSig: nil, + Timestamp: template.Timestamp, + ElectionProof: new(types.ElectionProof), + BeaconEntries: []types.BeaconEntry{ + { + Round: 0, + Data: make([]byte, 32), + }, + }, + ParentBaseFee: abi.NewTokenAmount(build.InitialBaseFee), + } + + sb, err := b.ToStorageBlock() + if err != nil { + return nil, xerrors.Errorf("serializing block header 
failed: %w", err) + } + + if err := bs.Put(sb); err != nil { + return nil, xerrors.Errorf("putting header to blockstore: %w", err) + } + + return &GenesisBootstrap{ + Genesis: b, + }, nil +} diff --git a/chain/gen/genesis/miners.go b/chain/gen/genesis/miners.go new file mode 100644 index 000000000..1023e5efa --- /dev/null +++ b/chain/gen/genesis/miners.go @@ -0,0 +1,411 @@ +package genesis + +import ( + "bytes" + "context" + "fmt" + "math/rand" + + market0 "github.com/filecoin-project/specs-actors/actors/builtin/market" + + "github.com/filecoin-project/lotus/chain/actors/builtin/power" + "github.com/filecoin-project/lotus/chain/actors/builtin/reward" + + "github.com/filecoin-project/lotus/chain/actors/builtin/market" + "github.com/filecoin-project/lotus/chain/actors/builtin/miner" + + "github.com/ipfs/go-cid" + cbor "github.com/ipfs/go-ipld-cbor" + cbg "github.com/whyrusleeping/cbor-gen" + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/go-state-types/crypto" + "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper" + "github.com/filecoin-project/specs-actors/actors/builtin" + miner0 "github.com/filecoin-project/specs-actors/actors/builtin/miner" + power0 "github.com/filecoin-project/specs-actors/actors/builtin/power" + reward0 "github.com/filecoin-project/specs-actors/actors/builtin/reward" + "github.com/filecoin-project/specs-actors/actors/runtime" + + "github.com/filecoin-project/lotus/chain/state" + "github.com/filecoin-project/lotus/chain/store" + "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/chain/vm" + "github.com/filecoin-project/lotus/genesis" +) + +func MinerAddress(genesisIndex uint64) address.Address { + maddr, err := address.NewIDAddress(MinerStart + genesisIndex) + if err != nil { + panic(err) + } + + return maddr +} + +type fakedSigSyscalls struct { 
+ runtime.Syscalls +} + +func (fss *fakedSigSyscalls) VerifySignature(signature crypto.Signature, signer address.Address, plaintext []byte) error { + return nil +} + +func mkFakedSigSyscalls(base vm.SyscallBuilder) vm.SyscallBuilder { + return func(ctx context.Context, cstate *state.StateTree, cst cbor.IpldStore) runtime.Syscalls { + return &fakedSigSyscalls{ + base(ctx, cstate, cst), + } + } +} + +func SetupStorageMiners(ctx context.Context, cs *store.ChainStore, sroot cid.Cid, miners []genesis.Miner) (cid.Cid, error) { + csc := func(context.Context, abi.ChainEpoch, *state.StateTree) (abi.TokenAmount, error) { + return big.Zero(), nil + } + + vmopt := &vm.VMOpts{ + StateBase: sroot, + Epoch: 0, + Rand: &fakeRand{}, + Bstore: cs.Blockstore(), + Syscalls: mkFakedSigSyscalls(cs.VMSys()), + CircSupplyCalc: csc, + NtwkVersion: genesisNetworkVersion, + BaseFee: types.NewInt(0), + } + + vm, err := vm.NewVM(ctx, vmopt) + if err != nil { + return cid.Undef, xerrors.Errorf("failed to create NewVM: %w", err) + } + + if len(miners) == 0 { + return cid.Undef, xerrors.New("no genesis miners") + } + + minerInfos := make([]struct { + maddr address.Address + + presealExp abi.ChainEpoch + + dealIDs []abi.DealID + }, len(miners)) + + for i, m := range miners { + // Create miner through power actor + i := i + m := m + + spt, err := ffiwrapper.SealProofTypeFromSectorSize(m.SectorSize) + if err != nil { + return cid.Undef, err + } + + { + constructorParams := &power0.CreateMinerParams{ + Owner: m.Worker, + Worker: m.Worker, + Peer: []byte(m.PeerId), + SealProofType: spt, + } + + params := mustEnc(constructorParams) + rval, err := doExecValue(ctx, vm, power.Address, m.Owner, m.PowerBalance, builtin.MethodsPower.CreateMiner, params) + if err != nil { + return cid.Undef, xerrors.Errorf("failed to create genesis miner: %w", err) + } + + var ma power0.CreateMinerReturn + if err := ma.UnmarshalCBOR(bytes.NewReader(rval)); err != nil { + return cid.Undef, xerrors.Errorf("unmarshaling 
CreateMinerReturn: %w", err) + } + + expma := MinerAddress(uint64(i)) + if ma.IDAddress != expma { + return cid.Undef, xerrors.Errorf("miner assigned wrong address: %s != %s", ma.IDAddress, expma) + } + minerInfos[i].maddr = ma.IDAddress + + // TODO: ActorUpgrade + err = vm.MutateState(ctx, minerInfos[i].maddr, func(cst cbor.IpldStore, st *miner0.State) error { + maxPeriods := miner0.MaxSectorExpirationExtension / miner0.WPoStProvingPeriod + minerInfos[i].presealExp = (maxPeriods-1)*miner0.WPoStProvingPeriod + st.ProvingPeriodStart - 1 + + return nil + }) + if err != nil { + return cid.Undef, xerrors.Errorf("mutating state: %w", err) + } + } + + // Add market funds + + if m.MarketBalance.GreaterThan(big.Zero()) { + params := mustEnc(&minerInfos[i].maddr) + _, err := doExecValue(ctx, vm, market.Address, m.Worker, m.MarketBalance, builtin.MethodsMarket.AddBalance, params) + if err != nil { + return cid.Undef, xerrors.Errorf("failed to create genesis miner (add balance): %w", err) + } + } + + // Publish preseal deals + + { + publish := func(params *market.PublishStorageDealsParams) error { + fmt.Printf("publishing %d storage deals on miner %s with worker %s\n", len(params.Deals), params.Deals[0].Proposal.Provider, m.Worker) + + ret, err := doExecValue(ctx, vm, market.Address, m.Worker, big.Zero(), builtin.MethodsMarket.PublishStorageDeals, mustEnc(params)) + if err != nil { + return xerrors.Errorf("failed to create genesis miner (publish deals): %w", err) + } + var ids market.PublishStorageDealsReturn + if err := ids.UnmarshalCBOR(bytes.NewReader(ret)); err != nil { + return xerrors.Errorf("unmarsahling publishStorageDeals result: %w", err) + } + + minerInfos[i].dealIDs = append(minerInfos[i].dealIDs, ids.IDs...) 
+ return nil + } + + params := &market.PublishStorageDealsParams{} + for _, preseal := range m.Sectors { + preseal.Deal.VerifiedDeal = true + preseal.Deal.EndEpoch = minerInfos[i].presealExp + params.Deals = append(params.Deals, market.ClientDealProposal{ + Proposal: preseal.Deal, + ClientSignature: crypto.Signature{Type: crypto.SigTypeBLS}, // TODO: do we want to sign these? Or do we want to fake signatures for genesis setup? + }) + + if len(params.Deals) == cbg.MaxLength { + if err := publish(params); err != nil { + return cid.Undef, err + } + + params = &market.PublishStorageDealsParams{} + } + } + + if len(params.Deals) > 0 { + if err := publish(params); err != nil { + return cid.Undef, err + } + } + } + } + + // adjust total network power for equal pledge per sector + rawPow, qaPow := big.NewInt(0), big.NewInt(0) + { + for i, m := range miners { + for pi := range m.Sectors { + rawPow = types.BigAdd(rawPow, types.NewInt(uint64(m.SectorSize))) + + dweight, err := dealWeight(ctx, vm, minerInfos[i].maddr, []abi.DealID{minerInfos[i].dealIDs[pi]}, 0, minerInfos[i].presealExp) + if err != nil { + return cid.Undef, xerrors.Errorf("getting deal weight: %w", err) + } + + sectorWeight := miner0.QAPowerForWeight(m.SectorSize, minerInfos[i].presealExp, dweight.DealWeight, dweight.VerifiedDealWeight) + + qaPow = types.BigAdd(qaPow, sectorWeight) + } + } + + err = vm.MutateState(ctx, power.Address, func(cst cbor.IpldStore, st *power0.State) error { + st.TotalQualityAdjPower = qaPow + st.TotalRawBytePower = rawPow + + st.ThisEpochQualityAdjPower = qaPow + st.ThisEpochRawBytePower = rawPow + return nil + }) + if err != nil { + return cid.Undef, xerrors.Errorf("mutating state: %w", err) + } + + err = vm.MutateState(ctx, reward.Address, func(sct cbor.IpldStore, st *reward0.State) error { + *st = *reward0.ConstructState(qaPow) + return nil + }) + if err != nil { + return cid.Undef, xerrors.Errorf("mutating state: %w", err) + } + } + + for i, m := range miners { + // Commit 
sectors + { + for pi, preseal := range m.Sectors { + params := &miner.SectorPreCommitInfo{ + SealProof: preseal.ProofType, + SectorNumber: preseal.SectorID, + SealedCID: preseal.CommR, + SealRandEpoch: -1, + DealIDs: []abi.DealID{minerInfos[i].dealIDs[pi]}, + Expiration: minerInfos[i].presealExp, // TODO: Allow setting externally! + } + + dweight, err := dealWeight(ctx, vm, minerInfos[i].maddr, params.DealIDs, 0, minerInfos[i].presealExp) + if err != nil { + return cid.Undef, xerrors.Errorf("getting deal weight: %w", err) + } + + sectorWeight := miner0.QAPowerForWeight(m.SectorSize, minerInfos[i].presealExp, dweight.DealWeight, dweight.VerifiedDealWeight) + + // we've added fake power for this sector above, remove it now + err = vm.MutateState(ctx, power.Address, func(cst cbor.IpldStore, st *power0.State) error { + st.TotalQualityAdjPower = types.BigSub(st.TotalQualityAdjPower, sectorWeight) //nolint:scopelint + st.TotalRawBytePower = types.BigSub(st.TotalRawBytePower, types.NewInt(uint64(m.SectorSize))) + return nil + }) + if err != nil { + return cid.Undef, xerrors.Errorf("removing fake power: %w", err) + } + + epochReward, err := currentEpochBlockReward(ctx, vm, minerInfos[i].maddr) + if err != nil { + return cid.Undef, xerrors.Errorf("getting current epoch reward: %w", err) + } + + tpow, err := currentTotalPower(ctx, vm, minerInfos[i].maddr) + if err != nil { + return cid.Undef, xerrors.Errorf("getting current total power: %w", err) + } + + pcd := miner0.PreCommitDepositForPower(epochReward.ThisEpochRewardSmoothed, tpow.QualityAdjPowerSmoothed, sectorWeight) + + pledge := miner0.InitialPledgeForPower( + sectorWeight, + epochReward.ThisEpochBaselinePower, + tpow.PledgeCollateral, + epochReward.ThisEpochRewardSmoothed, + tpow.QualityAdjPowerSmoothed, + circSupply(ctx, vm, minerInfos[i].maddr), + ) + + pledge = big.Add(pcd, pledge) + + fmt.Println(types.FIL(pledge)) + _, err = doExecValue(ctx, vm, minerInfos[i].maddr, m.Worker, pledge, 
builtin.MethodsMiner.PreCommitSector, mustEnc(params)) + if err != nil { + return cid.Undef, xerrors.Errorf("failed to confirm presealed sectors: %w", err) + } + + // Commit one-by-one, otherwise pledge math tends to explode + confirmParams := &builtin.ConfirmSectorProofsParams{ + Sectors: []abi.SectorNumber{preseal.SectorID}, + } + + _, err = doExecValue(ctx, vm, minerInfos[i].maddr, power.Address, big.Zero(), builtin.MethodsMiner.ConfirmSectorProofsValid, mustEnc(confirmParams)) + if err != nil { + return cid.Undef, xerrors.Errorf("failed to confirm presealed sectors: %w", err) + } + } + } + } + + // Sanity-check total network power + err = vm.MutateState(ctx, power.Address, func(cst cbor.IpldStore, st *power0.State) error { + if !st.TotalRawBytePower.Equals(rawPow) { + return xerrors.Errorf("st.TotalRawBytePower doesn't match previously calculated rawPow") + } + + if !st.TotalQualityAdjPower.Equals(qaPow) { + return xerrors.Errorf("st.TotalQualityAdjPower doesn't match previously calculated qaPow") + } + + return nil + }) + if err != nil { + return cid.Undef, xerrors.Errorf("mutating state: %w", err) + } + + // TODO: Should we re-ConstructState for the reward actor using rawPow as currRealizedPower here? 
+ + c, err := vm.Flush(ctx) + if err != nil { + return cid.Undef, xerrors.Errorf("flushing vm: %w", err) + } + return c, nil +} + +// TODO: copied from actors test harness, deduplicate or remove from here +type fakeRand struct{} + +func (fr *fakeRand) GetChainRandomness(ctx context.Context, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) ([]byte, error) { + out := make([]byte, 32) + _, _ = rand.New(rand.NewSource(int64(randEpoch * 1000))).Read(out) //nolint + return out, nil +} + +func (fr *fakeRand) GetBeaconRandomness(ctx context.Context, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) ([]byte, error) { + out := make([]byte, 32) + _, _ = rand.New(rand.NewSource(int64(randEpoch))).Read(out) //nolint + return out, nil +} + +func currentTotalPower(ctx context.Context, vm *vm.VM, maddr address.Address) (*power0.CurrentTotalPowerReturn, error) { + pwret, err := doExecValue(ctx, vm, power.Address, maddr, big.Zero(), builtin.MethodsPower.CurrentTotalPower, nil) + if err != nil { + return nil, err + } + var pwr power0.CurrentTotalPowerReturn + if err := pwr.UnmarshalCBOR(bytes.NewReader(pwret)); err != nil { + return nil, err + } + + return &pwr, nil +} + +func dealWeight(ctx context.Context, vm *vm.VM, maddr address.Address, dealIDs []abi.DealID, sectorStart, sectorExpiry abi.ChainEpoch) (market0.VerifyDealsForActivationReturn, error) { + params := &market.VerifyDealsForActivationParams{ + DealIDs: dealIDs, + SectorStart: sectorStart, + SectorExpiry: sectorExpiry, + } + + var dealWeights market0.VerifyDealsForActivationReturn + ret, err := doExecValue(ctx, vm, + market.Address, + maddr, + abi.NewTokenAmount(0), + builtin.MethodsMarket.VerifyDealsForActivation, + mustEnc(params), + ) + if err != nil { + return market0.VerifyDealsForActivationReturn{}, err + } + if err := dealWeights.UnmarshalCBOR(bytes.NewReader(ret)); err != nil { + return market0.VerifyDealsForActivationReturn{}, err + } + + 
return dealWeights, nil +} + +func currentEpochBlockReward(ctx context.Context, vm *vm.VM, maddr address.Address) (*reward0.ThisEpochRewardReturn, error) { + rwret, err := doExecValue(ctx, vm, reward.Address, maddr, big.Zero(), builtin.MethodsReward.ThisEpochReward, nil) + if err != nil { + return nil, err + } + + var epochReward reward0.ThisEpochRewardReturn + if err := epochReward.UnmarshalCBOR(bytes.NewReader(rwret)); err != nil { + return nil, err + } + + return &epochReward, nil +} + +func circSupply(ctx context.Context, vmi *vm.VM, maddr address.Address) abi.TokenAmount { + unsafeVM := &vm.UnsafeVM{VM: vmi} + rt := unsafeVM.MakeRuntime(ctx, &types.Message{ + GasLimit: 1_000_000_000, + From: maddr, + }, maddr, 0, 0, 0) + + return rt.TotalFilCircSupply() +} diff --git a/chain/gen/genesis/t00_system.go b/chain/gen/genesis/t00_system.go new file mode 100644 index 000000000..6e6cc976a --- /dev/null +++ b/chain/gen/genesis/t00_system.go @@ -0,0 +1,31 @@ +package genesis + +import ( + "context" + + "github.com/filecoin-project/specs-actors/actors/builtin/system" + + "github.com/filecoin-project/specs-actors/actors/builtin" + cbor "github.com/ipfs/go-ipld-cbor" + + "github.com/filecoin-project/lotus/chain/types" + bstore "github.com/filecoin-project/lotus/lib/blockstore" +) + +func SetupSystemActor(bs bstore.Blockstore) (*types.Actor, error) { + var st system.State + + cst := cbor.NewCborStore(bs) + + statecid, err := cst.Put(context.TODO(), &st) + if err != nil { + return nil, err + } + + act := &types.Actor{ + Code: builtin.SystemActorCodeID, + Head: statecid, + } + + return act, nil +} diff --git a/chain/gen/genesis/t01_init.go b/chain/gen/genesis/t01_init.go new file mode 100644 index 000000000..667079a6d --- /dev/null +++ b/chain/gen/genesis/t01_init.go @@ -0,0 +1,144 @@ +package genesis + +import ( + "context" + "encoding/json" + "fmt" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + + 
"github.com/filecoin-project/specs-actors/actors/builtin" + "github.com/filecoin-project/specs-actors/actors/util/adt" + + init_ "github.com/filecoin-project/specs-actors/actors/builtin/init" + cbor "github.com/ipfs/go-ipld-cbor" + cbg "github.com/whyrusleeping/cbor-gen" + "golang.org/x/xerrors" + + "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/genesis" + bstore "github.com/filecoin-project/lotus/lib/blockstore" +) + +func SetupInitActor(bs bstore.Blockstore, netname string, initialActors []genesis.Actor, rootVerifier genesis.Actor) (int64, *types.Actor, map[address.Address]address.Address, error) { + if len(initialActors) > MaxAccounts { + return 0, nil, nil, xerrors.New("too many initial actors") + } + + var ias init_.State + ias.NextID = MinerStart + ias.NetworkName = netname + + store := adt.WrapStore(context.TODO(), cbor.NewCborStore(bs)) + amap := adt.MakeEmptyMap(store) + + keyToId := map[address.Address]address.Address{} + counter := int64(AccountStart) + + for _, a := range initialActors { + if a.Type == genesis.TMultisig { + var ainfo genesis.MultisigMeta + if err := json.Unmarshal(a.Meta, &ainfo); err != nil { + return 0, nil, nil, xerrors.Errorf("unmarshaling account meta: %w", err) + } + for _, e := range ainfo.Signers { + + if _, ok := keyToId[e]; ok { + continue + } + + fmt.Printf("init set %s t0%d\n", e, counter) + + value := cbg.CborInt(counter) + if err := amap.Put(abi.AddrKey(e), &value); err != nil { + return 0, nil, nil, err + } + counter = counter + 1 + var err error + keyToId[e], err = address.NewIDAddress(uint64(value)) + if err != nil { + return 0, nil, nil, err + } + + } + // Need to add actors for all multisigs too + continue + } + + if a.Type != genesis.TAccount { + return 0, nil, nil, xerrors.Errorf("unsupported account type: %s", a.Type) + } + + var ainfo genesis.AccountMeta + if err := json.Unmarshal(a.Meta, &ainfo); err != nil { + return 0, nil, nil, xerrors.Errorf("unmarshaling account meta: 
%w", err) + } + + fmt.Printf("init set %s t0%d\n", ainfo.Owner, counter) + + value := cbg.CborInt(counter) + if err := amap.Put(abi.AddrKey(ainfo.Owner), &value); err != nil { + return 0, nil, nil, err + } + counter = counter + 1 + + var err error + keyToId[ainfo.Owner], err = address.NewIDAddress(uint64(value)) + if err != nil { + return 0, nil, nil, err + } + } + + if rootVerifier.Type == genesis.TAccount { + var ainfo genesis.AccountMeta + if err := json.Unmarshal(rootVerifier.Meta, &ainfo); err != nil { + return 0, nil, nil, xerrors.Errorf("unmarshaling account meta: %w", err) + } + value := cbg.CborInt(80) + if err := amap.Put(abi.AddrKey(ainfo.Owner), &value); err != nil { + return 0, nil, nil, err + } + } else if rootVerifier.Type == genesis.TMultisig { + var ainfo genesis.MultisigMeta + if err := json.Unmarshal(rootVerifier.Meta, &ainfo); err != nil { + return 0, nil, nil, xerrors.Errorf("unmarshaling account meta: %w", err) + } + for _, e := range ainfo.Signers { + if _, ok := keyToId[e]; ok { + continue + } + fmt.Printf("init set %s t0%d\n", e, counter) + + value := cbg.CborInt(counter) + if err := amap.Put(abi.AddrKey(e), &value); err != nil { + return 0, nil, nil, err + } + counter = counter + 1 + var err error + keyToId[e], err = address.NewIDAddress(uint64(value)) + if err != nil { + return 0, nil, nil, err + } + + } + } + + amapaddr, err := amap.Root() + if err != nil { + return 0, nil, nil, err + } + ias.AddressMap = amapaddr + + statecid, err := store.Put(store.Context(), &ias) + if err != nil { + return 0, nil, nil, err + } + + act := &types.Actor{ + Code: builtin.InitActorCodeID, + Head: statecid, + } + + return counter, act, keyToId, nil +} diff --git a/chain/gen/genesis/t02_reward.go b/chain/gen/genesis/t02_reward.go new file mode 100644 index 000000000..92531051b --- /dev/null +++ b/chain/gen/genesis/t02_reward.go @@ -0,0 +1,32 @@ +package genesis + +import ( + "context" + + "github.com/filecoin-project/go-state-types/big" + + 
"github.com/filecoin-project/specs-actors/actors/builtin" + reward0 "github.com/filecoin-project/specs-actors/actors/builtin/reward" + cbor "github.com/ipfs/go-ipld-cbor" + + "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/chain/types" + bstore "github.com/filecoin-project/lotus/lib/blockstore" +) + +func SetupRewardActor(bs bstore.Blockstore, qaPower big.Int) (*types.Actor, error) { + cst := cbor.NewCborStore(bs) + + st := reward0.ConstructState(qaPower) + + hcid, err := cst.Put(context.TODO(), st) + if err != nil { + return nil, err + } + + return &types.Actor{ + Code: builtin.RewardActorCodeID, + Balance: types.BigInt{Int: build.InitialRewardBalance}, + Head: hcid, + }, nil +} diff --git a/chain/gen/genesis/t03_cron.go b/chain/gen/genesis/t03_cron.go new file mode 100644 index 000000000..cf2c0d7a7 --- /dev/null +++ b/chain/gen/genesis/t03_cron.go @@ -0,0 +1,29 @@ +package genesis + +import ( + "context" + + "github.com/filecoin-project/specs-actors/actors/builtin" + "github.com/filecoin-project/specs-actors/actors/builtin/cron" + cbor "github.com/ipfs/go-ipld-cbor" + + "github.com/filecoin-project/lotus/chain/types" + bstore "github.com/filecoin-project/lotus/lib/blockstore" +) + +func SetupCronActor(bs bstore.Blockstore) (*types.Actor, error) { + cst := cbor.NewCborStore(bs) + cas := cron.ConstructState(cron.BuiltInEntries()) + + stcid, err := cst.Put(context.TODO(), cas) + if err != nil { + return nil, err + } + + return &types.Actor{ + Code: builtin.CronActorCodeID, + Head: stcid, + Nonce: 0, + Balance: types.NewInt(0), + }, nil +} diff --git a/chain/gen/genesis/t04_power.go b/chain/gen/genesis/t04_power.go new file mode 100644 index 000000000..2f1303ba4 --- /dev/null +++ b/chain/gen/genesis/t04_power.go @@ -0,0 +1,46 @@ +package genesis + +import ( + "context" + + "github.com/filecoin-project/specs-actors/actors/builtin" + "github.com/filecoin-project/specs-actors/actors/util/adt" + + power0 
"github.com/filecoin-project/specs-actors/actors/builtin/power" + cbor "github.com/ipfs/go-ipld-cbor" + + "github.com/filecoin-project/lotus/chain/types" + bstore "github.com/filecoin-project/lotus/lib/blockstore" +) + +func SetupStoragePowerActor(bs bstore.Blockstore) (*types.Actor, error) { + store := adt.WrapStore(context.TODO(), cbor.NewCborStore(bs)) + emptyMap, err := adt.MakeEmptyMap(store).Root() + if err != nil { + return nil, err + } + + multiMap, err := adt.AsMultimap(store, emptyMap) + if err != nil { + return nil, err + } + + emptyMultiMap, err := multiMap.Root() + if err != nil { + return nil, err + } + + sms := power0.ConstructState(emptyMap, emptyMultiMap) + + stcid, err := store.Put(store.Context(), sms) + if err != nil { + return nil, err + } + + return &types.Actor{ + Code: builtin.StoragePowerActorCodeID, + Head: stcid, + Nonce: 0, + Balance: types.NewInt(0), + }, nil +} diff --git a/chain/gen/genesis/t05_market.go b/chain/gen/genesis/t05_market.go new file mode 100644 index 000000000..615e8370b --- /dev/null +++ b/chain/gen/genesis/t05_market.go @@ -0,0 +1,41 @@ +package genesis + +import ( + "context" + + "github.com/filecoin-project/specs-actors/actors/builtin" + "github.com/filecoin-project/specs-actors/actors/builtin/market" + "github.com/filecoin-project/specs-actors/actors/util/adt" + cbor "github.com/ipfs/go-ipld-cbor" + + "github.com/filecoin-project/lotus/chain/types" + bstore "github.com/filecoin-project/lotus/lib/blockstore" +) + +func SetupStorageMarketActor(bs bstore.Blockstore) (*types.Actor, error) { + store := adt.WrapStore(context.TODO(), cbor.NewCborStore(bs)) + + a, err := adt.MakeEmptyArray(store).Root() + if err != nil { + return nil, err + } + h, err := adt.MakeEmptyMap(store).Root() + if err != nil { + return nil, err + } + + sms := market.ConstructState(a, h, h) + + stcid, err := store.Put(store.Context(), sms) + if err != nil { + return nil, err + } + + act := &types.Actor{ + Code: builtin.StorageMarketActorCodeID, + 
Head: stcid, + Balance: types.NewInt(0), + } + + return act, nil +} diff --git a/chain/gen/genesis/t06_vreg.go b/chain/gen/genesis/t06_vreg.go new file mode 100644 index 000000000..1709b205f --- /dev/null +++ b/chain/gen/genesis/t06_vreg.go @@ -0,0 +1,51 @@ +package genesis + +import ( + "context" + + "github.com/filecoin-project/go-address" + cbor "github.com/ipfs/go-ipld-cbor" + + "github.com/filecoin-project/specs-actors/actors/builtin" + verifreg0 "github.com/filecoin-project/specs-actors/actors/builtin/verifreg" + "github.com/filecoin-project/specs-actors/actors/util/adt" + + "github.com/filecoin-project/lotus/chain/types" + bstore "github.com/filecoin-project/lotus/lib/blockstore" +) + +var RootVerifierID address.Address + +func init() { + + idk, err := address.NewFromString("t080") + if err != nil { + panic(err) + } + + RootVerifierID = idk +} + +func SetupVerifiedRegistryActor(bs bstore.Blockstore) (*types.Actor, error) { + store := adt.WrapStore(context.TODO(), cbor.NewCborStore(bs)) + + h, err := adt.MakeEmptyMap(store).Root() + if err != nil { + return nil, err + } + + sms := verifreg0.ConstructState(h, RootVerifierID) + + stcid, err := store.Put(store.Context(), sms) + if err != nil { + return nil, err + } + + act := &types.Actor{ + Code: builtin.VerifiedRegistryActorCodeID, + Head: stcid, + Balance: types.NewInt(0), + } + + return act, nil +} diff --git a/chain/gen/genesis/util.go b/chain/gen/genesis/util.go new file mode 100644 index 000000000..bcafb007e --- /dev/null +++ b/chain/gen/genesis/util.go @@ -0,0 +1,62 @@ +package genesis + +import ( + "context" + + "github.com/filecoin-project/go-state-types/network" + "github.com/filecoin-project/lotus/build" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + cbg "github.com/whyrusleeping/cbor-gen" + "golang.org/x/xerrors" + + "github.com/filecoin-project/lotus/chain/actors" + "github.com/filecoin-project/lotus/chain/types" + 
"github.com/filecoin-project/lotus/chain/vm" +) + +func mustEnc(i cbg.CBORMarshaler) []byte { + enc, err := actors.SerializeParams(i) + if err != nil { + panic(err) // ok + } + return enc +} + +func doExecValue(ctx context.Context, vm *vm.VM, to, from address.Address, value types.BigInt, method abi.MethodNum, params []byte) ([]byte, error) { + act, err := vm.StateTree().GetActor(from) + if err != nil { + return nil, xerrors.Errorf("doExec failed to get from actor (%s): %w", from, err) + } + + ret, err := vm.ApplyImplicitMessage(ctx, &types.Message{ + To: to, + From: from, + Method: method, + Params: params, + GasLimit: 1_000_000_000_000_000, + Value: value, + Nonce: act.Nonce, + }) + if err != nil { + return nil, xerrors.Errorf("doExec apply message failed: %w", err) + } + + if ret.ExitCode != 0 { + return nil, xerrors.Errorf("failed to call method: %w", ret.ActorErr) + } + + return ret.Return, nil +} + +var GenesisNetworkVersion = func() network.Version { // TODO: Get from build/ + if build.UseNewestNetwork() { // TODO: Get from build/ + return build.NewestNetworkVersion // TODO: Get from build/ + } // TODO: Get from build/ + return network.Version1 // TODO: Get from build/ +}() // TODO: Get from build/ + +func genesisNetworkVersion(context.Context, abi.ChainEpoch) network.Version { // TODO: Get from build/ + return GenesisNetworkVersion // TODO: Get from build/ +} // TODO: Get from build/ diff --git a/chain/gen/mining.go b/chain/gen/mining.go index dec684ec2..dd867da48 100644 --- a/chain/gen/mining.go +++ b/chain/gen/mining.go @@ -3,39 +3,49 @@ package gen import ( "context" - bls "github.com/filecoin-project/filecoin-ffi" - amt "github.com/filecoin-project/go-amt-ipld" + "github.com/filecoin-project/go-state-types/crypto" + "github.com/filecoin-project/specs-actors/actors/util/adt" cid "github.com/ipfs/go-cid" - hamt "github.com/ipfs/go-hamt-ipld" + cbor "github.com/ipfs/go-ipld-cbor" cbg "github.com/whyrusleeping/cbor-gen" "golang.org/x/xerrors" - 
"github.com/filecoin-project/go-address" + "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/chain/state" "github.com/filecoin-project/lotus/chain/stmgr" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/vm" "github.com/filecoin-project/lotus/chain/wallet" + "github.com/filecoin-project/lotus/lib/sigs/bls" ) -func MinerCreateBlock(ctx context.Context, sm *stmgr.StateManager, w *wallet.Wallet, miner address.Address, parents *types.TipSet, ticket *types.Ticket, proof *types.EPostProof, msgs []*types.SignedMessage, height, timestamp uint64) (*types.FullBlock, error) { - st, recpts, err := sm.TipSetState(ctx, parents) +func MinerCreateBlock(ctx context.Context, sm *stmgr.StateManager, w *wallet.Wallet, bt *api.BlockTemplate) (*types.FullBlock, error) { + + pts, err := sm.ChainStore().LoadTipSet(bt.Parents) + if err != nil { + return nil, xerrors.Errorf("failed to load parent tipset: %w", err) + } + + st, recpts, err := sm.TipSetState(ctx, pts) if err != nil { return nil, xerrors.Errorf("failed to load tipset state: %w", err) } - worker, err := stmgr.GetMinerWorkerRaw(ctx, sm, st, miner) + worker, err := stmgr.GetMinerWorkerRaw(ctx, sm, st, bt.Miner) if err != nil { return nil, xerrors.Errorf("failed to get miner worker: %w", err) } next := &types.BlockHeader{ - Miner: miner, - Parents: parents.Cids(), - Ticket: ticket, - Height: height, - Timestamp: timestamp, - EPostProof: *proof, + Miner: bt.Miner, + Parents: bt.Parents.Cids(), + Ticket: bt.Ticket, + ElectionProof: bt.Eproof, + + BeaconEntries: bt.BeaconValues, + Height: bt.Epoch, + Timestamp: bt.Timestamp, + WinPoStProof: bt.WinningPoStProof, ParentStateRoot: st, ParentMessageReceipts: recpts, } @@ -44,9 +54,9 @@ func MinerCreateBlock(ctx context.Context, sm *stmgr.StateManager, w *wallet.Wal var secpkMessages []*types.SignedMessage var blsMsgCids, secpkMsgCids []cid.Cid - var blsSigs []types.Signature - for _, msg := range msgs { - if 
msg.Signature.TypeCode() == types.IKTBLS { + var blsSigs []crypto.Signature + for _, msg := range bt.Messages { + if msg.Signature.Type == crypto.SigTypeBLS { blsSigs = append(blsSigs, msg.Signature) blsMessages = append(blsMessages, &msg.Message) @@ -68,17 +78,17 @@ func MinerCreateBlock(ctx context.Context, sm *stmgr.StateManager, w *wallet.Wal } } - bs := amt.WrapBlockstore(sm.ChainStore().Blockstore()) - blsmsgroot, err := amt.FromArray(bs, toIfArr(blsMsgCids)) + store := sm.ChainStore().Store(ctx) + blsmsgroot, err := toArray(store, blsMsgCids) if err != nil { return nil, xerrors.Errorf("building bls amt: %w", err) } - secpkmsgroot, err := amt.FromArray(bs, toIfArr(secpkMsgCids)) + secpkmsgroot, err := toArray(store, secpkMsgCids) if err != nil { return nil, xerrors.Errorf("building secpk amt: %w", err) } - mmcid, err := bs.Put(&types.MsgMeta{ + mmcid, err := store.Put(store.Context(), &types.MsgMeta{ BlsMessages: blsmsgroot, SecpkMessages: secpkmsgroot, }) @@ -93,13 +103,19 @@ func MinerCreateBlock(ctx context.Context, sm *stmgr.StateManager, w *wallet.Wal } next.BLSAggregate = aggSig - pweight, err := sm.ChainStore().Weight(ctx, parents) + pweight, err := sm.ChainStore().Weight(ctx, pts) if err != nil { return nil, err } next.ParentWeight = pweight - cst := hamt.CSTFromBstore(sm.ChainStore().Blockstore()) + baseFee, err := sm.ChainStore().ComputeBaseFee(ctx, pts) + if err != nil { + return nil, xerrors.Errorf("computing base fee: %w", err) + } + next.ParentBaseFee = baseFee + + cst := cbor.NewCborStore(sm.ChainStore().Blockstore()) tree, err := state.LoadStateTree(cst, st) if err != nil { return nil, xerrors.Errorf("failed to load state tree: %w", err) @@ -131,26 +147,46 @@ func MinerCreateBlock(ctx context.Context, sm *stmgr.StateManager, w *wallet.Wal return fullBlock, nil } -func aggregateSignatures(sigs []types.Signature) (types.Signature, error) { - var blsSigs []bls.Signature - for _, s := range sigs { - var bsig bls.Signature - copy(bsig[:], s.Data) - 
blsSigs = append(blsSigs, bsig) +func aggregateSignatures(sigs []crypto.Signature) (*crypto.Signature, error) { + sigsS := make([][]byte, len(sigs)) + for i := 0; i < len(sigs); i++ { + sigsS[i] = sigs[i].Data } - aggSig := bls.Aggregate(blsSigs) - return types.Signature{ - Type: types.KTBLS, - Data: aggSig[:], + aggregator := new(bls.AggregateSignature).AggregateCompressed(sigsS) + if aggregator == nil { + if len(sigs) > 0 { + return nil, xerrors.Errorf("bls.Aggregate returned nil with %d signatures", len(sigs)) + } + + // Note: for blst this condition should not happen - nil should not + // be returned + return &crypto.Signature{ + Type: crypto.SigTypeBLS, + Data: new(bls.Signature).Compress(), + }, nil + } + aggSigAff := aggregator.ToAffine() + if aggSigAff == nil { + return &crypto.Signature{ + Type: crypto.SigTypeBLS, + Data: new(bls.Signature).Compress(), + }, nil + } + aggSig := aggSigAff.Compress() + return &crypto.Signature{ + Type: crypto.SigTypeBLS, + Data: aggSig, }, nil } -func toIfArr(cids []cid.Cid) []cbg.CBORMarshaler { - out := make([]cbg.CBORMarshaler, 0, len(cids)) - for _, c := range cids { +func toArray(store adt.Store, cids []cid.Cid) (cid.Cid, error) { + arr := adt.MakeEmptyArray(store) + for i, c := range cids { oc := cbg.CborCid(c) - out = append(out, &oc) + if err := arr.Set(uint64(i), &oc); err != nil { + return cid.Undef, err + } } - return out + return arr.Root() } diff --git a/chain/gen/slashfilter/slashfilter.go b/chain/gen/slashfilter/slashfilter.go new file mode 100644 index 000000000..fadd3dd27 --- /dev/null +++ b/chain/gen/slashfilter/slashfilter.go @@ -0,0 +1,112 @@ +package slashfilter + +import ( + "fmt" + + "golang.org/x/xerrors" + + "github.com/ipfs/go-cid" + ds "github.com/ipfs/go-datastore" + "github.com/ipfs/go-datastore/namespace" + + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/lotus/chain/types" +) + +type SlashFilter struct { + byEpoch ds.Datastore // double-fork mining faults, 
parent-grinding fault + byParents ds.Datastore // time-offset mining faults +} + +func New(dstore ds.Batching) *SlashFilter { + return &SlashFilter{ + byEpoch: namespace.Wrap(dstore, ds.NewKey("/slashfilter/epoch")), + byParents: namespace.Wrap(dstore, ds.NewKey("/slashfilter/parents")), + } +} + +func (f *SlashFilter) MinedBlock(bh *types.BlockHeader, parentEpoch abi.ChainEpoch) error { + epochKey := ds.NewKey(fmt.Sprintf("/%s/%d", bh.Miner, bh.Height)) + { + // double-fork mining (2 blocks at one epoch) + if err := checkFault(f.byEpoch, epochKey, bh, "double-fork mining faults"); err != nil { + return err + } + } + + parentsKey := ds.NewKey(fmt.Sprintf("/%s/%x", bh.Miner, types.NewTipSetKey(bh.Parents...).Bytes())) + { + // time-offset mining faults (2 blocks with the same parents) + if err := checkFault(f.byParents, parentsKey, bh, "time-offset mining faults"); err != nil { + return err + } + } + + { + // parent-grinding fault (didn't mine on top of our own block) + + // First check if we have mined a block on the parent epoch + parentEpochKey := ds.NewKey(fmt.Sprintf("/%s/%d", bh.Miner, parentEpoch)) + have, err := f.byEpoch.Has(parentEpochKey) + if err != nil { + return err + } + + if have { + // If we had, make sure it's in our parent tipset + cidb, err := f.byEpoch.Get(parentEpochKey) + if err != nil { + return xerrors.Errorf("getting other block cid: %w", err) + } + + _, parent, err := cid.CidFromBytes(cidb) + if err != nil { + return err + } + + var found bool + for _, c := range bh.Parents { + if c.Equals(parent) { + found = true + } + } + + if !found { + return xerrors.Errorf("produced block would trigger 'parent-grinding fault' consensus fault; miner: %s; bh: %s, expected parent: %s", bh.Miner, bh.Cid(), parent) + } + } + } + + if err := f.byParents.Put(parentsKey, bh.Cid().Bytes()); err != nil { + return xerrors.Errorf("putting byEpoch entry: %w", err) + } + + if err := f.byEpoch.Put(epochKey, bh.Cid().Bytes()); err != nil { + return 
xerrors.Errorf("putting byEpoch entry: %w", err) + } + + return nil +} + +func checkFault(t ds.Datastore, key ds.Key, bh *types.BlockHeader, faultType string) error { + fault, err := t.Has(key) + if err != nil { + return err + } + + if fault { + cidb, err := t.Get(key) + if err != nil { + return xerrors.Errorf("getting other block cid: %w", err) + } + + _, other, err := cid.CidFromBytes(cidb) + if err != nil { + return err + } + + return xerrors.Errorf("produced block would trigger '%s' consensus fault; miner: %s; bh: %s, other: %s", faultType, bh.Miner, bh.Cid(), other) + } + + return nil +} diff --git a/chain/gen/utils.go b/chain/gen/utils.go deleted file mode 100644 index 3f528075e..000000000 --- a/chain/gen/utils.go +++ /dev/null @@ -1,677 +0,0 @@ -package gen - -import ( - "bytes" - "context" - "fmt" - - amt "github.com/filecoin-project/go-amt-ipld" - "github.com/ipfs/go-cid" - "github.com/ipfs/go-datastore" - hamt "github.com/ipfs/go-hamt-ipld" - blockstore "github.com/ipfs/go-ipfs-blockstore" - bstore "github.com/ipfs/go-ipfs-blockstore" - peer "github.com/libp2p/go-libp2p-core/peer" - cbg "github.com/whyrusleeping/cbor-gen" - "golang.org/x/xerrors" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/lotus/build" - actors "github.com/filecoin-project/lotus/chain/actors" - "github.com/filecoin-project/lotus/chain/state" - "github.com/filecoin-project/lotus/chain/store" - "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/chain/vm" - "github.com/filecoin-project/lotus/genesis" -) - -type GenesisBootstrap struct { - Genesis *types.BlockHeader -} - -func SetupInitActor(bs bstore.Blockstore, addrs []address.Address) (*types.Actor, error) { - var ias actors.InitActorState - ias.NextID = 100 - - cst := hamt.CSTFromBstore(bs) - amap := hamt.NewNode(cst) - - for i, a := range addrs { - if err := amap.Set(context.TODO(), string(a.Bytes()), 100+uint64(i)); err != nil { - return nil, err - } - } - - ias.NextID += 
uint64(len(addrs)) - if err := amap.Flush(context.TODO()); err != nil { - return nil, err - } - amapcid, err := cst.Put(context.TODO(), amap) - if err != nil { - return nil, err - } - - ias.AddressMap = amapcid - - statecid, err := cst.Put(context.TODO(), &ias) - if err != nil { - return nil, err - } - - act := &types.Actor{ - Code: actors.InitCodeCid, - Head: statecid, - } - - return act, nil -} - -func MakeInitialStateTree(bs bstore.Blockstore, actmap map[address.Address]types.BigInt) (*state.StateTree, error) { - cst := hamt.CSTFromBstore(bs) - state, err := state.NewStateTree(cst) - if err != nil { - return nil, xerrors.Errorf("making new state tree: %w", err) - } - - emptyobject, err := cst.Put(context.TODO(), map[string]string{}) - if err != nil { - return nil, xerrors.Errorf("failed putting empty object: %w", err) - } - - var addrs []address.Address - for a := range actmap { - addrs = append(addrs, a) - } - - initact, err := SetupInitActor(bs, addrs) - if err != nil { - return nil, xerrors.Errorf("setup init actor: %w", err) - } - - if err := state.SetActor(actors.InitAddress, initact); err != nil { - return nil, xerrors.Errorf("set init actor: %w", err) - } - - cronact, err := SetupCronActor(bs) - if err != nil { - return nil, xerrors.Errorf("setup cron actor: %w", err) - } - - if err := state.SetActor(actors.CronAddress, cronact); err != nil { - return nil, xerrors.Errorf("set cron actor: %w", err) - } - - spact, err := SetupStoragePowerActor(bs) - if err != nil { - return nil, xerrors.Errorf("setup storage market actor: %w", err) - } - - if err := state.SetActor(actors.StoragePowerAddress, spact); err != nil { - return nil, xerrors.Errorf("set storage market actor: %w", err) - } - - netAmt := types.FromFil(build.TotalFilecoin) - for _, amt := range actmap { - netAmt = types.BigSub(netAmt, amt) - } - - err = state.SetActor(actors.NetworkAddress, &types.Actor{ - Code: actors.AccountCodeCid, - Balance: netAmt, - Head: emptyobject, - }) - if err != nil { - 
return nil, xerrors.Errorf("set network account actor: %w", err) - } - - err = state.SetActor(actors.BurntFundsAddress, &types.Actor{ - Code: actors.AccountCodeCid, - Balance: types.NewInt(0), - Head: emptyobject, - }) - if err != nil { - return nil, xerrors.Errorf("set burnt funds account actor: %w", err) - } - - for a, v := range actmap { - err = state.SetActor(a, &types.Actor{ - Code: actors.AccountCodeCid, - Balance: v, - Head: emptyobject, - }) - if err != nil { - return nil, xerrors.Errorf("setting account from actmap: %w", err) - } - } - - return state, nil -} - -func SetupCronActor(bs bstore.Blockstore) (*types.Actor, error) { - cst := hamt.CSTFromBstore(bs) - cas := &actors.CronActorState{} - - stcid, err := cst.Put(context.TODO(), cas) - if err != nil { - return nil, err - } - - return &types.Actor{ - Code: actors.CronCodeCid, - Head: stcid, - Nonce: 0, - Balance: types.NewInt(0), - }, nil -} - -func SetupStoragePowerActor(bs bstore.Blockstore) (*types.Actor, error) { - cst := hamt.CSTFromBstore(bs) - nd := hamt.NewNode(cst) - emptyhamt, err := cst.Put(context.TODO(), nd) - if err != nil { - return nil, err - } - - blks := amt.WrapBlockstore(bs) - emptyamt, err := amt.FromArray(blks, nil) - if err != nil { - return nil, xerrors.Errorf("amt build failed: %w", err) - } - - sms := &actors.StoragePowerState{ - Miners: emptyhamt, - ProvingBuckets: emptyamt, - TotalStorage: types.NewInt(0), - } - - stcid, err := cst.Put(context.TODO(), sms) - if err != nil { - return nil, err - } - - return &types.Actor{ - Code: actors.StoragePowerCodeCid, - Head: stcid, - Nonce: 0, - Balance: types.NewInt(0), - }, nil -} - -func SetupStorageMarketActor(bs bstore.Blockstore, sroot cid.Cid, deals []actors.StorageDealProposal) (cid.Cid, error) { - cst := hamt.CSTFromBstore(bs) - nd := hamt.NewNode(cst) - emptyHAMT, err := cst.Put(context.TODO(), nd) - if err != nil { - return cid.Undef, err - } - - blks := amt.WrapBlockstore(bs) - - cdeals := make([]cbg.CBORMarshaler, len(deals)) 
- for i, deal := range deals { - cdeals[i] = &actors.OnChainDeal{ - PieceRef: deal.PieceRef, - PieceSize: deal.PieceSize, - Client: deal.Client, - Provider: deal.Provider, - ProposalExpiration: deal.ProposalExpiration, - Duration: deal.Duration, - StoragePricePerEpoch: deal.StoragePricePerEpoch, - StorageCollateral: deal.StorageCollateral, - ActivationEpoch: 1, - } - } - - dealAmt, err := amt.FromArray(blks, cdeals) - if err != nil { - return cid.Undef, xerrors.Errorf("amt build failed: %w", err) - } - - sms := &actors.StorageMarketState{ - Balances: emptyHAMT, - Deals: dealAmt, - NextDealID: uint64(len(deals)), - } - - stcid, err := cst.Put(context.TODO(), sms) - if err != nil { - return cid.Undef, err - } - - act := &types.Actor{ - Code: actors.StorageMarketCodeCid, - Head: stcid, - Nonce: 0, - Balance: types.NewInt(0), - } - - state, err := state.LoadStateTree(cst, sroot) - if err != nil { - return cid.Undef, xerrors.Errorf("making new state tree: %w", err) - } - - if err := state.SetActor(actors.StorageMarketAddress, act); err != nil { - return cid.Undef, xerrors.Errorf("set storage market actor: %w", err) - } - - return state.Flush() -} - -type GenMinerCfg struct { - PreSeals map[string]genesis.GenesisMiner - - // The addresses of the created miner, this is set by the genesis setup - MinerAddrs []address.Address - - PeerIDs []peer.ID -} - -func mustEnc(i cbg.CBORMarshaler) []byte { - enc, err := actors.SerializeParams(i) - if err != nil { - panic(err) // ok - } - return enc -} - -func SetupStorageMiners(ctx context.Context, cs *store.ChainStore, sroot cid.Cid, gmcfg *GenMinerCfg) (cid.Cid, []actors.StorageDealProposal, error) { - vm, err := vm.NewVM(sroot, 0, nil, actors.NetworkAddress, cs.Blockstore()) - if err != nil { - return cid.Undef, nil, xerrors.Errorf("failed to create NewVM: %w", err) - } - - if len(gmcfg.MinerAddrs) == 0 { - return cid.Undef, nil, xerrors.New("no genesis miners") - } - - if len(gmcfg.MinerAddrs) != len(gmcfg.PreSeals) { - return 
cid.Undef, nil, xerrors.Errorf("miner address list, and preseal count doesn't match (%d != %d)", len(gmcfg.MinerAddrs), len(gmcfg.PreSeals)) - } - - var deals []actors.StorageDealProposal - - for i, maddr := range gmcfg.MinerAddrs { - ps, psok := gmcfg.PreSeals[maddr.String()] - if !psok { - return cid.Undef, nil, xerrors.Errorf("no preseal for miner %s", maddr) - } - - minerParams := &actors.CreateStorageMinerParams{ - Owner: ps.Owner, - Worker: ps.Worker, - SectorSize: ps.SectorSize, - PeerID: gmcfg.PeerIDs[i], // TODO: grab from preseal too - } - - params := mustEnc(minerParams) - - // TODO: hardcoding 6500 here is a little fragile, it changes any - // time anyone changes the initial account allocations - rval, err := doExecValue(ctx, vm, actors.StoragePowerAddress, ps.Worker, types.FromFil(6500), actors.SPAMethods.CreateStorageMiner, params) - if err != nil { - return cid.Undef, nil, xerrors.Errorf("failed to create genesis miner: %w", err) - } - - maddrret, err := address.NewFromBytes(rval) - if err != nil { - return cid.Undef, nil, err - } - - _, err = vm.Flush(ctx) - if err != nil { - return cid.Undef, nil, err - } - - cst := hamt.CSTFromBstore(cs.Blockstore()) - if err := reassignMinerActorAddress(vm, cst, maddrret, maddr); err != nil { - return cid.Undef, nil, err - } - - power := types.BigMul(types.NewInt(minerParams.SectorSize), types.NewInt(uint64(len(ps.Sectors)))) - - params = mustEnc(&actors.UpdateStorageParams{Delta: power}) - - _, err = doExec(ctx, vm, actors.StoragePowerAddress, maddr, actors.SPAMethods.UpdateStorage, params) - if err != nil { - return cid.Undef, nil, xerrors.Errorf("failed to update total storage: %w", err) - } - - // we have to flush the vm here because it buffers stuff internally for perf reasons - if _, err := vm.Flush(ctx); err != nil { - return cid.Undef, nil, xerrors.Errorf("vm.Flush failed: %w", err) - } - - st := vm.StateTree() - mact, err := st.GetActor(maddr) - if err != nil { - return cid.Undef, nil, 
xerrors.Errorf("get miner actor failed: %w", err) - } - - var mstate actors.StorageMinerActorState - if err := cst.Get(ctx, mact.Head, &mstate); err != nil { - return cid.Undef, nil, xerrors.Errorf("getting miner actor state failed: %w", err) - } - mstate.Power = types.BigMul(types.NewInt(ps.SectorSize), types.NewInt(uint64(len(ps.Sectors)))) - - blks := amt.WrapBlockstore(cs.Blockstore()) - - for _, s := range ps.Sectors { - nssroot, err := actors.AddToSectorSet(ctx, blks, mstate.Sectors, s.SectorID, s.CommR[:], s.CommD[:]) - if err != nil { - return cid.Undef, nil, xerrors.Errorf("failed to add fake sector to sector set: %w", err) - } - mstate.Sectors = nssroot - mstate.ProvingSet = nssroot - - deals = append(deals, s.Deal) - } - - nstate, err := cst.Put(ctx, &mstate) - if err != nil { - return cid.Undef, nil, err - } - - mact.Head = nstate - if err := st.SetActor(maddr, mact); err != nil { - return cid.Undef, nil, err - } - } - - c, err := vm.Flush(ctx) - return c, deals, err -} - -func reassignMinerActorAddress(vm *vm.VM, cst *hamt.CborIpldStore, from, to address.Address) error { - if from == to { - return nil - } - act, err := vm.StateTree().GetActor(from) - if err != nil { - return xerrors.Errorf("reassign: failed to get 'from' actor: %w", err) - } - - _, err = vm.StateTree().GetActor(to) - if err == nil { - return xerrors.Errorf("cannot reassign actor, target address taken") - } - if err := vm.StateTree().SetActor(to, act); err != nil { - return xerrors.Errorf("failed to reassign actor: %w", err) - } - - if err := adjustStorageMarketTracking(vm, cst, from, to); err != nil { - return xerrors.Errorf("adjusting storage market tracking: %w", err) - } - - // Now, adjust the tracking in the init actor - return initActorReassign(vm, cst, from, to) -} - -func adjustStorageMarketTracking(vm *vm.VM, cst *hamt.CborIpldStore, from, to address.Address) error { - ctx := context.TODO() - act, err := vm.StateTree().GetActor(actors.StoragePowerAddress) - if err != nil { - 
return xerrors.Errorf("loading storage power actor: %w", err) - } - - var spst actors.StoragePowerState - if err := cst.Get(ctx, act.Head, &spst); err != nil { - return xerrors.Errorf("loading storage power actor state: %w", err) - } - - miners, err := hamt.LoadNode(ctx, cst, spst.Miners) - if err != nil { - return xerrors.Errorf("loading miner set: %w", err) - } - - if err := miners.Delete(ctx, string(from.Bytes())); err != nil { - return xerrors.Errorf("deleting from spa set: %w", err) - } - - if err := miners.Set(ctx, string(to.Bytes()), uint64(1)); err != nil { - return xerrors.Errorf("failed setting miner: %w", err) - } - - if err := miners.Flush(ctx); err != nil { - return err - } - - nminerscid, err := cst.Put(ctx, miners) - if err != nil { - return err - } - spst.Miners = nminerscid - - nhead, err := cst.Put(ctx, &spst) - if err != nil { - return err - } - - act.Head = nhead - - return nil -} - -func initActorReassign(vm *vm.VM, cst *hamt.CborIpldStore, from, to address.Address) error { - ctx := context.TODO() - initact, err := vm.StateTree().GetActor(actors.InitAddress) - if err != nil { - return xerrors.Errorf("couldnt get init actor: %w", err) - } - - var st actors.InitActorState - if err := cst.Get(ctx, initact.Head, &st); err != nil { - return xerrors.Errorf("reassign loading init actor state: %w", err) - } - - amap, err := hamt.LoadNode(ctx, cst, st.AddressMap) - if err != nil { - return xerrors.Errorf("failed to load init actor map: %w", err) - } - - target, err := address.IDFromAddress(from) - if err != nil { - return xerrors.Errorf("failed to extract ID: %w", err) - } - - var out string - halt := xerrors.Errorf("halt") - err = amap.ForEach(ctx, func(k string, v interface{}) error { - _, val, err := cbg.CborReadHeader(bytes.NewReader(v.(*cbg.Deferred).Raw)) - if err != nil { - return xerrors.Errorf("parsing int in map failed: %w", err) - } - - if val == target { - out = k - return halt - } - return nil - }) - - if err == nil { - return 
xerrors.Errorf("could not find from address in init ID map") - } - if !xerrors.Is(err, halt) { - return xerrors.Errorf("finding address in ID map failed: %w", err) - } - - if err := amap.Delete(ctx, out); err != nil { - return xerrors.Errorf("deleting 'from' entry in amap: %w", err) - } - - if err := amap.Set(ctx, out, target); err != nil { - return xerrors.Errorf("setting 'to' entry in amap: %w", err) - } - - if err := amap.Flush(ctx); err != nil { - return xerrors.Errorf("failed to flush amap: %w", err) - } - - ncid, err := cst.Put(ctx, amap) - if err != nil { - return err - } - - st.AddressMap = ncid - - nacthead, err := cst.Put(ctx, &st) - if err != nil { - return err - } - - initact.Head = nacthead - - return nil -} - -func doExec(ctx context.Context, vm *vm.VM, to, from address.Address, method uint64, params []byte) ([]byte, error) { - return doExecValue(ctx, vm, to, from, types.NewInt(0), method, params) -} - -func doExecValue(ctx context.Context, vm *vm.VM, to, from address.Address, value types.BigInt, method uint64, params []byte) ([]byte, error) { - act, err := vm.StateTree().GetActor(from) - if err != nil { - return nil, xerrors.Errorf("doExec failed to get from actor: %w", err) - } - - ret, err := vm.ApplyMessage(context.TODO(), &types.Message{ - To: to, - From: from, - Method: method, - Params: params, - GasLimit: types.NewInt(1000000), - GasPrice: types.NewInt(0), - Value: value, - Nonce: act.Nonce, - }) - if err != nil { - return nil, xerrors.Errorf("doExec apply message failed: %w", err) - } - - if ret.ExitCode != 0 { - return nil, fmt.Errorf("failed to call method: %s", ret.ActorErr) - } - - return ret.Return, nil -} - -func MakeGenesisBlock(bs bstore.Blockstore, balances map[address.Address]types.BigInt, gmcfg *GenMinerCfg, ts uint64) (*GenesisBootstrap, error) { - ctx := context.Background() - - state, err := MakeInitialStateTree(bs, balances) - if err != nil { - return nil, xerrors.Errorf("make initial state tree failed: %w", err) - } - - 
stateroot, err := state.Flush() - if err != nil { - return nil, xerrors.Errorf("flush state tree failed: %w", err) - } - - // temp chainstore - cs := store.NewChainStore(bs, datastore.NewMapDatastore()) - stateroot, deals, err := SetupStorageMiners(ctx, cs, stateroot, gmcfg) - if err != nil { - return nil, xerrors.Errorf("setup storage miners failed: %w", err) - } - - stateroot, err = SetupStorageMarketActor(bs, stateroot, deals) - if err != nil { - return nil, xerrors.Errorf("setup storage market actor: %w", err) - } - - stateroot, err = AdjustInitActorStartID(ctx, bs, stateroot, 1000) - if err != nil { - return nil, xerrors.Errorf("failed to adjust init actor start ID: %w", err) - } - - blks := amt.WrapBlockstore(bs) - - emptyroot, err := amt.FromArray(blks, nil) - if err != nil { - return nil, xerrors.Errorf("amt build failed: %w", err) - } - - mm := &types.MsgMeta{ - BlsMessages: emptyroot, - SecpkMessages: emptyroot, - } - mmb, err := mm.ToStorageBlock() - if err != nil { - return nil, xerrors.Errorf("serializing msgmeta failed: %w", err) - } - if err := bs.Put(mmb); err != nil { - return nil, xerrors.Errorf("putting msgmeta block to blockstore: %w", err) - } - - log.Infof("Empty Genesis root: %s", emptyroot) - - genesisticket := &types.Ticket{ - VRFProof: []byte("vrf proof0000000vrf proof0000000"), - } - - b := &types.BlockHeader{ - Miner: actors.InitAddress, - Ticket: genesisticket, - EPostProof: types.EPostProof{ - Proof: []byte("not a real proof"), - PostRand: []byte("i guess this is kinda random"), - }, - Parents: []cid.Cid{}, - Height: 0, - ParentWeight: types.NewInt(0), - ParentStateRoot: stateroot, - Messages: mmb.Cid(), - ParentMessageReceipts: emptyroot, - BLSAggregate: types.Signature{Type: types.KTBLS, Data: []byte("signatureeee")}, - BlockSig: &types.Signature{Type: types.KTBLS, Data: []byte("block signatureeee")}, - Timestamp: ts, - } - - sb, err := b.ToStorageBlock() - if err != nil { - return nil, xerrors.Errorf("serializing block header 
failed: %w", err) - } - - if err := bs.Put(sb); err != nil { - return nil, xerrors.Errorf("putting header to blockstore: %w", err) - } - - return &GenesisBootstrap{ - Genesis: b, - }, nil -} - -func AdjustInitActorStartID(ctx context.Context, bs blockstore.Blockstore, stateroot cid.Cid, val uint64) (cid.Cid, error) { - cst := hamt.CSTFromBstore(bs) - - tree, err := state.LoadStateTree(cst, stateroot) - if err != nil { - return cid.Undef, err - } - - act, err := tree.GetActor(actors.InitAddress) - if err != nil { - return cid.Undef, err - } - - var st actors.InitActorState - if err := cst.Get(ctx, act.Head, &st); err != nil { - return cid.Undef, err - } - - st.NextID = val - - nstate, err := cst.Put(ctx, &st) - if err != nil { - return cid.Undef, err - } - - act.Head = nstate - - if err := tree.SetActor(actors.InitAddress, act); err != nil { - return cid.Undef, err - } - - return tree.Flush() -} diff --git a/chain/market/fundmgr.go b/chain/market/fundmgr.go index 1500a78a5..aef3b98eb 100644 --- a/chain/market/fundmgr.go +++ b/chain/market/fundmgr.go @@ -4,76 +4,163 @@ import ( "context" "sync" - "golang.org/x/xerrors" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + "go.uber.org/fx" + + "github.com/filecoin-project/specs-actors/actors/builtin" + "github.com/ipfs/go-cid" + logging "github.com/ipfs/go-log" "github.com/filecoin-project/go-address" + "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/chain/actors" - "github.com/filecoin-project/lotus/chain/stmgr" + "github.com/filecoin-project/lotus/chain/actors/builtin/market" + "github.com/filecoin-project/lotus/chain/events" + "github.com/filecoin-project/lotus/chain/events/state" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/node/impl/full" ) -type FundMgr struct { - sm *stmgr.StateManager - mpool full.MpoolAPI +var log = logging.Logger("market_adapter") - lk sync.Mutex +// API is the dependencies need 
to run a fund manager +type API struct { + fx.In + + full.ChainAPI + full.StateAPI + full.MpoolAPI +} + +// FundMgr monitors available balances and adds funds when EnsureAvailable is called +type FundMgr struct { + api fundMgrAPI + + lk sync.RWMutex available map[address.Address]types.BigInt } -func NewFundMgr(sm *stmgr.StateManager, mpool full.MpoolAPI) *FundMgr { - return &FundMgr{ - sm: sm, - mpool: mpool, +// StartFundManager creates a new fund manager and sets up event hooks to manage state changes +func StartFundManager(lc fx.Lifecycle, api API) *FundMgr { + fm := newFundMgr(&api) + lc.Append(fx.Hook{ + OnStart: func(ctx context.Context) error { + ev := events.NewEvents(ctx, &api) + preds := state.NewStatePredicates(&api) + dealDiffFn := preds.OnStorageMarketActorChanged(preds.OnBalanceChanged(preds.AvailableBalanceChangedForAddresses(fm.getAddresses))) + match := func(oldTs, newTs *types.TipSet) (bool, events.StateChange, error) { + return dealDiffFn(ctx, oldTs.Key(), newTs.Key()) + } + return ev.StateChanged(fm.checkFunc, fm.stateChanged, fm.revert, 0, events.NoTimeout, match) + }, + }) + return fm +} +type fundMgrAPI interface { + StateMarketBalance(context.Context, address.Address, types.TipSetKey) (api.MarketBalance, error) + MpoolPushMessage(context.Context, *types.Message, *api.MessageSendSpec) (*types.SignedMessage, error) + StateLookupID(context.Context, address.Address, types.TipSetKey) (address.Address, error) +} + +func newFundMgr(api fundMgrAPI) *FundMgr { + return &FundMgr{ + api: api, available: map[address.Address]types.BigInt{}, } } -func (fm *FundMgr) EnsureAvailable(ctx context.Context, addr address.Address, amt types.BigInt) error { - fm.lk.Lock() - avail, ok := fm.available[addr] - if !ok { - bal, err := fm.sm.MarketBalance(ctx, addr, nil) - if err != nil { - fm.lk.Unlock() - return err - } +// checkFunc tells the events api to simply proceed (we always want to watch) +func (fm *FundMgr) checkFunc(ts *types.TipSet) (done bool, more bool, 
err error) { + return false, true, nil +} - avail = bal.Available - } - - toAdd := types.NewInt(0) - avail = types.BigSub(avail, amt) - if avail.LessThan(types.NewInt(0)) { - // TODO: some rules around adding more to avoid doing stuff on-chain - // all the time - toAdd = types.BigSub(toAdd, avail) - avail = types.NewInt(0) - } - fm.available[addr] = avail - - fm.lk.Unlock() - - smsg, err := fm.mpool.MpoolPushMessage(ctx, &types.Message{ - To: actors.StorageMarketAddress, - From: addr, - Value: toAdd, - GasPrice: types.NewInt(0), - GasLimit: types.NewInt(1000000), - Method: actors.SMAMethods.AddBalance, - }) - if err != nil { - return err - } - - _, r, err := fm.sm.WaitForMessage(ctx, smsg.Cid()) - if err != nil { - return err - } - - if r.ExitCode != 0 { - return xerrors.Errorf("adding funds to storage miner market actor failed: exit %d", r.ExitCode) - } +// revert handles reverts to balances +func (fm *FundMgr) revert(ctx context.Context, ts *types.TipSet) error { + // TODO: Is it ok to just ignore this? 
+ log.Warn("balance change reverted; TODO: actually handle this!") return nil } + +// stateChanged handles balance changes monitored on the chain from one tipset to the next +func (fm *FundMgr) stateChanged(ts *types.TipSet, ts2 *types.TipSet, states events.StateChange, h abi.ChainEpoch) (more bool, err error) { + changedBalances, ok := states.(state.ChangedBalances) + if !ok { + panic("Expected state.ChangedBalances") + } + // overwrite our in memory cache with new values from chain (chain is canonical) + fm.lk.Lock() + for addr, balanceChange := range changedBalances { + if fm.available[addr].Int != nil { + log.Infof("State balance change recorded, prev: %s, new: %s", fm.available[addr].String(), balanceChange.To.String()) + } + + fm.available[addr] = balanceChange.To + } + fm.lk.Unlock() + return true, nil +} + +func (fm *FundMgr) getAddresses() []address.Address { + fm.lk.RLock() + defer fm.lk.RUnlock() + addrs := make([]address.Address, 0, len(fm.available)) + for addr := range fm.available { + addrs = append(addrs, addr) + } + return addrs +} + +// EnsureAvailable looks at the available balance in escrow for a given +// address, and if less than the passed in amount, adds the difference +func (fm *FundMgr) EnsureAvailable(ctx context.Context, addr, wallet address.Address, amt types.BigInt) (cid.Cid, error) { + idAddr, err := fm.api.StateLookupID(ctx, addr, types.EmptyTSK) + if err != nil { + return cid.Undef, err + } + fm.lk.Lock() + bal, err := fm.api.StateMarketBalance(ctx, addr, types.EmptyTSK) + if err != nil { + fm.lk.Unlock() + return cid.Undef, err + } + + stateAvail := types.BigSub(bal.Escrow, bal.Locked) + + avail, ok := fm.available[idAddr] + if !ok { + avail = stateAvail + } + + toAdd := types.BigSub(amt, avail) + if toAdd.LessThan(types.NewInt(0)) { + toAdd = types.NewInt(0) + } + fm.available[idAddr] = big.Add(avail, toAdd) + fm.lk.Unlock() + + log.Infof("Funds operation w/ Expected Balance: %s, In State: %s, Requested: %s, Adding: %s", 
avail.String(), stateAvail.String(), amt.String(), toAdd.String()) + + if toAdd.LessThanEqual(big.Zero()) { + return cid.Undef, nil + } + + params, err := actors.SerializeParams(&addr) + if err != nil { + return cid.Undef, err + } + + smsg, err := fm.api.MpoolPushMessage(ctx, &types.Message{ + To: market.Address, + From: wallet, + Value: toAdd, + Method: builtin.MethodsMarket.AddBalance, + Params: params, + }, nil) + if err != nil { + return cid.Undef, err + } + + return smsg.Cid(), nil +} diff --git a/chain/market/fundmgr_test.go b/chain/market/fundmgr_test.go new file mode 100644 index 000000000..b05db55d8 --- /dev/null +++ b/chain/market/fundmgr_test.go @@ -0,0 +1,189 @@ +package market + +import ( + "context" + "errors" + "math/rand" + "testing" + "time" + + "github.com/stretchr/testify/require" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/crypto" + "github.com/filecoin-project/specs-actors/actors/builtin" + tutils "github.com/filecoin-project/specs-actors/support/testing" + + "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/chain/actors" + "github.com/filecoin-project/lotus/chain/actors/builtin/market" + "github.com/filecoin-project/lotus/chain/types" +) + +type fakeAPI struct { + returnedBalance api.MarketBalance + returnedBalanceErr error + signature crypto.Signature + receivedMessage *types.Message + pushMessageErr error + lookupIDErr error +} + +func (fapi *fakeAPI) StateLookupID(_ context.Context, addr address.Address, _ types.TipSetKey) (address.Address, error) { + return addr, fapi.lookupIDErr +} +func (fapi *fakeAPI) StateMarketBalance(context.Context, address.Address, types.TipSetKey) (api.MarketBalance, error) { + return fapi.returnedBalance, fapi.returnedBalanceErr +} + +func (fapi *fakeAPI) MpoolPushMessage(ctx context.Context, msg *types.Message, spec *api.MessageSendSpec) (*types.SignedMessage, error) { + 
fapi.receivedMessage = msg + return &types.SignedMessage{ + Message: *msg, + Signature: fapi.signature, + }, fapi.pushMessageErr +} + +func addFundsMsg(toAdd abi.TokenAmount, addr address.Address, wallet address.Address) *types.Message { + params, _ := actors.SerializeParams(&addr) + return &types.Message{ + To: market.Address, + From: wallet, + Value: toAdd, + Method: builtin.MethodsMarket.AddBalance, + Params: params, + } +} + +type expectedResult struct { + addAmt abi.TokenAmount + shouldAdd bool + err error +} + +func TestAddFunds(t *testing.T) { + ctx := context.Background() + testCases := map[string]struct { + returnedBalanceErr error + returnedBalance api.MarketBalance + addAmounts []abi.TokenAmount + pushMessageErr error + expectedResults []expectedResult + lookupIDErr error + }{ + "succeeds, trivial case": { + returnedBalance: api.MarketBalance{Escrow: abi.NewTokenAmount(0), Locked: abi.NewTokenAmount(0)}, + addAmounts: []abi.TokenAmount{abi.NewTokenAmount(100)}, + expectedResults: []expectedResult{ + { + addAmt: abi.NewTokenAmount(100), + shouldAdd: true, + err: nil, + }, + }, + }, + "succeeds, money already present": { + returnedBalance: api.MarketBalance{Escrow: abi.NewTokenAmount(150), Locked: abi.NewTokenAmount(50)}, + addAmounts: []abi.TokenAmount{abi.NewTokenAmount(100)}, + expectedResults: []expectedResult{ + { + shouldAdd: false, + err: nil, + }, + }, + }, + "succeeds, multiple adds": { + returnedBalance: api.MarketBalance{Escrow: abi.NewTokenAmount(150), Locked: abi.NewTokenAmount(50)}, + addAmounts: []abi.TokenAmount{abi.NewTokenAmount(100), abi.NewTokenAmount(200), abi.NewTokenAmount(250), abi.NewTokenAmount(250)}, + expectedResults: []expectedResult{ + { + shouldAdd: false, + err: nil, + }, + { + addAmt: abi.NewTokenAmount(100), + shouldAdd: true, + err: nil, + }, + { + addAmt: abi.NewTokenAmount(50), + shouldAdd: true, + err: nil, + }, + { + shouldAdd: false, + err: nil, + }, + }, + }, + "error on market balance": { + returnedBalanceErr: 
errors.New("something went wrong"), + addAmounts: []abi.TokenAmount{abi.NewTokenAmount(100)}, + expectedResults: []expectedResult{ + { + err: errors.New("something went wrong"), + }, + }, + }, + "error on push message": { + returnedBalance: api.MarketBalance{Escrow: abi.NewTokenAmount(0), Locked: abi.NewTokenAmount(0)}, + pushMessageErr: errors.New("something went wrong"), + addAmounts: []abi.TokenAmount{abi.NewTokenAmount(100)}, + expectedResults: []expectedResult{ + { + err: errors.New("something went wrong"), + }, + }, + }, + "error looking up address": { + lookupIDErr: errors.New("something went wrong"), + addAmounts: []abi.TokenAmount{abi.NewTokenAmount(100)}, + expectedResults: []expectedResult{ + { + err: errors.New("something went wrong"), + }, + }, + }, + } + + for testCase, data := range testCases { + //nolint:scopelint + t.Run(testCase, func(t *testing.T) { + ctx, cancel := context.WithTimeout(ctx, 5*time.Second) + defer cancel() + sig := make([]byte, 100) + _, err := rand.Read(sig) + require.NoError(t, err) + fapi := &fakeAPI{ + returnedBalance: data.returnedBalance, + returnedBalanceErr: data.returnedBalanceErr, + signature: crypto.Signature{ + Type: crypto.SigTypeUnknown, + Data: sig, + }, + pushMessageErr: data.pushMessageErr, + lookupIDErr: data.lookupIDErr, + } + fundMgr := newFundMgr(fapi) + addr := tutils.NewIDAddr(t, uint64(rand.Uint32())) + wallet := tutils.NewIDAddr(t, uint64(rand.Uint32())) + for i, amount := range data.addAmounts { + fapi.receivedMessage = nil + _, err := fundMgr.EnsureAvailable(ctx, addr, wallet, amount) + expected := data.expectedResults[i] + if expected.err == nil { + require.NoError(t, err) + if expected.shouldAdd { + expectedMessage := addFundsMsg(expected.addAmt, addr, wallet) + require.Equal(t, expectedMessage, fapi.receivedMessage) + } else { + require.Nil(t, fapi.receivedMessage) + } + } else { + require.EqualError(t, err, expected.err.Error()) + } + } + }) + } +} diff --git a/chain/messagepool/block_proba.go 
b/chain/messagepool/block_proba.go new file mode 100644 index 000000000..61bb018d7 --- /dev/null +++ b/chain/messagepool/block_proba.go @@ -0,0 +1,102 @@ +package messagepool + +import ( + "math" + "sync" +) + +var noWinnersProbCache []float64 +var noWinnersProbOnce sync.Once + +func noWinnersProb() []float64 { + noWinnersProbOnce.Do(func() { + poissPdf := func(x float64) float64 { + const Mu = 5 + lg, _ := math.Lgamma(x + 1) + result := math.Exp((math.Log(Mu) * x) - lg - Mu) + return result + } + + out := make([]float64, 0, MaxBlocks) + for i := 0; i < MaxBlocks; i++ { + out = append(out, poissPdf(float64(i))) + } + noWinnersProbCache = out + }) + return noWinnersProbCache +} + +var noWinnersProbAssumingCache []float64 +var noWinnersProbAssumingOnce sync.Once + +func noWinnersProbAssumingMoreThanOne() []float64 { + noWinnersProbAssumingOnce.Do(func() { + cond := math.Log(-1 + math.Exp(5)) + poissPdf := func(x float64) float64 { + const Mu = 5 + lg, _ := math.Lgamma(x + 1) + result := math.Exp((math.Log(Mu) * x) - lg - cond) + return result + } + + out := make([]float64, 0, MaxBlocks) + for i := 0; i < MaxBlocks; i++ { + out = append(out, poissPdf(float64(i+1))) + } + noWinnersProbAssumingCache = out + }) + return noWinnersProbAssumingCache +} + +func binomialCoefficient(n, k float64) float64 { + if k > n { + return math.NaN() + } + r := 1.0 + for d := 1.0; d <= k; d++ { + r *= n + r /= d + n-- + } + return r +} + +func (mp *MessagePool) blockProbabilities(tq float64) []float64 { + noWinners := noWinnersProbAssumingMoreThanOne() + + p := 1 - tq + binoPdf := func(x, trials float64) float64 { + // based on https://github.com/atgjack/prob + if x > trials { + return 0 + } + if p == 0 { + if x == 0 { + return 1.0 + } + return 0.0 + } + if p == 1 { + if x == trials { + return 1.0 + } + return 0.0 + } + coef := binomialCoefficient(trials, x) + pow := math.Pow(p, x) * math.Pow(1-p, trials-x) + if math.IsInf(coef, 0) { + return 0 + } + return coef * pow + } + + out := 
make([]float64, 0, MaxBlocks) + for place := 0; place < MaxBlocks; place++ { + var pPlace float64 + for otherWinners, pCase := range noWinners { + pPlace += pCase * binoPdf(float64(place), float64(otherWinners)) + } + out = append(out, pPlace) + } + return out +} diff --git a/chain/messagepool/block_proba_test.go b/chain/messagepool/block_proba_test.go new file mode 100644 index 000000000..93f51e887 --- /dev/null +++ b/chain/messagepool/block_proba_test.go @@ -0,0 +1,43 @@ +package messagepool + +import ( + "math" + "math/rand" + "testing" + "time" +) + +func TestBlockProbability(t *testing.T) { + mp := &MessagePool{} + bp := mp.blockProbabilities(1 - 0.15) + t.Logf("%+v\n", bp) + for i := 0; i < len(bp)-1; i++ { + if bp[i] < bp[i+1] { + t.Fatalf("expected decreasing block probabilities for this quality: %d %f %f", + i, bp[i], bp[i+1]) + } + } +} + +func TestWinnerProba(t *testing.T) { + rand.Seed(time.Now().UnixNano()) + const N = 1000000 + winnerProba := noWinnersProb() + sum := 0 + for i := 0; i < N; i++ { + minersRand := rand.Float64() + j := 0 + for ; j < MaxBlocks; j++ { + minersRand -= winnerProba[j] + if minersRand < 0 { + break + } + } + sum += j + } + + if avg := float64(sum) / N; math.Abs(avg-5) > 0.01 { + t.Fatalf("avg too far off: %f", avg) + } + +} diff --git a/chain/messagepool/config.go b/chain/messagepool/config.go new file mode 100644 index 000000000..f8f0ee985 --- /dev/null +++ b/chain/messagepool/config.go @@ -0,0 +1,92 @@ +package messagepool + +import ( + "encoding/json" + "fmt" + "time" + + "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/node/modules/dtypes" + "github.com/ipfs/go-datastore" +) + +var ( + ReplaceByFeeRatioDefault = 1.25 + MemPoolSizeLimitHiDefault = 30000 + MemPoolSizeLimitLoDefault = 20000 + PruneCooldownDefault = time.Minute + GasLimitOverestimation = 1.25 + + ConfigKey = datastore.NewKey("/mpool/config") +) + +func loadConfig(ds dtypes.MetadataDS) (*types.MpoolConfig, error) { + haveCfg, 
err := ds.Has(ConfigKey) + if err != nil { + return nil, err + } + + if !haveCfg { + return DefaultConfig(), nil + } + + cfgBytes, err := ds.Get(ConfigKey) + if err != nil { + return nil, err + } + cfg := new(types.MpoolConfig) + err = json.Unmarshal(cfgBytes, cfg) + return cfg, err +} + +func saveConfig(cfg *types.MpoolConfig, ds dtypes.MetadataDS) error { + cfgBytes, err := json.Marshal(cfg) + if err != nil { + return err + } + return ds.Put(ConfigKey, cfgBytes) +} + +func (mp *MessagePool) GetConfig() *types.MpoolConfig { + mp.cfgLk.Lock() + defer mp.cfgLk.Unlock() + return mp.cfg.Clone() +} + +func validateConfg(cfg *types.MpoolConfig) error { + if cfg.ReplaceByFeeRatio < ReplaceByFeeRatioDefault { + return fmt.Errorf("'ReplaceByFeeRatio' is less than required %f < %f", + cfg.ReplaceByFeeRatio, ReplaceByFeeRatioDefault) + } + if cfg.GasLimitOverestimation < 1 { + return fmt.Errorf("'GasLimitOverestimation' cannot be less than 1") + } + return nil +} + +func (mp *MessagePool) SetConfig(cfg *types.MpoolConfig) error { + if err := validateConfg(cfg); err != nil { + return err + } + cfg = cfg.Clone() + + mp.cfgLk.Lock() + mp.cfg = cfg + err := saveConfig(cfg, mp.ds) + if err != nil { + log.Warnf("error persisting mpool config: %s", err) + } + mp.cfgLk.Unlock() + + return nil +} + +func DefaultConfig() *types.MpoolConfig { + return &types.MpoolConfig{ + SizeLimitHigh: MemPoolSizeLimitHiDefault, + SizeLimitLow: MemPoolSizeLimitLoDefault, + ReplaceByFeeRatio: ReplaceByFeeRatioDefault, + PruneCooldown: PruneCooldownDefault, + GasLimitOverestimation: GasLimitOverestimation, + } +} diff --git a/chain/messagepool/gasguess/guessgas.go b/chain/messagepool/gasguess/guessgas.go new file mode 100644 index 000000000..af58db7d2 --- /dev/null +++ b/chain/messagepool/gasguess/guessgas.go @@ -0,0 +1,79 @@ +package gasguess + +import ( + "context" + + "github.com/ipfs/go-cid" + "golang.org/x/xerrors" + + "github.com/filecoin-project/lotus/chain/types" + + 
"github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/specs-actors/actors/builtin" +) + +type ActorLookup func(context.Context, address.Address, types.TipSetKey) (*types.Actor, error) + +const failedGasGuessRatio = 0.5 +const failedGasGuessMax = 25_000_000 + +const MinGas = 1298450 +const MaxGas = 1600271356 + +type CostKey struct { + Code cid.Cid + M abi.MethodNum +} + +var Costs = map[CostKey]int64{ + {builtin.InitActorCodeID, 2}: 8916753, + {builtin.StorageMarketActorCodeID, 2}: 6955002, + {builtin.StorageMarketActorCodeID, 4}: 245436108, + {builtin.StorageMinerActorCodeID, 4}: 2315133, + {builtin.StorageMinerActorCodeID, 5}: 1600271356, + {builtin.StorageMinerActorCodeID, 6}: 22864493, + {builtin.StorageMinerActorCodeID, 7}: 142002419, + {builtin.StorageMinerActorCodeID, 10}: 23008274, + {builtin.StorageMinerActorCodeID, 11}: 19303178, + {builtin.StorageMinerActorCodeID, 14}: 566356835, + {builtin.StorageMinerActorCodeID, 16}: 5325185, + {builtin.StorageMinerActorCodeID, 18}: 2328637, + {builtin.StoragePowerActorCodeID, 2}: 23600956, +} + +func failedGuess(msg *types.SignedMessage) int64 { + guess := int64(float64(msg.Message.GasLimit) * failedGasGuessRatio) + if guess > failedGasGuessMax { + guess = failedGasGuessMax + } + return guess +} + +func GuessGasUsed(ctx context.Context, tsk types.TipSetKey, msg *types.SignedMessage, al ActorLookup) (int64, error) { + if msg.Message.Method == builtin.MethodSend { + switch msg.Message.From.Protocol() { + case address.BLS: + return 1298450, nil + case address.SECP256K1: + return 1385999, nil + default: + // who knows? 
+ return 1298450, nil + } + } + + to, err := al(ctx, msg.Message.To, tsk) + if err != nil { + return failedGuess(msg), xerrors.Errorf("could not lookup actor: %w", err) + } + + guess, ok := Costs[CostKey{to.Code, msg.Message.Method}] + if !ok { + return failedGuess(msg), xerrors.Errorf("unknown code-method combo") + } + if guess > msg.Message.GasLimit { + guess = msg.Message.GasLimit + } + return guess, nil +} diff --git a/chain/messagepool/messagepool.go b/chain/messagepool/messagepool.go index 8bbf3823a..96900925f 100644 --- a/chain/messagepool/messagepool.go +++ b/chain/messagepool/messagepool.go @@ -4,32 +4,60 @@ import ( "bytes" "context" "errors" + "fmt" + "math" + stdbig "math/big" "sort" "sync" "time" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/go-state-types/crypto" + "github.com/hashicorp/go-multierror" lru "github.com/hashicorp/golang-lru" "github.com/ipfs/go-cid" "github.com/ipfs/go-datastore" "github.com/ipfs/go-datastore/namespace" "github.com/ipfs/go-datastore/query" - logging "github.com/ipfs/go-log" + logging "github.com/ipfs/go-log/v2" pubsub "github.com/libp2p/go-libp2p-pubsub" lps "github.com/whyrusleeping/pubsub" - "go.uber.org/multierr" "golang.org/x/xerrors" "github.com/filecoin-project/go-address" + "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/build" - "github.com/filecoin-project/lotus/chain/stmgr" "github.com/filecoin-project/lotus/chain/store" "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/chain/vm" + "github.com/filecoin-project/lotus/journal" + "github.com/filecoin-project/lotus/lib/sigs" "github.com/filecoin-project/lotus/node/modules/dtypes" + + "github.com/raulk/clock" ) var log = logging.Logger("messagepool") +var futureDebug = false + +var rbfNumBig = types.NewInt(uint64((ReplaceByFeeRatioDefault - 1) * RbfDenom)) +var rbfDenomBig = types.NewInt(RbfDenom) + +const RbfDenom = 
256 + +var RepublishInterval = time.Duration(10*build.BlockDelaySecs+build.PropagationDelaySecs) * time.Second + +var minimumBaseFee = types.NewInt(uint64(build.MinimumBaseFee)) +var baseFeeLowerBoundFactor = types.NewInt(10) +var baseFeeLowerBoundFactorConservative = types.NewInt(100) + +var MaxActorPendingMessages = 1000 + +var MaxNonceGap = uint64(4) + var ( ErrMessageTooBig = errors.New("message too big") @@ -37,24 +65,67 @@ var ( ErrNonceTooLow = errors.New("message nonce too low") + ErrGasFeeCapTooLow = errors.New("gas fee cap too low") + ErrNotEnoughFunds = errors.New("not enough funds to execute transaction") ErrInvalidToAddr = errors.New("message had invalid to address") + + ErrSoftValidationFailure = errors.New("validation failure") + ErrRBFTooLowPremium = errors.New("replace by fee has too low GasPremium") + ErrTooManyPendingMessages = errors.New("too many pending messages for actor") + ErrNonceGap = errors.New("unfulfilled nonce gap") + + ErrTryAgain = errors.New("state inconsistency while pushing message; please try again") ) const ( - msgTopic = "/fil/messages" - localMsgsDs = "/mpool/local" localUpdates = "update" ) +// Journal event types. +const ( + evtTypeMpoolAdd = iota + evtTypeMpoolRemove + evtTypeMpoolRepub +) + +// MessagePoolEvt is the journal entry for message pool events. 
+type MessagePoolEvt struct { + Action string + Messages []MessagePoolEvtMessage + Error error `json:",omitempty"` +} + +type MessagePoolEvtMessage struct { + types.Message + + CID cid.Cid +} + +func init() { + // if the republish interval is too short compared to the pubsub timecache, adjust it + minInterval := pubsub.TimeCacheDuration + time.Duration(build.PropagationDelaySecs) + if RepublishInterval < minInterval { + RepublishInterval = minInterval + } +} + type MessagePool struct { lk sync.Mutex - closer chan struct{} - repubTk *time.Ticker + ds dtypes.MetadataDS + + addSema chan struct{} + + closer chan struct{} + + repubTk *clock.Ticker + repubTrigger chan struct{} + + republished map[cid.Cid]struct{} localAddrs map[address.Address]struct{} @@ -63,114 +134,219 @@ type MessagePool struct { curTsLk sync.Mutex // DO NOT LOCK INSIDE lk curTs *types.TipSet + cfgLk sync.Mutex + cfg *types.MpoolConfig + api Provider minGasPrice types.BigInt - maxTxPoolSize int + currentSize int + + // pruneTrigger is a channel used to trigger a mempool pruning + pruneTrigger chan struct{} + + // pruneCooldown is a channel used to allow a cooldown time between prunes + pruneCooldown chan struct{} blsSigCache *lru.TwoQueueCache changes *lps.PubSub localMsgs datastore.Datastore + + netName dtypes.NetworkName + + sigValCache *lru.TwoQueueCache + + evtTypes [3]journal.EventType } type msgSet struct { - msgs map[uint64]*types.SignedMessage - nextNonce uint64 + msgs map[uint64]*types.SignedMessage + nextNonce uint64 + requiredFunds *stdbig.Int } -func newMsgSet() *msgSet { +func newMsgSet(nonce uint64) *msgSet { return &msgSet{ - msgs: make(map[uint64]*types.SignedMessage), + msgs: make(map[uint64]*types.SignedMessage), + nextNonce: nonce, + requiredFunds: stdbig.NewInt(0), } } -func (ms *msgSet) add(m *types.SignedMessage) error { - if len(ms.msgs) == 0 || m.Message.Nonce >= ms.nextNonce { - ms.nextNonce = m.Message.Nonce + 1 +func ComputeMinRBF(curPrem abi.TokenAmount) abi.TokenAmount { 
+ minPrice := types.BigAdd(curPrem, types.BigDiv(types.BigMul(curPrem, rbfNumBig), rbfDenomBig)) + return types.BigAdd(minPrice, types.NewInt(1)) +} + +func CapGasFee(msg *types.Message, maxFee abi.TokenAmount) { + if maxFee.Equals(big.Zero()) { + maxFee = types.NewInt(build.FilecoinPrecision / 10) } - if _, has := ms.msgs[m.Message.Nonce]; has { - if m.Cid() != ms.msgs[m.Message.Nonce].Cid() { - log.Error("Add with duplicate nonce") - return xerrors.Errorf("message to %s with nonce %d already in mpool") + + gl := types.NewInt(uint64(msg.GasLimit)) + totalFee := types.BigMul(msg.GasFeeCap, gl) + + if totalFee.LessThanEqual(maxFee) { + return + } + + msg.GasFeeCap = big.Div(maxFee, gl) + msg.GasPremium = big.Min(msg.GasFeeCap, msg.GasPremium) // cap premium at FeeCap +} + +func (ms *msgSet) add(m *types.SignedMessage, mp *MessagePool, strict bool) (bool, error) { + nextNonce := ms.nextNonce + nonceGap := false + switch { + case m.Message.Nonce == nextNonce: + nextNonce++ + // advance if we are filling a gap + for _, fillGap := ms.msgs[nextNonce]; fillGap; _, fillGap = ms.msgs[nextNonce] { + nextNonce++ } + + case strict && m.Message.Nonce > nextNonce+MaxNonceGap: + return false, xerrors.Errorf("message nonce has too big a gap from expected nonce (Nonce: %d, nextNonce: %d): %w", m.Message.Nonce, nextNonce, ErrNonceGap) + + case m.Message.Nonce > nextNonce: + nonceGap = true } + + exms, has := ms.msgs[m.Message.Nonce] + if has { + // refuse RBF if we have a gap + if strict && nonceGap { + return false, xerrors.Errorf("rejecting replace by fee because of nonce gap (Nonce: %d, nextNonce: %d): %w", m.Message.Nonce, nextNonce, ErrNonceGap) + } + + if m.Cid() != exms.Cid() { + // check if RBF passes + minPrice := ComputeMinRBF(exms.Message.GasPremium) + if types.BigCmp(m.Message.GasPremium, minPrice) >= 0 { + log.Infow("add with RBF", "oldpremium", exms.Message.GasPremium, + "newpremium", m.Message.GasPremium, "addr", m.Message.From, "nonce", m.Message.Nonce) + } else { + 
log.Info("add with duplicate nonce") + return false, xerrors.Errorf("message from %s with nonce %d already in mpool,"+ + " increase GasPremium to %s from %s to trigger replace by fee: %w", + m.Message.From, m.Message.Nonce, minPrice, m.Message.GasPremium, + ErrRBFTooLowPremium) + } + } else { + return false, xerrors.Errorf("message from %s with nonce %d already in mpool: %w", + m.Message.From, m.Message.Nonce, ErrSoftValidationFailure) + } + + ms.requiredFunds.Sub(ms.requiredFunds, exms.Message.RequiredFunds().Int) + //ms.requiredFunds.Sub(ms.requiredFunds, exms.Message.Value.Int) + } + + if !has && strict && len(ms.msgs) > MaxActorPendingMessages { + log.Errorf("too many pending messages from actor %s", m.Message.From) + return false, ErrTooManyPendingMessages + } + + if strict && nonceGap { + log.Warnf("adding nonce-gapped message from %s (nonce: %d, nextNonce: %d)", + m.Message.From, m.Message.Nonce, nextNonce) + } + + ms.nextNonce = nextNonce ms.msgs[m.Message.Nonce] = m + ms.requiredFunds.Add(ms.requiredFunds, m.Message.RequiredFunds().Int) + //ms.requiredFunds.Add(ms.requiredFunds, m.Message.Value.Int) - return nil + return !has, nil } -type Provider interface { - SubscribeHeadChanges(func(rev, app []*types.TipSet) error) *types.TipSet - PutMessage(m store.ChainMsg) (cid.Cid, error) - PubSubPublish(string, []byte) error - StateGetActor(address.Address, *types.TipSet) (*types.Actor, error) - MessagesForBlock(*types.BlockHeader) ([]*types.Message, []*types.SignedMessage, error) - MessagesForTipset(*types.TipSet) ([]store.ChainMsg, error) - LoadTipSet(tsk types.TipSetKey) (*types.TipSet, error) +func (ms *msgSet) rm(nonce uint64, applied bool) { + m, has := ms.msgs[nonce] + if !has { + if applied && nonce >= ms.nextNonce { + // we removed a message we did not know about because it was applied + // we need to adjust the nonce and check if we filled a gap + ms.nextNonce = nonce + 1 + for _, fillGap := ms.msgs[ms.nextNonce]; fillGap; _, fillGap = 
ms.msgs[ms.nextNonce] { + ms.nextNonce++ + } + } + return + } + + ms.requiredFunds.Sub(ms.requiredFunds, m.Message.RequiredFunds().Int) + //ms.requiredFunds.Sub(ms.requiredFunds, m.Message.Value.Int) + delete(ms.msgs, nonce) + + // adjust next nonce + if applied { + // we removed a (known) message because it was applied in a tipset + // we can't possibly have filled a gap in this case + if nonce >= ms.nextNonce { + ms.nextNonce = nonce + 1 + } + return + } + + // we removed a message because it was pruned + // we have to adjust the nonce if it creates a gap or rewinds state + if nonce < ms.nextNonce { + ms.nextNonce = nonce + } } -type mpoolProvider struct { - sm *stmgr.StateManager - ps *pubsub.PubSub +func (ms *msgSet) getRequiredFunds(nonce uint64) types.BigInt { + requiredFunds := new(stdbig.Int).Set(ms.requiredFunds) + + m, has := ms.msgs[nonce] + if has { + requiredFunds.Sub(requiredFunds, m.Message.RequiredFunds().Int) + //requiredFunds.Sub(requiredFunds, m.Message.Value.Int) + } + + return types.BigInt{Int: requiredFunds} } -func NewProvider(sm *stmgr.StateManager, ps *pubsub.PubSub) Provider { - return &mpoolProvider{sm, ps} -} - -func (mpp *mpoolProvider) SubscribeHeadChanges(cb func(rev, app []*types.TipSet) error) *types.TipSet { - mpp.sm.ChainStore().SubscribeHeadChanges(cb) - return mpp.sm.ChainStore().GetHeaviestTipSet() -} - -func (mpp *mpoolProvider) PutMessage(m store.ChainMsg) (cid.Cid, error) { - return mpp.sm.ChainStore().PutMessage(m) -} - -func (mpp *mpoolProvider) PubSubPublish(k string, v []byte) error { - return mpp.ps.Publish(k, v) -} - -func (mpp *mpoolProvider) StateGetActor(addr address.Address, ts *types.TipSet) (*types.Actor, error) { - return mpp.sm.GetActor(addr, ts) -} - -func (mpp *mpoolProvider) MessagesForBlock(h *types.BlockHeader) ([]*types.Message, []*types.SignedMessage, error) { - return mpp.sm.ChainStore().MessagesForBlock(h) -} - -func (mpp *mpoolProvider) MessagesForTipset(ts *types.TipSet) ([]store.ChainMsg, error) { - 
return mpp.sm.ChainStore().MessagesForTipset(ts) -} - -func (mpp *mpoolProvider) LoadTipSet(tsk types.TipSetKey) (*types.TipSet, error) { - return mpp.sm.ChainStore().LoadTipSet(tsk) -} - -func New(api Provider, ds dtypes.MetadataDS) (*MessagePool, error) { +func New(api Provider, ds dtypes.MetadataDS, netName dtypes.NetworkName) (*MessagePool, error) { cache, _ := lru.New2Q(build.BlsSignatureCacheSize) + verifcache, _ := lru.New2Q(build.VerifSigCacheSize) + + cfg, err := loadConfig(ds) + if err != nil { + return nil, xerrors.Errorf("error loading mpool config: %w", err) + } + mp := &MessagePool{ + ds: ds, + addSema: make(chan struct{}, 1), closer: make(chan struct{}), - repubTk: time.NewTicker(build.BlockDelay * 10 * time.Second), + repubTk: build.Clock.Ticker(RepublishInterval), + repubTrigger: make(chan struct{}, 1), localAddrs: make(map[address.Address]struct{}), pending: make(map[address.Address]*msgSet), minGasPrice: types.NewInt(0), - maxTxPoolSize: 5000, + pruneTrigger: make(chan struct{}, 1), + pruneCooldown: make(chan struct{}, 1), blsSigCache: cache, + sigValCache: verifcache, changes: lps.New(50), localMsgs: namespace.Wrap(ds, datastore.NewKey(localMsgsDs)), api: api, + netName: netName, + cfg: cfg, + evtTypes: [...]journal.EventType{ + evtTypeMpoolAdd: journal.J.RegisterEventType("mpool", "add"), + evtTypeMpoolRemove: journal.J.RegisterEventType("mpool", "remove"), + evtTypeMpoolRepub: journal.J.RegisterEventType("mpool", "repub"), + }, } - if err := mp.loadLocal(); err != nil { - log.Errorf("loading local messages: %+v", err) - } - - go mp.repubLocal() + // enable initial prunes + mp.pruneCooldown <- struct{}{} + // load the current tipset and subscribe to head changes _before_ loading local messages mp.curTs = api.SubscribeHeadChanges(func(rev, app []*types.TipSet) error { err := mp.HeadChange(rev, app) if err != nil { @@ -179,6 +355,12 @@ func New(api Provider, ds dtypes.MetadataDS) (*MessagePool, error) { return err }) + if err := mp.loadLocal(); 
err != nil { + log.Errorf("loading local messages: %+v", err) + } + + go mp.runLoop() + return mp, nil } @@ -187,70 +369,40 @@ func (mp *MessagePool) Close() error { return nil } -func (mp *MessagePool) repubLocal() { +func (mp *MessagePool) Prune() { + // this magic incantation of triggering prune thrice is here to make the Prune method + // synchronous: + // so, its a single slot buffered channel. The first send fills the channel, + // the second send goes through when the pruning starts, + // and the third send goes through (and noops) after the pruning finishes + // and goes through the loop again + mp.pruneTrigger <- struct{}{} + mp.pruneTrigger <- struct{}{} + mp.pruneTrigger <- struct{}{} +} + +func (mp *MessagePool) runLoop() { for { select { case <-mp.repubTk.C: - mp.lk.Lock() - - msgsForAddr := make(map[address.Address][]*types.SignedMessage) - for a := range mp.localAddrs { - msgsForAddr[a] = mp.pendingFor(a) + if err := mp.republishPendingMessages(); err != nil { + log.Errorf("error while republishing messages: %s", err) + } + case <-mp.repubTrigger: + if err := mp.republishPendingMessages(); err != nil { + log.Errorf("error while republishing messages: %s", err) } - mp.lk.Unlock() - - var errout error - outputMsgs := []*types.SignedMessage{} - - for a, msgs := range msgsForAddr { - a, err := mp.api.StateGetActor(a, nil) - if err != nil { - errout = multierr.Append(errout, xerrors.Errorf("could not get actor state: %w", err)) - continue - } - - curNonce := a.Nonce - for _, m := range msgs { - if m.Message.Nonce < curNonce { - continue - } - if m.Message.Nonce != curNonce { - break - } - outputMsgs = append(outputMsgs, m) - curNonce++ - } - + case <-mp.pruneTrigger: + if err := mp.pruneExcessMessages(); err != nil { + log.Errorf("failed to prune excess messages from mempool: %s", err) } - if len(outputMsgs) != 0 { - log.Infow("republishing local messages", "n", len(outputMsgs)) - } - - for _, msg := range outputMsgs { - msgb, err := msg.Serialize() - if 
err != nil { - errout = multierr.Append(errout, xerrors.Errorf("could not serialize: %w", err)) - continue - } - - err = mp.api.PubSubPublish(msgTopic, msgb) - if err != nil { - errout = multierr.Append(errout, xerrors.Errorf("could not publish: %w", err)) - continue - } - } - - if errout != nil { - log.Errorf("errors while republishing: %+v", errout) - } case <-mp.closer: mp.repubTk.Stop() return } } - } func (mp *MessagePool) addLocal(m *types.SignedMessage, msgb []byte) error { @@ -263,15 +415,83 @@ func (mp *MessagePool) addLocal(m *types.SignedMessage, msgb []byte) error { return nil } +// verifyMsgBeforeAdd verifies that the message meets the minimum criteria for block inclusio +// and whether the message has enough funds to be included in the next 20 blocks. +// If the message is not valid for block inclusion, it returns an error. +// For local messages, if the message can be included in the next 20 blocks, it returns true to +// signal that it should be immediately published. If the message cannot be included in the next 20 +// blocks, it returns false so that the message doesn't immediately get published (and ignored by our +// peers); instead it will be published through the republish loop, once the base fee has fallen +// sufficiently. +// For non local messages, if the message cannot be included in the next 20 blocks it returns +// a (soft) validation error. 
+func (mp *MessagePool) verifyMsgBeforeAdd(m *types.SignedMessage, curTs *types.TipSet, local bool) (bool, error) { + epoch := curTs.Height() + minGas := vm.PricelistByEpoch(epoch).OnChainMessage(m.ChainLength()) + + if err := m.VMMessage().ValidForBlockInclusion(minGas.Total()); err != nil { + return false, xerrors.Errorf("message will not be included in a block: %w", err) + } + + // this checks if the GasFeeCap is suffisciently high for inclusion in the next 20 blocks + // if the GasFeeCap is too low, we soft reject the message (Ignore in pubsub) and rely + // on republish to push it through later, if the baseFee has fallen. + // this is a defensive check that stops minimum baseFee spam attacks from overloading validation + // queues. + // Note that for local messages, we always add them so that they can be accepted and republished + // automatically. + publish := local + + var baseFee big.Int + if len(curTs.Blocks()) > 0 { + baseFee = curTs.Blocks()[0].ParentBaseFee + } else { + var err error + baseFee, err = mp.api.ChainComputeBaseFee(context.TODO(), curTs) + if err != nil { + return false, xerrors.Errorf("computing basefee: %w", err) + } + } + + baseFeeLowerBound := getBaseFeeLowerBound(baseFee, baseFeeLowerBoundFactorConservative) + if m.Message.GasFeeCap.LessThan(baseFeeLowerBound) { + if local { + log.Warnf("local message will not be immediately published because GasFeeCap doesn't meet the lower bound for inclusion in the next 20 blocks (GasFeeCap: %s, baseFeeLowerBound: %s)", + m.Message.GasFeeCap, baseFeeLowerBound) + publish = false + } else { + return false, xerrors.Errorf("GasFeeCap doesn't meet base fee lower bound for inclusion in the next 20 blocks (GasFeeCap: %s, baseFeeLowerBound: %s): %w", + m.Message.GasFeeCap, baseFeeLowerBound, ErrSoftValidationFailure) + } + } + + return publish, nil +} + func (mp *MessagePool) Push(m *types.SignedMessage) (cid.Cid, error) { + err := mp.checkMessage(m) + if err != nil { + return cid.Undef, err + } + + // 
serialize push access to reduce lock contention + mp.addSema <- struct{}{} + defer func() { + <-mp.addSema + }() + msgb, err := m.Serialize() if err != nil { return cid.Undef, err } - if err := mp.Add(m); err != nil { + mp.curTsLk.Lock() + publish, err := mp.addTs(m, mp.curTs, true) + if err != nil { + mp.curTsLk.Unlock() return cid.Undef, err } + mp.curTsLk.Unlock() mp.lk.Lock() if err := mp.addLocal(m, msgb); err != nil { @@ -280,21 +500,24 @@ func (mp *MessagePool) Push(m *types.SignedMessage) (cid.Cid, error) { } mp.lk.Unlock() - return m.Cid(), mp.api.PubSubPublish(msgTopic, msgb) + if publish { + err = mp.api.PubSubPublish(build.MessagesTopic(mp.netName), msgb) + } + + return m.Cid(), err } -func (mp *MessagePool) Add(m *types.SignedMessage) error { - mp.curTsLk.Lock() - defer mp.curTsLk.Unlock() - return mp.addTs(m, mp.curTs) -} - -func (mp *MessagePool) addTs(m *types.SignedMessage, curTs *types.TipSet) error { +func (mp *MessagePool) checkMessage(m *types.SignedMessage) error { // big messages are bad, anti DOS if m.Size() > 32*1024 { return xerrors.Errorf("mpool message too large (%dB): %w", m.Size(), ErrMessageTooBig) } + // Perform syntactic validation, minGas=0 as we check the actual mingas before we add it + if err := m.Message.ValidForBlockInclusion(0); err != nil { + return xerrors.Errorf("message not valid for block inclusion: %w", err) + } + if m.Message.To == address.Undef { return ErrInvalidToAddr } @@ -303,45 +526,171 @@ func (mp *MessagePool) addTs(m *types.SignedMessage, curTs *types.TipSet) error return ErrMessageValueTooHigh } - if err := m.Signature.Verify(m.Message.From, m.Message.Cid().Bytes()); err != nil { - log.Warnf("mpooladd signature verification failed: %s", err) + if m.Message.GasFeeCap.LessThan(minimumBaseFee) { + return ErrGasFeeCapTooLow + } + + if err := mp.VerifyMsgSig(m); err != nil { + log.Warnf("signature verification failed: %s", err) return err } + return nil +} + +func (mp *MessagePool) Add(m *types.SignedMessage) 
error { + err := mp.checkMessage(m) + if err != nil { + return err + } + + // serialize push access to reduce lock contention + mp.addSema <- struct{}{} + defer func() { + <-mp.addSema + }() + + mp.curTsLk.Lock() + defer mp.curTsLk.Unlock() + + _, err = mp.addTs(m, mp.curTs, false) + return err +} + +func sigCacheKey(m *types.SignedMessage) (string, error) { + switch m.Signature.Type { + case crypto.SigTypeBLS: + if len(m.Signature.Data) < 90 { + return "", fmt.Errorf("bls signature too short") + } + + return string(m.Cid().Bytes()) + string(m.Signature.Data[64:]), nil + case crypto.SigTypeSecp256k1: + return string(m.Cid().Bytes()), nil + default: + return "", xerrors.Errorf("unrecognized signature type: %d", m.Signature.Type) + } +} + +func (mp *MessagePool) VerifyMsgSig(m *types.SignedMessage) error { + sck, err := sigCacheKey(m) + if err != nil { + return err + } + + _, ok := mp.sigValCache.Get(sck) + if ok { + // already validated, great + return nil + } + + if err := sigs.Verify(&m.Signature, m.Message.From, m.Message.Cid().Bytes()); err != nil { + return err + } + + mp.sigValCache.Add(sck, struct{}{}) + + return nil +} + +func (mp *MessagePool) checkBalance(m *types.SignedMessage, curTs *types.TipSet) error { + balance, err := mp.getStateBalance(m.Message.From, curTs) + if err != nil { + return xerrors.Errorf("failed to check sender balance: %s: %w", err, ErrSoftValidationFailure) + } + + requiredFunds := m.Message.RequiredFunds() + if balance.LessThan(requiredFunds) { + return xerrors.Errorf("not enough funds (required: %s, balance: %s): %w", types.FIL(requiredFunds), types.FIL(balance), ErrNotEnoughFunds) + } + + // add Value for soft failure check + //requiredFunds = types.BigAdd(requiredFunds, m.Message.Value) + + mset, ok := mp.pending[m.Message.From] + if ok { + requiredFunds = types.BigAdd(requiredFunds, mset.getRequiredFunds(m.Message.Nonce)) + } + + if balance.LessThan(requiredFunds) { + // Note: we fail here for ErrSoftValidationFailure to signal a 
soft failure because we might + // be out of sync. + return xerrors.Errorf("not enough funds including pending messages (required: %s, balance: %s): %w", types.FIL(requiredFunds), types.FIL(balance), ErrSoftValidationFailure) + } + + return nil +} + +func (mp *MessagePool) addTs(m *types.SignedMessage, curTs *types.TipSet, local bool) (bool, error) { snonce, err := mp.getStateNonce(m.Message.From, curTs) if err != nil { - return xerrors.Errorf("failed to look up actor state nonce: %w", err) + return false, xerrors.Errorf("failed to look up actor state nonce: %s: %w", err, ErrSoftValidationFailure) + } + + if snonce > m.Message.Nonce { + return false, xerrors.Errorf("minimum expected nonce is %d: %w", snonce, ErrNonceTooLow) + } + + mp.lk.Lock() + defer mp.lk.Unlock() + + publish, err := mp.verifyMsgBeforeAdd(m, curTs, local) + if err != nil { + return false, err + } + + if err := mp.checkBalance(m, curTs); err != nil { + return false, err + } + + return publish, mp.addLocked(m, !local) +} + +func (mp *MessagePool) addLoaded(m *types.SignedMessage) error { + err := mp.checkMessage(m) + if err != nil { + return err + } + + mp.curTsLk.Lock() + defer mp.curTsLk.Unlock() + + curTs := mp.curTs + + snonce, err := mp.getStateNonce(m.Message.From, curTs) + if err != nil { + return xerrors.Errorf("failed to look up actor state nonce: %s: %w", err, ErrSoftValidationFailure) } if snonce > m.Message.Nonce { return xerrors.Errorf("minimum expected nonce is %d: %w", snonce, ErrNonceTooLow) } - balance, err := mp.getStateBalance(m.Message.From, curTs) - if err != nil { - return xerrors.Errorf("failed to check sender balance: %w", err) - } - - if balance.LessThan(m.Message.RequiredFunds()) { - return xerrors.Errorf("not enough funds (required: %s, balance: %s): %w", types.FIL(m.Message.RequiredFunds()), types.FIL(balance), ErrNotEnoughFunds) - } - mp.lk.Lock() defer mp.lk.Unlock() - return mp.addLocked(m) + _, err = mp.verifyMsgBeforeAdd(m, curTs, true) + if err != nil { + return 
err + } + + if err := mp.checkBalance(m, curTs); err != nil { + return err + } + + return mp.addLocked(m, false) } func (mp *MessagePool) addSkipChecks(m *types.SignedMessage) error { mp.lk.Lock() defer mp.lk.Unlock() - return mp.addLocked(m) + return mp.addLocked(m, false) } -func (mp *MessagePool) addLocked(m *types.SignedMessage) error { - log.Debugf("mpooladd: %s %s", m.Message.From, m.Message.Nonce) - if m.Signature.Type == types.KTBLS { +func (mp *MessagePool) addLocked(m *types.SignedMessage, strict bool) error { + log.Debugf("mpooladd: %s %d", m.Message.From, m.Message.Nonce) + if m.Signature.Type == crypto.SigTypeBLS { mp.blsSigCache.Add(m.Cid(), m.Signature) } @@ -357,18 +706,44 @@ func (mp *MessagePool) addLocked(m *types.SignedMessage) error { mset, ok := mp.pending[m.Message.From] if !ok { - mset = newMsgSet() + nonce, err := mp.getStateNonce(m.Message.From, mp.curTs) + if err != nil { + return xerrors.Errorf("failed to get initial actor nonce: %w", err) + } + + mset = newMsgSet(nonce) mp.pending[m.Message.From] = mset } - if err := mset.add(m); err != nil { - log.Error(err) + incr, err := mset.add(m, mp, strict) + if err != nil { + log.Debug(err) + return err + } + + if incr { + mp.currentSize++ + if mp.currentSize > mp.cfg.SizeLimitHigh { + // send signal to prune messages if it hasnt already been sent + select { + case mp.pruneTrigger <- struct{}{}: + default: + } + } } mp.changes.Pub(api.MpoolUpdate{ Type: api.MpoolAdd, Message: m, }, localUpdates) + + journal.J.RecordEvent(mp.evtTypes[evtTypeMpoolAdd], func() interface{} { + return MessagePoolEvt{ + Action: "add", + Messages: []MessagePoolEvtMessage{{Message: m.Message, CID: m.Cid()}}, + } + }) + return nil } @@ -403,42 +778,16 @@ func (mp *MessagePool) getNonceLocked(addr address.Address, curTs *types.TipSet) } func (mp *MessagePool) getStateNonce(addr address.Address, curTs *types.TipSet) (uint64, error) { - // TODO: this method probably should be cached - - act, err := 
mp.api.StateGetActor(addr, curTs) + act, err := mp.api.GetActorAfter(addr, curTs) if err != nil { return 0, err } - baseNonce := act.Nonce - - // TODO: the correct thing to do here is probably to set curTs to chain.head - // but since we have an accurate view of the world until a head change occurs, - // this should be fine - if curTs == nil { - return baseNonce, nil - } - - msgs, err := mp.api.MessagesForTipset(curTs) - if err != nil { - return 0, xerrors.Errorf("failed to check messages for tipset: %w", err) - } - - for _, m := range msgs { - msg := m.VMMessage() - if msg.From == addr { - if msg.Nonce != baseNonce { - return 0, xerrors.Errorf("tipset %s has bad nonce ordering (%d != %d)", curTs.Cids(), msg.Nonce, baseNonce) - } - baseNonce++ - } - } - - return baseNonce, nil + return act.Nonce, nil } func (mp *MessagePool) getStateBalance(addr address.Address, ts *types.TipSet) (types.BigInt, error) { - act, err := mp.api.StateGetActor(addr, ts) + act, err := mp.api.GetActorAfter(addr, ts) if err != nil { return types.EmptyInt, err } @@ -446,22 +795,46 @@ func (mp *MessagePool) getStateBalance(addr address.Address, ts *types.TipSet) ( return act.Balance, nil } -func (mp *MessagePool) PushWithNonce(addr address.Address, cb func(uint64) (*types.SignedMessage, error)) (*types.SignedMessage, error) { - mp.curTsLk.Lock() - defer mp.curTsLk.Unlock() +func (mp *MessagePool) PushWithNonce(ctx context.Context, addr address.Address, cb func(address.Address, uint64) (*types.SignedMessage, error)) (*types.SignedMessage, error) { + // serialize push access to reduce lock contention + mp.addSema <- struct{}{} + defer func() { + <-mp.addSema + }() + mp.curTsLk.Lock() mp.lk.Lock() - defer mp.lk.Unlock() - if addr.Protocol() == address.ID { - log.Warnf("Called pushWithNonce with ID address (%s) this might not be handled properly yet", addr) + + curTs := mp.curTs + + fromKey := addr + if fromKey.Protocol() == address.ID { + var err error + fromKey, err = 
mp.api.StateAccountKey(ctx, fromKey, mp.curTs) + if err != nil { + mp.lk.Unlock() + mp.curTsLk.Unlock() + return nil, xerrors.Errorf("resolving sender key: %w", err) + } } - nonce, err := mp.getNonceLocked(addr, mp.curTs) + nonce, err := mp.getNonceLocked(fromKey, mp.curTs) + if err != nil { + mp.lk.Unlock() + mp.curTsLk.Unlock() + return nil, xerrors.Errorf("get nonce locked failed: %w", err) + } + + // release the locks for signing + mp.lk.Unlock() + mp.curTsLk.Unlock() + + msg, err := cb(fromKey, nonce) if err != nil { return nil, err } - msg, err := cb(nonce) + err = mp.checkMessage(msg) if err != nil { return nil, err } @@ -471,20 +844,57 @@ func (mp *MessagePool) PushWithNonce(addr address.Address, cb func(uint64) (*typ return nil, err } - if err := mp.addLocked(msg); err != nil { + // reacquire the locks and check state for consistency + mp.curTsLk.Lock() + defer mp.curTsLk.Unlock() + + if mp.curTs != curTs { + return nil, ErrTryAgain + } + + mp.lk.Lock() + defer mp.lk.Unlock() + + nonce2, err := mp.getNonceLocked(fromKey, mp.curTs) + if err != nil { + return nil, xerrors.Errorf("get nonce locked failed: %w", err) + } + + if nonce2 != nonce { + return nil, ErrTryAgain + } + + publish, err := mp.verifyMsgBeforeAdd(msg, curTs, true) + if err != nil { return nil, err } + + if err := mp.checkBalance(msg, curTs); err != nil { + return nil, err + } + + if err := mp.addLocked(msg, false); err != nil { + return nil, xerrors.Errorf("add locked failed: %w", err) + } if err := mp.addLocal(msg, msgb); err != nil { log.Errorf("addLocal failed: %+v", err) } - return msg, mp.api.PubSubPublish(msgTopic, msgb) + if publish { + err = mp.api.PubSubPublish(build.MessagesTopic(mp.netName), msgb) + } + + return msg, err } -func (mp *MessagePool) Remove(from address.Address, nonce uint64) { +func (mp *MessagePool) Remove(from address.Address, nonce uint64, applied bool) { mp.lk.Lock() defer mp.lk.Unlock() + mp.remove(from, nonce, applied) +} + +func (mp *MessagePool) remove(from 
address.Address, nonce uint64, applied bool) { mset, ok := mp.pending[from] if !ok { return @@ -495,26 +905,22 @@ func (mp *MessagePool) Remove(from address.Address, nonce uint64) { Type: api.MpoolRemove, Message: m, }, localUpdates) + + journal.J.RecordEvent(mp.evtTypes[evtTypeMpoolRemove], func() interface{} { + return MessagePoolEvt{ + Action: "remove", + Messages: []MessagePoolEvtMessage{{Message: m.Message, CID: m.Cid()}}} + }) + + mp.currentSize-- } // NB: This deletes any message with the given nonce. This makes sense // as two messages with the same sender cannot have the same nonce - delete(mset.msgs, nonce) + mset.rm(nonce, applied) if len(mset.msgs) == 0 { delete(mp.pending, from) - } else { - var max uint64 - for nonce := range mset.msgs { - if max < nonce { - max = nonce - } - } - if max < nonce { - max = nonce // we could have not seen the removed message before - } - - mset.nextNonce = max + 1 } } @@ -525,6 +931,10 @@ func (mp *MessagePool) Pending() ([]*types.SignedMessage, *types.TipSet) { mp.lk.Lock() defer mp.lk.Unlock() + return mp.allPending() +} + +func (mp *MessagePool) allPending() ([]*types.SignedMessage, *types.TipSet) { out := make([]*types.SignedMessage, 0) for a := range mp.pending { out = append(out, mp.pendingFor(a)...) 
@@ -533,6 +943,15 @@ func (mp *MessagePool) Pending() ([]*types.SignedMessage, *types.TipSet) { return out, mp.curTs } +func (mp *MessagePool) PendingFor(a address.Address) ([]*types.SignedMessage, *types.TipSet) { + mp.curTsLk.Lock() + defer mp.curTsLk.Unlock() + + mp.lk.Lock() + defer mp.lk.Unlock() + return mp.pendingFor(a), mp.curTs +} + func (mp *MessagePool) pendingFor(a address.Address) []*types.SignedMessage { mset := mp.pending[a] if mset == nil || len(mset.msgs) == 0 { @@ -556,6 +975,7 @@ func (mp *MessagePool) HeadChange(revert []*types.TipSet, apply []*types.TipSet) mp.curTsLk.Lock() defer mp.curTsLk.Unlock() + repubTrigger := false rmsgs := make(map[address.Address]map[uint64]*types.SignedMessage) add := func(m *types.SignedMessage) { s, ok := rmsgs[m.Message.From] @@ -568,7 +988,7 @@ func (mp *MessagePool) HeadChange(revert []*types.TipSet, apply []*types.TipSet) rm := func(from address.Address, nonce uint64) { s, ok := rmsgs[from] if !ok { - mp.Remove(from, nonce) + mp.Remove(from, nonce, true) return } @@ -577,43 +997,73 @@ func (mp *MessagePool) HeadChange(revert []*types.TipSet, apply []*types.TipSet) return } - mp.Remove(from, nonce) + mp.Remove(from, nonce, true) } + maybeRepub := func(cid cid.Cid) { + if !repubTrigger { + mp.lk.Lock() + _, republished := mp.republished[cid] + mp.lk.Unlock() + if republished { + repubTrigger = true + } + } + } + + var merr error + for _, ts := range revert { pts, err := mp.api.LoadTipSet(ts.Parents()) if err != nil { - return err - } - - msgs, err := mp.MessagesForBlocks(ts.Blocks()) - if err != nil { - return err + log.Errorf("error loading reverted tipset parent: %s", err) + merr = multierror.Append(merr, err) + continue } mp.curTs = pts + msgs, err := mp.MessagesForBlocks(ts.Blocks()) + if err != nil { + log.Errorf("error retrieving messages for reverted block: %s", err) + merr = multierror.Append(merr, err) + continue + } + for _, msg := range msgs { add(msg) } } for _, ts := range apply { + mp.curTs = ts + 
for _, b := range ts.Blocks() { bmsgs, smsgs, err := mp.api.MessagesForBlock(b) if err != nil { - return xerrors.Errorf("failed to get messages for apply block %s(height %d) (msgroot = %s): %w", b.Cid(), b.Height, b.Messages, err) + xerr := xerrors.Errorf("failed to get messages for apply block %s(height %d) (msgroot = %s): %w", b.Cid(), b.Height, b.Messages, err) + log.Errorf("error retrieving messages for block: %s", xerr) + merr = multierror.Append(merr, xerr) + continue } + for _, msg := range smsgs { rm(msg.Message.From, msg.Message.Nonce) + maybeRepub(msg.Cid()) } for _, msg := range bmsgs { rm(msg.From, msg.Nonce) + maybeRepub(msg.Cid()) } } + } - mp.curTs = ts + if repubTrigger { + select { + case mp.repubTrigger <- struct{}{}: + default: + } } for _, s := range rmsgs { @@ -624,7 +1074,144 @@ func (mp *MessagePool) HeadChange(revert []*types.TipSet, apply []*types.TipSet) } } - return nil + if len(revert) > 0 && futureDebug { + mp.lk.Lock() + msgs, ts := mp.allPending() + mp.lk.Unlock() + + buckets := map[address.Address]*statBucket{} + + for _, v := range msgs { + bkt, ok := buckets[v.Message.From] + if !ok { + bkt = &statBucket{ + msgs: map[uint64]*types.SignedMessage{}, + } + buckets[v.Message.From] = bkt + } + + bkt.msgs[v.Message.Nonce] = v + } + + for a, bkt := range buckets { + // TODO that might not be correct with GetActorAfter but it is only debug code + act, err := mp.api.GetActorAfter(a, ts) + if err != nil { + log.Debugf("%s, err: %s\n", a, err) + continue + } + + var cmsg *types.SignedMessage + var ok bool + + cur := act.Nonce + for { + cmsg, ok = bkt.msgs[cur] + if !ok { + break + } + cur++ + } + + ff := uint64(math.MaxUint64) + for k := range bkt.msgs { + if k > cur && k < ff { + ff = k + } + } + + if ff != math.MaxUint64 { + m := bkt.msgs[ff] + + // cmsg can be nil if no messages from the current nonce are in the mpool + ccid := "nil" + if cmsg != nil { + ccid = cmsg.Cid().String() + } + + log.Debugw("Nonce gap", + "actor", a, + 
"future_cid", m.Cid(), + "future_nonce", ff, + "current_cid", ccid, + "current_nonce", cur, + "revert_tipset", revert[0].Key(), + "new_head", ts.Key(), + ) + } + } + } + + return merr +} + +func (mp *MessagePool) runHeadChange(from *types.TipSet, to *types.TipSet, rmsgs map[address.Address]map[uint64]*types.SignedMessage) error { + add := func(m *types.SignedMessage) { + s, ok := rmsgs[m.Message.From] + if !ok { + s = make(map[uint64]*types.SignedMessage) + rmsgs[m.Message.From] = s + } + s[m.Message.Nonce] = m + } + rm := func(from address.Address, nonce uint64) { + s, ok := rmsgs[from] + if !ok { + return + } + + if _, ok := s[nonce]; ok { + delete(s, nonce) + return + } + + } + + revert, apply, err := store.ReorgOps(mp.api.LoadTipSet, from, to) + if err != nil { + return xerrors.Errorf("failed to compute reorg ops for mpool pending messages: %w", err) + } + + var merr error + + for _, ts := range revert { + msgs, err := mp.MessagesForBlocks(ts.Blocks()) + if err != nil { + log.Errorf("error retrieving messages for reverted block: %s", err) + merr = multierror.Append(merr, err) + continue + } + + for _, msg := range msgs { + add(msg) + } + } + + for _, ts := range apply { + for _, b := range ts.Blocks() { + bmsgs, smsgs, err := mp.api.MessagesForBlock(b) + if err != nil { + xerr := xerrors.Errorf("failed to get messages for apply block %s(height %d) (msgroot = %s): %w", b.Cid(), b.Height, b.Messages, err) + log.Errorf("error retrieving messages for block: %s", xerr) + merr = multierror.Append(merr, xerr) + continue + } + + for _, msg := range smsgs { + rm(msg.Message.From, msg.Message.Nonce) + } + + for _, msg := range bmsgs { + rm(msg.From, msg.Nonce) + } + } + } + + return merr +} + +type statBucket struct { + msgs map[uint64]*types.SignedMessage } func (mp *MessagePool) MessagesForBlocks(blks []*types.BlockHeader) ([]*types.SignedMessage, error) { @@ -655,7 +1242,7 @@ func (mp *MessagePool) RecoverSig(msg *types.Message) *types.SignedMessage { if !ok { return 
nil } - sig, ok := val.(types.Signature) + sig, ok := val.(crypto.Signature) if !ok { log.Errorf("value in signature cache was not a signature (got %T)", val) return nil @@ -673,6 +1260,7 @@ func (mp *MessagePool) Updates(ctx context.Context) (<-chan api.MpoolUpdate, err go func() { defer mp.changes.Unsub(sub, localUpdates) + defer close(out) for { select { @@ -681,9 +1269,13 @@ func (mp *MessagePool) Updates(ctx context.Context) (<-chan api.MpoolUpdate, err case out <- u.(api.MpoolUpdate): case <-ctx.Done(): return + case <-mp.closer: + return } case <-ctx.Done(): return + case <-mp.closer: + return } } }() @@ -707,14 +1299,62 @@ func (mp *MessagePool) loadLocal() error { return xerrors.Errorf("unmarshaling local message: %w", err) } - if err := mp.Add(&sm); err != nil { + if err := mp.addLoaded(&sm); err != nil { if xerrors.Is(err, ErrNonceTooLow) { continue // todo: drop the message from local cache (if above certain confidence threshold) } log.Errorf("adding local message: %+v", err) } + + mp.localAddrs[sm.Message.From] = struct{}{} } return nil } + +func (mp *MessagePool) Clear(local bool) { + mp.lk.Lock() + defer mp.lk.Unlock() + + // remove everything if local is true, including removing local messages from + // the datastore + if local { + for a := range mp.localAddrs { + mset, ok := mp.pending[a] + if !ok { + continue + } + + for _, m := range mset.msgs { + err := mp.localMsgs.Delete(datastore.NewKey(string(m.Cid().Bytes()))) + if err != nil { + log.Warnf("error deleting local message: %s", err) + } + } + } + + mp.pending = make(map[address.Address]*msgSet) + mp.republished = nil + + return + } + + // remove everything except the local messages + for a := range mp.pending { + _, isLocal := mp.localAddrs[a] + if isLocal { + continue + } + delete(mp.pending, a) + } +} + +func getBaseFeeLowerBound(baseFee, factor types.BigInt) types.BigInt { + baseFeeLowerBound := types.BigDiv(baseFee, factor) + if baseFeeLowerBound.LessThan(minimumBaseFee) { + 
baseFeeLowerBound = minimumBaseFee + } + + return baseFeeLowerBound +} diff --git a/chain/messagepool/messagepool_test.go b/chain/messagepool/messagepool_test.go index 514806eb1..a4aa059ca 100644 --- a/chain/messagepool/messagepool_test.go +++ b/chain/messagepool/messagepool_test.go @@ -1,82 +1,166 @@ package messagepool import ( + "context" "fmt" + "sort" "testing" "github.com/filecoin-project/go-address" - "github.com/filecoin-project/lotus/chain/store" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/crypto" + "github.com/filecoin-project/lotus/chain/messagepool/gasguess" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/types/mock" "github.com/filecoin-project/lotus/chain/wallet" + _ "github.com/filecoin-project/lotus/lib/sigs/bls" + _ "github.com/filecoin-project/lotus/lib/sigs/secp" + "github.com/filecoin-project/specs-actors/actors/builtin" "github.com/ipfs/go-cid" "github.com/ipfs/go-datastore" + logging "github.com/ipfs/go-log/v2" ) -type testMpoolApi struct { +func init() { + _ = logging.SetLogLevel("*", "INFO") +} + +type testMpoolAPI struct { cb func(rev, app []*types.TipSet) error bmsgs map[cid.Cid][]*types.SignedMessage statenonce map[address.Address]uint64 + balance map[address.Address]types.BigInt tipsets []*types.TipSet + + published int + + baseFee types.BigInt } -func newTestMpoolApi() *testMpoolApi { - return &testMpoolApi{ +func newTestMpoolAPI() *testMpoolAPI { + tma := &testMpoolAPI{ bmsgs: make(map[cid.Cid][]*types.SignedMessage), statenonce: make(map[address.Address]uint64), + balance: make(map[address.Address]types.BigInt), + baseFee: types.NewInt(100), } + genesis := mock.MkBlock(nil, 1, 1) + tma.tipsets = append(tma.tipsets, mock.TipSet(genesis)) + return tma } -func (tma *testMpoolApi) applyBlock(t *testing.T, b *types.BlockHeader) { +func (tma *testMpoolAPI) nextBlock() *types.BlockHeader { + newBlk := mock.MkBlock(tma.tipsets[len(tma.tipsets)-1], 1, 
1) + tma.tipsets = append(tma.tipsets, mock.TipSet(newBlk)) + return newBlk +} + +func (tma *testMpoolAPI) nextBlockWithHeight(height uint64) *types.BlockHeader { + newBlk := mock.MkBlock(tma.tipsets[len(tma.tipsets)-1], 1, 1) + newBlk.Height = abi.ChainEpoch(height) + tma.tipsets = append(tma.tipsets, mock.TipSet(newBlk)) + return newBlk +} + +func (tma *testMpoolAPI) applyBlock(t *testing.T, b *types.BlockHeader) { t.Helper() if err := tma.cb(nil, []*types.TipSet{mock.TipSet(b)}); err != nil { t.Fatal(err) } } -func (tma *testMpoolApi) revertBlock(t *testing.T, b *types.BlockHeader) { +func (tma *testMpoolAPI) revertBlock(t *testing.T, b *types.BlockHeader) { t.Helper() if err := tma.cb([]*types.TipSet{mock.TipSet(b)}, nil); err != nil { t.Fatal(err) } } -func (tma *testMpoolApi) setStateNonce(addr address.Address, v uint64) { +func (tma *testMpoolAPI) setStateNonce(addr address.Address, v uint64) { tma.statenonce[addr] = v } -func (tma *testMpoolApi) setBlockMessages(h *types.BlockHeader, msgs ...*types.SignedMessage) { +func (tma *testMpoolAPI) setBalance(addr address.Address, v uint64) { + tma.balance[addr] = types.FromFil(v) +} + +func (tma *testMpoolAPI) setBalanceRaw(addr address.Address, v types.BigInt) { + tma.balance[addr] = v +} + +func (tma *testMpoolAPI) setBlockMessages(h *types.BlockHeader, msgs ...*types.SignedMessage) { tma.bmsgs[h.Cid()] = msgs - tma.tipsets = append(tma.tipsets, mock.TipSet(h)) } -func (tma *testMpoolApi) SubscribeHeadChanges(cb func(rev, app []*types.TipSet) error) *types.TipSet { +func (tma *testMpoolAPI) SubscribeHeadChanges(cb func(rev, app []*types.TipSet) error) *types.TipSet { tma.cb = cb - return nil + return tma.tipsets[0] } -func (tma *testMpoolApi) PutMessage(m store.ChainMsg) (cid.Cid, error) { +func (tma *testMpoolAPI) PutMessage(m types.ChainMsg) (cid.Cid, error) { return cid.Undef, nil } -func (tma *testMpoolApi) PubSubPublish(string, []byte) error { +func (tma *testMpoolAPI) PubSubPublish(string, []byte) error { 
+ tma.published++ return nil } -func (tma *testMpoolApi) StateGetActor(addr address.Address, ts *types.TipSet) (*types.Actor, error) { +func (tma *testMpoolAPI) GetActorAfter(addr address.Address, ts *types.TipSet) (*types.Actor, error) { + // regression check for load bug + if ts == nil { + panic("GetActorAfter called with nil tipset") + } + + balance, ok := tma.balance[addr] + if !ok { + balance = types.NewInt(1000e6) + tma.balance[addr] = balance + } + + msgs := make([]*types.SignedMessage, 0) + for _, b := range ts.Blocks() { + for _, m := range tma.bmsgs[b.Cid()] { + if m.Message.From == addr { + msgs = append(msgs, m) + } + } + } + + sort.Slice(msgs, func(i, j int) bool { + return msgs[i].Message.Nonce < msgs[j].Message.Nonce + }) + + nonce := tma.statenonce[addr] + + for _, m := range msgs { + if m.Message.Nonce != nonce { + break + } + nonce++ + } + return &types.Actor{ - Nonce: tma.statenonce[addr], - Balance: types.NewInt(90000000), + Code: builtin.StorageMarketActorCodeID, + Nonce: nonce, + Balance: balance, }, nil } -func (tma *testMpoolApi) MessagesForBlock(h *types.BlockHeader) ([]*types.Message, []*types.SignedMessage, error) { +func (tma *testMpoolAPI) StateAccountKey(ctx context.Context, addr address.Address, ts *types.TipSet) (address.Address, error) { + if addr.Protocol() != address.BLS && addr.Protocol() != address.SECP256K1 { + return address.Undef, fmt.Errorf("given address was not a key addr") + } + return addr, nil +} + +func (tma *testMpoolAPI) MessagesForBlock(h *types.BlockHeader) ([]*types.Message, []*types.SignedMessage, error) { return nil, tma.bmsgs[h.Cid()], nil } -func (tma *testMpoolApi) MessagesForTipset(ts *types.TipSet) ([]store.ChainMsg, error) { +func (tma *testMpoolAPI) MessagesForTipset(ts *types.TipSet) ([]types.ChainMsg, error) { if len(ts.Blocks()) != 1 { panic("cant deal with multiblock tipsets in this test") } @@ -86,7 +170,7 @@ func (tma *testMpoolApi) MessagesForTipset(ts *types.TipSet) ([]store.ChainMsg, return nil, 
err } - var out []store.ChainMsg + var out []types.ChainMsg for _, m := range bm { out = append(out, m) } @@ -98,7 +182,7 @@ func (tma *testMpoolApi) MessagesForTipset(ts *types.TipSet) ([]store.ChainMsg, return out, nil } -func (tma *testMpoolApi) LoadTipSet(tsk types.TipSetKey) (*types.TipSet, error) { +func (tma *testMpoolAPI) LoadTipSet(tsk types.TipSetKey) (*types.TipSet, error) { for _, ts := range tma.tipsets { if types.CidArrsEqual(tsk.Cids(), ts.Cids()) { return ts, nil @@ -108,6 +192,10 @@ func (tma *testMpoolApi) LoadTipSet(tsk types.TipSetKey) (*types.TipSet, error) return nil, fmt.Errorf("tipset not found") } +func (tma *testMpoolAPI) ChainComputeBaseFee(ctx context.Context, ts *types.TipSet) (types.BigInt, error) { + return tma.baseFee, nil +} + func assertNonce(t *testing.T, mp *MessagePool, addr address.Address, val uint64) { t.Helper() n, err := mp.GetNonce(addr) @@ -128,7 +216,7 @@ func mustAdd(t *testing.T, mp *MessagePool, msg *types.SignedMessage) { } func TestMessagePool(t *testing.T) { - tma := newTestMpoolApi() + tma := newTestMpoolAPI() w, err := wallet.NewWallet(wallet.NewMemKeyStore()) if err != nil { @@ -137,14 +225,14 @@ func TestMessagePool(t *testing.T) { ds := datastore.NewMapDatastore() - mp, err := New(tma, ds) + mp, err := New(tma, ds, "mptest") if err != nil { t.Fatal(err) } - a := mock.MkBlock(nil, 1, 1) + a := tma.nextBlock() - sender, err := w.GenerateKey(types.KTBLS) + sender, err := w.GenerateKey(crypto.SigTypeBLS) if err != nil { t.Fatal(err) } @@ -168,8 +256,8 @@ func TestMessagePool(t *testing.T) { assertNonce(t, mp, sender, 2) } -func TestRevertMessages(t *testing.T) { - tma := newTestMpoolApi() +func TestMessagePoolMessagesInEachBlock(t *testing.T) { + tma := newTestMpoolAPI() w, err := wallet.NewWallet(wallet.NewMemKeyStore()) if err != nil { @@ -178,15 +266,64 @@ func TestRevertMessages(t *testing.T) { ds := datastore.NewMapDatastore() - mp, err := New(tma, ds) + mp, err := New(tma, ds, "mptest") if err != nil { 
t.Fatal(err) } - a := mock.MkBlock(nil, 1, 1) - b := mock.MkBlock(mock.TipSet(a), 1, 1) + a := tma.nextBlock() - sender, err := w.GenerateKey(types.KTBLS) + sender, err := w.GenerateKey(crypto.SigTypeBLS) + if err != nil { + t.Fatal(err) + } + target := mock.Address(1001) + + var msgs []*types.SignedMessage + for i := 0; i < 5; i++ { + m := mock.MkMessage(sender, target, uint64(i), w) + msgs = append(msgs, m) + mustAdd(t, mp, m) + } + + tma.setStateNonce(sender, 0) + + tma.setBlockMessages(a, msgs[0], msgs[1]) + tma.applyBlock(t, a) + tsa := mock.TipSet(a) + + _, _ = mp.Pending() + + selm, _ := mp.SelectMessages(tsa, 1) + if len(selm) == 0 { + t.Fatal("should have returned the rest of the messages") + } +} + +func TestRevertMessages(t *testing.T) { + futureDebug = true + defer func() { + futureDebug = false + }() + + tma := newTestMpoolAPI() + + w, err := wallet.NewWallet(wallet.NewMemKeyStore()) + if err != nil { + t.Fatal(err) + } + + ds := datastore.NewMapDatastore() + + mp, err := New(tma, ds, "mptest") + if err != nil { + t.Fatal(err) + } + + a := tma.nextBlock() + b := tma.nextBlock() + + sender, err := w.GenerateKey(crypto.SigTypeBLS) if err != nil { t.Fatal(err) } @@ -218,8 +355,319 @@ func TestRevertMessages(t *testing.T) { assertNonce(t, mp, sender, 4) p, _ := mp.Pending() + fmt.Printf("%+v\n", p) if len(p) != 3 { t.Fatal("expected three messages in mempool") } } + +func TestPruningSimple(t *testing.T) { + oldMaxNonceGap := MaxNonceGap + MaxNonceGap = 1000 + defer func() { + MaxNonceGap = oldMaxNonceGap + }() + + tma := newTestMpoolAPI() + + w, err := wallet.NewWallet(wallet.NewMemKeyStore()) + if err != nil { + t.Fatal(err) + } + + ds := datastore.NewMapDatastore() + + mp, err := New(tma, ds, "mptest") + if err != nil { + t.Fatal(err) + } + + a := tma.nextBlock() + tma.applyBlock(t, a) + + sender, err := w.GenerateKey(crypto.SigTypeBLS) + if err != nil { + t.Fatal(err) + } + tma.setBalance(sender, 1) // in FIL + target := mock.Address(1001) + + for i := 
0; i < 5; i++ { + smsg := mock.MkMessage(sender, target, uint64(i), w) + if err := mp.Add(smsg); err != nil { + t.Fatal(err) + } + } + + for i := 10; i < 50; i++ { + smsg := mock.MkMessage(sender, target, uint64(i), w) + if err := mp.Add(smsg); err != nil { + t.Fatal(err) + } + } + + mp.cfg.SizeLimitHigh = 40 + mp.cfg.SizeLimitLow = 10 + + mp.Prune() + + msgs, _ := mp.Pending() + if len(msgs) != 5 { + t.Fatal("expected only 5 messages in pool, got: ", len(msgs)) + } +} + +func TestLoadLocal(t *testing.T) { + tma := newTestMpoolAPI() + ds := datastore.NewMapDatastore() + + mp, err := New(tma, ds, "mptest") + if err != nil { + t.Fatal(err) + } + + // the actors + w1, err := wallet.NewWallet(wallet.NewMemKeyStore()) + if err != nil { + t.Fatal(err) + } + + a1, err := w1.GenerateKey(crypto.SigTypeSecp256k1) + if err != nil { + t.Fatal(err) + } + + w2, err := wallet.NewWallet(wallet.NewMemKeyStore()) + if err != nil { + t.Fatal(err) + } + + a2, err := w2.GenerateKey(crypto.SigTypeSecp256k1) + if err != nil { + t.Fatal(err) + } + + tma.setBalance(a1, 1) // in FIL + tma.setBalance(a2, 1) // in FIL + gasLimit := gasguess.Costs[gasguess.CostKey{Code: builtin.StorageMarketActorCodeID, M: 2}] + msgs := make(map[cid.Cid]struct{}) + for i := 0; i < 10; i++ { + m := makeTestMessage(w1, a1, a2, uint64(i), gasLimit, uint64(i+1)) + cid, err := mp.Push(m) + if err != nil { + t.Fatal(err) + } + msgs[cid] = struct{}{} + } + err = mp.Close() + if err != nil { + t.Fatal(err) + } + + mp, err = New(tma, ds, "mptest") + if err != nil { + t.Fatal(err) + } + + pmsgs, _ := mp.Pending() + if len(msgs) != len(pmsgs) { + t.Fatalf("expected %d messages, but got %d", len(msgs), len(pmsgs)) + } + + for _, m := range pmsgs { + cid := m.Cid() + _, ok := msgs[cid] + if !ok { + t.Fatal("unknown message") + } + + delete(msgs, cid) + } + + if len(msgs) > 0 { + t.Fatalf("not all messages were loaded; missing %d messages", len(msgs)) + } +} + +func TestClearAll(t *testing.T) { + tma := newTestMpoolAPI() + 
ds := datastore.NewMapDatastore() + + mp, err := New(tma, ds, "mptest") + if err != nil { + t.Fatal(err) + } + + // the actors + w1, err := wallet.NewWallet(wallet.NewMemKeyStore()) + if err != nil { + t.Fatal(err) + } + + a1, err := w1.GenerateKey(crypto.SigTypeSecp256k1) + if err != nil { + t.Fatal(err) + } + + w2, err := wallet.NewWallet(wallet.NewMemKeyStore()) + if err != nil { + t.Fatal(err) + } + + a2, err := w2.GenerateKey(crypto.SigTypeSecp256k1) + if err != nil { + t.Fatal(err) + } + + tma.setBalance(a1, 1) // in FIL + tma.setBalance(a2, 1) // in FIL + gasLimit := gasguess.Costs[gasguess.CostKey{Code: builtin.StorageMarketActorCodeID, M: 2}] + for i := 0; i < 10; i++ { + m := makeTestMessage(w1, a1, a2, uint64(i), gasLimit, uint64(i+1)) + _, err := mp.Push(m) + if err != nil { + t.Fatal(err) + } + } + + for i := 0; i < 10; i++ { + m := makeTestMessage(w2, a2, a1, uint64(i), gasLimit, uint64(i+1)) + mustAdd(t, mp, m) + } + + mp.Clear(true) + + pending, _ := mp.Pending() + if len(pending) > 0 { + t.Fatalf("cleared the mpool, but got %d pending messages", len(pending)) + } +} + +func TestClearNonLocal(t *testing.T) { + tma := newTestMpoolAPI() + ds := datastore.NewMapDatastore() + + mp, err := New(tma, ds, "mptest") + if err != nil { + t.Fatal(err) + } + + // the actors + w1, err := wallet.NewWallet(wallet.NewMemKeyStore()) + if err != nil { + t.Fatal(err) + } + + a1, err := w1.GenerateKey(crypto.SigTypeSecp256k1) + if err != nil { + t.Fatal(err) + } + + w2, err := wallet.NewWallet(wallet.NewMemKeyStore()) + if err != nil { + t.Fatal(err) + } + + a2, err := w2.GenerateKey(crypto.SigTypeSecp256k1) + if err != nil { + t.Fatal(err) + } + + tma.setBalance(a1, 1) // in FIL + tma.setBalance(a2, 1) // in FIL + + gasLimit := gasguess.Costs[gasguess.CostKey{Code: builtin.StorageMarketActorCodeID, M: 2}] + for i := 0; i < 10; i++ { + m := makeTestMessage(w1, a1, a2, uint64(i), gasLimit, uint64(i+1)) + _, err := mp.Push(m) + if err != nil { + t.Fatal(err) + } + } + + 
for i := 0; i < 10; i++ { + m := makeTestMessage(w2, a2, a1, uint64(i), gasLimit, uint64(i+1)) + mustAdd(t, mp, m) + } + + mp.Clear(false) + + pending, _ := mp.Pending() + if len(pending) != 10 { + t.Fatalf("expected 10 pending messages, but got %d instead", len(pending)) + } + + for _, m := range pending { + if m.Message.From != a1 { + t.Fatalf("expected message from %s but got one from %s instead", a1, m.Message.From) + } + } +} + +func TestUpdates(t *testing.T) { + tma := newTestMpoolAPI() + ds := datastore.NewMapDatastore() + + mp, err := New(tma, ds, "mptest") + if err != nil { + t.Fatal(err) + } + + // the actors + w1, err := wallet.NewWallet(wallet.NewMemKeyStore()) + if err != nil { + t.Fatal(err) + } + + a1, err := w1.GenerateKey(crypto.SigTypeSecp256k1) + if err != nil { + t.Fatal(err) + } + + w2, err := wallet.NewWallet(wallet.NewMemKeyStore()) + if err != nil { + t.Fatal(err) + } + + a2, err := w2.GenerateKey(crypto.SigTypeSecp256k1) + if err != nil { + t.Fatal(err) + } + + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + + ch, err := mp.Updates(ctx) + if err != nil { + t.Fatal(err) + } + + gasLimit := gasguess.Costs[gasguess.CostKey{Code: builtin.StorageMarketActorCodeID, M: 2}] + + tma.setBalance(a1, 1) // in FIL + tma.setBalance(a2, 1) // in FIL + + for i := 0; i < 10; i++ { + m := makeTestMessage(w1, a1, a2, uint64(i), gasLimit, uint64(i+1)) + _, err := mp.Push(m) + if err != nil { + t.Fatal(err) + } + + _, ok := <-ch + if !ok { + t.Fatal("expected update, but got a closed channel instead") + } + } + + err = mp.Close() + if err != nil { + t.Fatal(err) + } + + _, ok := <-ch + if ok { + t.Fatal("expected closed channel, but got an update instead") + } +} diff --git a/chain/messagepool/provider.go b/chain/messagepool/provider.go new file mode 100644 index 000000000..347e90044 --- /dev/null +++ b/chain/messagepool/provider.go @@ -0,0 +1,83 @@ +package messagepool + +import ( + "context" + + "github.com/ipfs/go-cid" + pubsub 
"github.com/libp2p/go-libp2p-pubsub" + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/lotus/chain/stmgr" + "github.com/filecoin-project/lotus/chain/types" +) + +type Provider interface { + SubscribeHeadChanges(func(rev, app []*types.TipSet) error) *types.TipSet + PutMessage(m types.ChainMsg) (cid.Cid, error) + PubSubPublish(string, []byte) error + GetActorAfter(address.Address, *types.TipSet) (*types.Actor, error) + StateAccountKey(context.Context, address.Address, *types.TipSet) (address.Address, error) + MessagesForBlock(*types.BlockHeader) ([]*types.Message, []*types.SignedMessage, error) + MessagesForTipset(*types.TipSet) ([]types.ChainMsg, error) + LoadTipSet(tsk types.TipSetKey) (*types.TipSet, error) + ChainComputeBaseFee(ctx context.Context, ts *types.TipSet) (types.BigInt, error) +} + +type mpoolProvider struct { + sm *stmgr.StateManager + ps *pubsub.PubSub +} + +func NewProvider(sm *stmgr.StateManager, ps *pubsub.PubSub) Provider { + return &mpoolProvider{sm: sm, ps: ps} +} + +func (mpp *mpoolProvider) SubscribeHeadChanges(cb func(rev, app []*types.TipSet) error) *types.TipSet { + mpp.sm.ChainStore().SubscribeHeadChanges(cb) + return mpp.sm.ChainStore().GetHeaviestTipSet() +} + +func (mpp *mpoolProvider) PutMessage(m types.ChainMsg) (cid.Cid, error) { + return mpp.sm.ChainStore().PutMessage(m) +} + +func (mpp *mpoolProvider) PubSubPublish(k string, v []byte) error { + return mpp.ps.Publish(k, v) //nolint +} + +func (mpp *mpoolProvider) GetActorAfter(addr address.Address, ts *types.TipSet) (*types.Actor, error) { + stcid, _, err := mpp.sm.TipSetState(context.TODO(), ts) + if err != nil { + return nil, xerrors.Errorf("computing tipset state for GetActor: %w", err) + } + st, err := mpp.sm.StateTree(stcid) + if err != nil { + return nil, xerrors.Errorf("failed to load state tree: %w", err) + } + return st.GetActor(addr) +} + +func (mpp *mpoolProvider) StateAccountKey(ctx context.Context, addr 
address.Address, ts *types.TipSet) (address.Address, error) { + return mpp.sm.ResolveToKeyAddress(ctx, addr, ts) +} + +func (mpp *mpoolProvider) MessagesForBlock(h *types.BlockHeader) ([]*types.Message, []*types.SignedMessage, error) { + return mpp.sm.ChainStore().MessagesForBlock(h) +} + +func (mpp *mpoolProvider) MessagesForTipset(ts *types.TipSet) ([]types.ChainMsg, error) { + return mpp.sm.ChainStore().MessagesForTipset(ts) +} + +func (mpp *mpoolProvider) LoadTipSet(tsk types.TipSetKey) (*types.TipSet, error) { + return mpp.sm.ChainStore().LoadTipSet(tsk) +} + +func (mpp *mpoolProvider) ChainComputeBaseFee(ctx context.Context, ts *types.TipSet) (types.BigInt, error) { + baseFee, err := mpp.sm.ChainStore().ComputeBaseFee(ctx, ts) + if err != nil { + return types.NewInt(0), xerrors.Errorf("computing base fee at %s: %w", ts, err) + } + return baseFee, nil +} diff --git a/chain/messagepool/pruning.go b/chain/messagepool/pruning.go new file mode 100644 index 000000000..d0e53795a --- /dev/null +++ b/chain/messagepool/pruning.go @@ -0,0 +1,113 @@ +package messagepool + +import ( + "context" + "sort" + "time" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/lotus/chain/types" + "github.com/ipfs/go-cid" + "golang.org/x/xerrors" +) + +func (mp *MessagePool) pruneExcessMessages() error { + mp.curTsLk.Lock() + ts := mp.curTs + mp.curTsLk.Unlock() + + mp.lk.Lock() + defer mp.lk.Unlock() + + if mp.currentSize < mp.cfg.SizeLimitHigh { + return nil + } + + select { + case <-mp.pruneCooldown: + err := mp.pruneMessages(context.TODO(), ts) + go func() { + time.Sleep(mp.cfg.PruneCooldown) + mp.pruneCooldown <- struct{}{} + }() + return err + default: + return xerrors.New("cannot prune before cooldown") + } +} + +func (mp *MessagePool) pruneMessages(ctx context.Context, ts *types.TipSet) error { + start := time.Now() + defer func() { + log.Infof("message pruning took %s", time.Since(start)) + }() + + baseFee, err := mp.api.ChainComputeBaseFee(ctx, ts) + 
if err != nil { + return xerrors.Errorf("computing basefee: %w", err) + } + baseFeeLowerBound := getBaseFeeLowerBound(baseFee, baseFeeLowerBoundFactor) + + pending, _ := mp.getPendingMessages(ts, ts) + + // protected actors -- not pruned + protected := make(map[address.Address]struct{}) + + // we never prune priority addresses + for _, actor := range mp.cfg.PriorityAddrs { + protected[actor] = struct{}{} + } + + // we also never prune locally published messages + for actor := range mp.localAddrs { + protected[actor] = struct{}{} + } + + // Collect all messages to track which ones to remove and create chains for block inclusion + pruneMsgs := make(map[cid.Cid]*types.SignedMessage, mp.currentSize) + keepCount := 0 + + var chains []*msgChain + for actor, mset := range pending { + // we never prune protected actors + _, keep := protected[actor] + if keep { + keepCount += len(mset) + continue + } + + // not a protected actor, track the messages and create chains + for _, m := range mset { + pruneMsgs[m.Message.Cid()] = m + } + actorChains := mp.createMessageChains(actor, mset, baseFeeLowerBound, ts) + chains = append(chains, actorChains...) 
+ } + + // Sort the chains + sort.Slice(chains, func(i, j int) bool { + return chains[i].Before(chains[j]) + }) + + // Keep messages (remove them from pruneMsgs) from chains while we are under the low water mark + loWaterMark := mp.cfg.SizeLimitLow +keepLoop: + for _, chain := range chains { + for _, m := range chain.msgs { + if keepCount < loWaterMark { + delete(pruneMsgs, m.Message.Cid()) + keepCount++ + } else { + break keepLoop + } + } + } + + // and remove all messages that are still in pruneMsgs after processing the chains + log.Infof("Pruning %d messages", len(pruneMsgs)) + for _, m := range pruneMsgs { + mp.remove(m.Message.From, m.Message.Nonce, false) + } + + return nil +} diff --git a/chain/messagepool/repub.go b/chain/messagepool/repub.go new file mode 100644 index 000000000..db31e18c2 --- /dev/null +++ b/chain/messagepool/repub.go @@ -0,0 +1,175 @@ +package messagepool + +import ( + "context" + "sort" + "time" + + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/chain/messagepool/gasguess" + "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/journal" + "github.com/ipfs/go-cid" +) + +const repubMsgLimit = 30 + +var RepublishBatchDelay = 100 * time.Millisecond + +func (mp *MessagePool) republishPendingMessages() error { + mp.curTsLk.Lock() + ts := mp.curTs + + baseFee, err := mp.api.ChainComputeBaseFee(context.TODO(), ts) + if err != nil { + mp.curTsLk.Unlock() + return xerrors.Errorf("computing basefee: %w", err) + } + baseFeeLowerBound := getBaseFeeLowerBound(baseFee, baseFeeLowerBoundFactor) + + pending := make(map[address.Address]map[uint64]*types.SignedMessage) + mp.lk.Lock() + mp.republished = nil // clear this to avoid races triggering an early republish + for actor := range mp.localAddrs { + mset, ok := mp.pending[actor] + if !ok { + continue + } + if len(mset.msgs) == 0 { + continue + } + // we need to copy this 
while holding the lock to avoid races with concurrent modification + pend := make(map[uint64]*types.SignedMessage, len(mset.msgs)) + for nonce, m := range mset.msgs { + pend[nonce] = m + } + pending[actor] = pend + } + mp.lk.Unlock() + mp.curTsLk.Unlock() + + if len(pending) == 0 { + return nil + } + + var chains []*msgChain + for actor, mset := range pending { + // We use the baseFee lower bound for createChange so that we optimistically include + // chains that might become profitable in the next 20 blocks. + // We still check the lowerBound condition for individual messages so that we don't send + // messages that will be rejected by the mpool spam protector, so this is safe to do. + next := mp.createMessageChains(actor, mset, baseFeeLowerBound, ts) + chains = append(chains, next...) + } + + if len(chains) == 0 { + return nil + } + + sort.Slice(chains, func(i, j int) bool { + return chains[i].Before(chains[j]) + }) + + gasLimit := int64(build.BlockGasLimit) + minGas := int64(gasguess.MinGas) + var msgs []*types.SignedMessage +loop: + for i := 0; i < len(chains); { + chain := chains[i] + + // we can exceed this if we have picked (some) longer chain already + if len(msgs) > repubMsgLimit { + break + } + + // there is not enough gas for any message + if gasLimit <= minGas { + break + } + + // has the chain been invalidated? + if !chain.valid { + i++ + continue + } + + // does it fit in a block? + if chain.gasLimit <= gasLimit { + // check the baseFee lower bound -- only republish messages that can be included in the chain + // within the next 20 blocks. 
+ for _, m := range chain.msgs { + if !allowNegativeChains(ts.Height()) && m.Message.GasFeeCap.LessThan(baseFeeLowerBound) { + chain.Invalidate() + continue loop + } + gasLimit -= m.Message.GasLimit + msgs = append(msgs, m) + } + + // we processed the whole chain, advance + i++ + continue + } + + // we can't fit the current chain but there is gas to spare + // trim it and push it down + chain.Trim(gasLimit, mp, baseFee, true) + for j := i; j < len(chains)-1; j++ { + if chains[j].Before(chains[j+1]) { + break + } + chains[j], chains[j+1] = chains[j+1], chains[j] + } + } + + count := 0 + log.Infof("republishing %d messages", len(msgs)) + for _, m := range msgs { + mb, err := m.Serialize() + if err != nil { + return xerrors.Errorf("cannot serialize message: %w", err) + } + + err = mp.api.PubSubPublish(build.MessagesTopic(mp.netName), mb) + if err != nil { + return xerrors.Errorf("cannot publish: %w", err) + } + + count++ + + if count < len(msgs) { + // this delay is here to encourage the pubsub subsystem to process the messages serially + // and avoid creating nonce gaps because of concurrent validation. 
+ time.Sleep(RepublishBatchDelay) + } + } + + if len(msgs) > 0 { + journal.J.RecordEvent(mp.evtTypes[evtTypeMpoolRepub], func() interface{} { + msgs := make([]MessagePoolEvtMessage, 0, len(msgs)) + for _, m := range msgs { + msgs = append(msgs, MessagePoolEvtMessage{Message: m.Message, CID: m.Cid()}) + } + return MessagePoolEvt{ + Action: "repub", + Messages: msgs, + } + }) + } + + // track most recently republished messages + republished := make(map[cid.Cid]struct{}) + for _, m := range msgs[:count] { + republished[m.Cid()] = struct{}{} + } + + mp.lk.Lock() + // update the republished set so that we can trigger early republish from head changes + mp.republished = republished + mp.lk.Unlock() + + return nil +} diff --git a/chain/messagepool/repub_test.go b/chain/messagepool/repub_test.go new file mode 100644 index 000000000..491f484f5 --- /dev/null +++ b/chain/messagepool/repub_test.go @@ -0,0 +1,72 @@ +package messagepool + +import ( + "testing" + "time" + + "github.com/filecoin-project/go-state-types/crypto" + "github.com/filecoin-project/lotus/chain/messagepool/gasguess" + "github.com/filecoin-project/lotus/chain/wallet" + "github.com/filecoin-project/specs-actors/actors/builtin" + "github.com/ipfs/go-datastore" +) + +func TestRepubMessages(t *testing.T) { + oldRepublishBatchDelay := RepublishBatchDelay + RepublishBatchDelay = time.Microsecond + defer func() { + RepublishBatchDelay = oldRepublishBatchDelay + }() + + tma := newTestMpoolAPI() + ds := datastore.NewMapDatastore() + + mp, err := New(tma, ds, "mptest") + if err != nil { + t.Fatal(err) + } + + // the actors + w1, err := wallet.NewWallet(wallet.NewMemKeyStore()) + if err != nil { + t.Fatal(err) + } + + a1, err := w1.GenerateKey(crypto.SigTypeSecp256k1) + if err != nil { + t.Fatal(err) + } + + w2, err := wallet.NewWallet(wallet.NewMemKeyStore()) + if err != nil { + t.Fatal(err) + } + + a2, err := w2.GenerateKey(crypto.SigTypeSecp256k1) + if err != nil { + t.Fatal(err) + } + + gasLimit := 
gasguess.Costs[gasguess.CostKey{Code: builtin.StorageMarketActorCodeID, M: 2}] + + tma.setBalance(a1, 1) // in FIL + + for i := 0; i < 10; i++ { + m := makeTestMessage(w1, a1, a2, uint64(i), gasLimit, uint64(i+1)) + _, err := mp.Push(m) + if err != nil { + t.Fatal(err) + } + } + + if tma.published != 10 { + t.Fatalf("expected to have published 10 messages, but got %d instead", tma.published) + } + + mp.repubTrigger <- struct{}{} + time.Sleep(100 * time.Millisecond) + + if tma.published != 20 { + t.Fatalf("expected to have published 20 messages, but got %d instead", tma.published) + } +} diff --git a/chain/messagepool/selection.go b/chain/messagepool/selection.go new file mode 100644 index 000000000..2ddbed0ad --- /dev/null +++ b/chain/messagepool/selection.go @@ -0,0 +1,939 @@ +package messagepool + +import ( + "context" + "math/big" + "math/rand" + "sort" + "time" + + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + tbig "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/chain/messagepool/gasguess" + "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/chain/vm" +) + +var bigBlockGasLimit = big.NewInt(build.BlockGasLimit) + +// this is *temporary* mutilation until we have implemented uncapped miner penalties -- it will go +// away in the next fork. 
+func allowNegativeChains(epoch abi.ChainEpoch) bool { + return epoch < build.UpgradeBreezeHeight+5 +} + +const MaxBlocks = 15 + +type msgChain struct { + msgs []*types.SignedMessage + gasReward *big.Int + gasLimit int64 + gasPerf float64 + effPerf float64 + bp float64 + parentOffset float64 + valid bool + merged bool + next *msgChain + prev *msgChain +} + +func (mp *MessagePool) SelectMessages(ts *types.TipSet, tq float64) ([]*types.SignedMessage, error) { + mp.curTsLk.Lock() + defer mp.curTsLk.Unlock() + + mp.lk.Lock() + defer mp.lk.Unlock() + + // if the ticket quality is high enough that the first block has higher probability + // than any other block, then we don't bother with optimal selection because the + // first block will always have higher effective performance + if tq > 0.84 { + return mp.selectMessagesGreedy(mp.curTs, ts) + } + + return mp.selectMessagesOptimal(mp.curTs, ts, tq) +} + +func (mp *MessagePool) selectMessagesOptimal(curTs, ts *types.TipSet, tq float64) ([]*types.SignedMessage, error) { + start := time.Now() + + baseFee, err := mp.api.ChainComputeBaseFee(context.TODO(), ts) + if err != nil { + return nil, xerrors.Errorf("computing basefee: %w", err) + } + + // 0. Load messages from the target tipset; if it is the same as the current tipset in + // the mpool, then this is just the pending messages + pending, err := mp.getPendingMessages(curTs, ts) + if err != nil { + return nil, err + } + + if len(pending) == 0 { + return nil, nil + } + + // defer only here so if we have no pending messages we don't spam + defer func() { + log.Infow("message selection done", "took", time.Since(start)) + }() + + // 0b. Select all priority messages that fit in the block + minGas := int64(gasguess.MinGas) + result, gasLimit := mp.selectPriorityMessages(pending, baseFee, ts) + + // have we filled the block? + if gasLimit < minGas { + return result, nil + } + + // 1. 
Create a list of dependent message chains with maximal gas reward per limit consumed + startChains := time.Now() + var chains []*msgChain + for actor, mset := range pending { + next := mp.createMessageChains(actor, mset, baseFee, ts) + chains = append(chains, next...) + } + if dt := time.Since(startChains); dt > time.Millisecond { + log.Infow("create message chains done", "took", dt) + } + + // 2. Sort the chains + sort.Slice(chains, func(i, j int) bool { + return chains[i].Before(chains[j]) + }) + + if !allowNegativeChains(curTs.Height()) && len(chains) != 0 && chains[0].gasPerf < 0 { + log.Warnw("all messages in mpool have non-positive gas performance", "bestGasPerf", chains[0].gasPerf) + return result, nil + } + + // 3. Partition chains into blocks (without trimming) + // we use the full blockGasLimit (as opposed to the residual gas limit from the + // priority message selection) as we have to account for what other miners are doing + nextChain := 0 + partitions := make([][]*msgChain, MaxBlocks) + for i := 0; i < MaxBlocks && nextChain < len(chains); i++ { + gasLimit := int64(build.BlockGasLimit) + for nextChain < len(chains) { + chain := chains[nextChain] + nextChain++ + partitions[i] = append(partitions[i], chain) + gasLimit -= chain.gasLimit + if gasLimit < minGas { + break + } + } + + } + + // 4. Compute effective performance for each chain, based on the partition they fall into + // The effective performance is the gasPerf of the chain * block probability + blockProb := mp.blockProbabilities(tq) + effChains := 0 + for i := 0; i < MaxBlocks; i++ { + for _, chain := range partitions[i] { + chain.SetEffectivePerf(blockProb[i]) + } + effChains += len(partitions[i]) + } + + // nullify the effective performance of chains that don't fit in any partition + for _, chain := range chains[effChains:] { + chain.SetNullEffectivePerf() + } + + // 5. 
Resort the chains based on effective performance + sort.Slice(chains, func(i, j int) bool { + return chains[i].BeforeEffective(chains[j]) + }) + + // 6. Merge the head chains to produce the list of messages selected for inclusion + // subject to the residual gas limit + // When a chain is merged in, all its previous dependent chains *must* also be + // merged in or we'll have a broken block + startMerge := time.Now() + last := len(chains) + for i, chain := range chains { + // did we run out of performing chains? + if !allowNegativeChains(curTs.Height()) && chain.gasPerf < 0 { + break + } + + // has it already been merged? + if chain.merged { + continue + } + + // compute the dependencies that must be merged and the gas limit including deps + chainGasLimit := chain.gasLimit + var chainDeps []*msgChain + for curChain := chain.prev; curChain != nil && !curChain.merged; curChain = curChain.prev { + chainDeps = append(chainDeps, curChain) + chainGasLimit += curChain.gasLimit + } + + // does it all fit in the block? + if chainGasLimit <= gasLimit { + // include it together with all dependencies + for i := len(chainDeps) - 1; i >= 0; i-- { + curChain := chainDeps[i] + curChain.merged = true + result = append(result, curChain.msgs...) + } + + chain.merged = true + // adjust the effective performance for all subsequent chains + if next := chain.next; next != nil && next.effPerf > 0 { + next.effPerf += next.parentOffset + for next = next.next; next != nil && next.effPerf > 0; next = next.next { + next.setEffPerf() + } + } + result = append(result, chain.msgs...) + gasLimit -= chainGasLimit + + // resort to account for already merged chains and effective performance adjustments + // the sort *must* be stable or we end up getting negative gasPerfs pushed up. 
+ sort.SliceStable(chains[i+1:], func(i, j int) bool { + return chains[i].BeforeEffective(chains[j]) + }) + + continue + } + + // we can't fit this chain and its dependencies because of block gasLimit -- we are + // at the edge + last = i + break + } + if dt := time.Since(startMerge); dt > time.Millisecond { + log.Infow("merge message chains done", "took", dt) + } + + // 7. We have reached the edge of what can fit wholesale; if we still have available + // gasLimit to pack some more chains, then trim the last chain and push it down. + // Trimming invalidates subsequent dependent chains so that they can't be selected + // as their dependency cannot be (fully) included. + // We do this in a loop because the blocker might have been inordinately large and + // we might have to do it multiple times to satisfy tail packing + startTail := time.Now() +tailLoop: + for gasLimit >= minGas && last < len(chains) { + // trim if necessary + if chains[last].gasLimit > gasLimit { + chains[last].Trim(gasLimit, mp, baseFee, allowNegativeChains(curTs.Height())) + } + + // push down if it hasn't been invalidated + if chains[last].valid { + for i := last; i < len(chains)-1; i++ { + if chains[i].BeforeEffective(chains[i+1]) { + break + } + chains[i], chains[i+1] = chains[i+1], chains[i] + } + } + + // select the next (valid and fitting) chain and its dependencies for inclusion + for i, chain := range chains[last:] { + // has the chain been invalidated? + if !chain.valid { + continue + } + + // has it already been merged? 
+ if chain.merged { + continue + } + + // if gasPerf < 0 we have no more profitable chains + if !allowNegativeChains(curTs.Height()) && chain.gasPerf < 0 { + break tailLoop + } + + // compute the dependencies that must be merged and the gas limit including deps + chainGasLimit := chain.gasLimit + depGasLimit := int64(0) + var chainDeps []*msgChain + for curChain := chain.prev; curChain != nil && !curChain.merged; curChain = curChain.prev { + chainDeps = append(chainDeps, curChain) + chainGasLimit += curChain.gasLimit + depGasLimit += curChain.gasLimit + } + + // does it all fit in the block + if chainGasLimit <= gasLimit { + // include it together with all dependencies + for i := len(chainDeps) - 1; i >= 0; i-- { + curChain := chainDeps[i] + curChain.merged = true + result = append(result, curChain.msgs...) + } + + chain.merged = true + result = append(result, chain.msgs...) + gasLimit -= chainGasLimit + continue + } + + // it doesn't all fit; now we have to take into account the dependent chains before + // making a decision about trimming or invalidating. + // if the dependencies exceed the gas limit, then we must invalidate the chain + // as it can never be included. + // Otherwise we can just trim and continue + if depGasLimit > gasLimit { + chain.Invalidate() + last += i + 1 + continue tailLoop + } + + // dependencies fit, just trim it + chain.Trim(gasLimit-depGasLimit, mp, baseFee, allowNegativeChains(curTs.Height())) + last += i + continue tailLoop + } + + // the merge loop ended after processing all the chains and we probably have still + // gas to spare; end the loop. 
+ break + } + if dt := time.Since(startTail); dt > time.Millisecond { + log.Infow("pack tail chains done", "took", dt) + } + + // if we have gasLimit to spare, pick some random (non-negative) chains to fill the block + // we pick randomly so that we minimize the probability of duplication among all miners + if gasLimit >= minGas { + randomCount := 0 + + startRandom := time.Now() + shuffleChains(chains) + + for _, chain := range chains { + // have we filled the block + if gasLimit < minGas { + break + } + + // has it been merged or invalidated? + if chain.merged || !chain.valid { + continue + } + + // is it negative? + if !allowNegativeChains(curTs.Height()) && chain.gasPerf < 0 { + continue + } + + // compute the dependencies that must be merged and the gas limit including deps + chainGasLimit := chain.gasLimit + depGasLimit := int64(0) + var chainDeps []*msgChain + for curChain := chain.prev; curChain != nil && !curChain.merged; curChain = curChain.prev { + chainDeps = append(chainDeps, curChain) + chainGasLimit += curChain.gasLimit + depGasLimit += curChain.gasLimit + } + + // do the deps fit? if the deps won't fit, invalidate the chain + if depGasLimit > gasLimit { + chain.Invalidate() + continue + } + + // do they fit as is? if it doesn't, trim to make it fit if possible + if chainGasLimit > gasLimit { + chain.Trim(gasLimit-depGasLimit, mp, baseFee, allowNegativeChains(curTs.Height())) + + if !chain.valid { + continue + } + } + + // include it together with all dependencies + for i := len(chainDeps) - 1; i >= 0; i-- { + curChain := chainDeps[i] + curChain.merged = true + result = append(result, curChain.msgs...) + randomCount += len(curChain.msgs) + } + + chain.merged = true + result = append(result, chain.msgs...) 
+ randomCount += len(chain.msgs) + gasLimit -= chainGasLimit + } + + if dt := time.Since(startRandom); dt > time.Millisecond { + log.Infow("pack random tail chains done", "took", dt) + } + + if randomCount > 0 { + log.Warnf("optimal selection failed to pack a block; picked %d messages with random selection", + randomCount) + } + } + + return result, nil +} + +func (mp *MessagePool) selectMessagesGreedy(curTs, ts *types.TipSet) ([]*types.SignedMessage, error) { + start := time.Now() + + baseFee, err := mp.api.ChainComputeBaseFee(context.TODO(), ts) + if err != nil { + return nil, xerrors.Errorf("computing basefee: %w", err) + } + + // 0. Load messages for the target tipset; if it is the same as the current tipset in the mpool + // then this is just the pending messages + pending, err := mp.getPendingMessages(curTs, ts) + if err != nil { + return nil, err + } + + if len(pending) == 0 { + return nil, nil + } + + // defer only here so if we have no pending messages we don't spam + defer func() { + log.Infow("message selection done", "took", time.Since(start)) + }() + + // 0b. Select all priority messages that fit in the block + minGas := int64(gasguess.MinGas) + result, gasLimit := mp.selectPriorityMessages(pending, baseFee, ts) + + // have we filled the block? + if gasLimit < minGas { + return result, nil + } + + // 1. Create a list of dependent message chains with maximal gas reward per limit consumed + startChains := time.Now() + var chains []*msgChain + for actor, mset := range pending { + next := mp.createMessageChains(actor, mset, baseFee, ts) + chains = append(chains, next...) + } + if dt := time.Since(startChains); dt > time.Millisecond { + log.Infow("create message chains done", "took", dt) + } + + // 2. 
Sort the chains + sort.Slice(chains, func(i, j int) bool { + return chains[i].Before(chains[j]) + }) + + if !allowNegativeChains(curTs.Height()) && len(chains) != 0 && chains[0].gasPerf < 0 { + log.Warnw("all messages in mpool have non-positive gas performance", "bestGasPerf", chains[0].gasPerf) + return result, nil + } + + // 3. Merge the head chains to produce the list of messages selected for inclusion, subject to + // the block gas limit. + startMerge := time.Now() + last := len(chains) + for i, chain := range chains { + // did we run out of performing chains? + if !allowNegativeChains(curTs.Height()) && chain.gasPerf < 0 { + break + } + + // does it fit in the block? + if chain.gasLimit <= gasLimit { + gasLimit -= chain.gasLimit + result = append(result, chain.msgs...) + continue + } + + // we can't fit this chain because of block gasLimit -- we are at the edge + last = i + break + } + if dt := time.Since(startMerge); dt > time.Millisecond { + log.Infow("merge message chains done", "took", dt) + } + + // 4. We have reached the edge of what we can fit wholesale; if we still have available gasLimit + // to pack some more chains, then trim the last chain and push it down. + // Trimming invalidates subsequent dependent chains so that they can't be selected as their + // dependency cannot be (fully) included. + // We do this in a loop because the blocker might have been inordinately large and we might + // have to do it multiple times to satisfy tail packing. 
+ startTail := time.Now() +tailLoop: + for gasLimit >= minGas && last < len(chains) { + // trim + chains[last].Trim(gasLimit, mp, baseFee, allowNegativeChains(curTs.Height())) + + // push down if it hasn't been invalidated + if chains[last].valid { + for i := last; i < len(chains)-1; i++ { + if chains[i].Before(chains[i+1]) { + break + } + chains[i], chains[i+1] = chains[i+1], chains[i] + } + } + + // select the next (valid and fitting) chain for inclusion + for i, chain := range chains[last:] { + // has the chain been invalidated? + if !chain.valid { + continue + } + + // if gasPerf < 0 we have no more profitable chains + if !allowNegativeChains(curTs.Height()) && chain.gasPerf < 0 { + break tailLoop + } + + // does it fit in the block? + if chain.gasLimit <= gasLimit { + gasLimit -= chain.gasLimit + result = append(result, chain.msgs...) + continue + } + + // this chain needs to be trimmed + last += i + continue tailLoop + } + + // the merge loop ended after processing all the chains and we probably still have + // gas to spare; end the loop + break + } + if dt := time.Since(startTail); dt > time.Millisecond { + log.Infow("pack tail chains done", "took", dt) + } + + return result, nil +} + +func (mp *MessagePool) selectPriorityMessages(pending map[address.Address]map[uint64]*types.SignedMessage, baseFee types.BigInt, ts *types.TipSet) ([]*types.SignedMessage, int64) { + start := time.Now() + defer func() { + if dt := time.Since(start); dt > time.Millisecond { + log.Infow("select priority messages done", "took", dt) + } + }() + + result := make([]*types.SignedMessage, 0, mp.cfg.SizeLimitLow) + gasLimit := int64(build.BlockGasLimit) + minGas := int64(gasguess.MinGas) + + // 1. 
Get priority actor chains + var chains []*msgChain + priority := mp.cfg.PriorityAddrs + for _, actor := range priority { + mset, ok := pending[actor] + if ok { + // remove actor from pending set as we have already processed these messages + delete(pending, actor) + // create chains for the priority actor + next := mp.createMessageChains(actor, mset, baseFee, ts) + chains = append(chains, next...) + } + } + + if len(chains) == 0 { + return nil, gasLimit + } + + // 2. Sort the chains + sort.Slice(chains, func(i, j int) bool { + return chains[i].Before(chains[j]) + }) + + if !allowNegativeChains(ts.Height()) && len(chains) != 0 && chains[0].gasPerf < 0 { + log.Warnw("all priority messages in mpool have negative gas performance", "bestGasPerf", chains[0].gasPerf) + return nil, gasLimit + } + + // 3. Merge chains until the block limit, as long as they have non-negative gas performance + last := len(chains) + for i, chain := range chains { + if !allowNegativeChains(ts.Height()) && chain.gasPerf < 0 { + break + } + + if chain.gasLimit <= gasLimit { + gasLimit -= chain.gasLimit + result = append(result, chain.msgs...) 
+ continue + } + + // we can't fit this chain because of block gasLimit -- we are at the edge + last = i + break + } + +tailLoop: + for gasLimit >= minGas && last < len(chains) { + // trim, discarding negative performing messages + chains[last].Trim(gasLimit, mp, baseFee, allowNegativeChains(ts.Height())) + + // push down if it hasn't been invalidated + if chains[last].valid { + for i := last; i < len(chains)-1; i++ { + if chains[i].Before(chains[i+1]) { + break + } + chains[i], chains[i+1] = chains[i+1], chains[i] + } + } + + // select the next (valid and fitting) chain for inclusion + for i, chain := range chains[last:] { + // has the chain been invalidated + if !chain.valid { + continue + } + + // if gasPerf < 0 we have no more profitable chains + if !allowNegativeChains(ts.Height()) && chain.gasPerf < 0 { + break tailLoop + } + + // does it fit in the block? + if chain.gasLimit <= gasLimit { + gasLimit -= chain.gasLimit + result = append(result, chain.msgs...) + continue + } + + // this chain needs to be trimmed + last += i + continue tailLoop + } + + // the merge loop ended after processing all the chains and we probably still have gas to spare; + // end the loop + break + } + + return result, gasLimit +} + +func (mp *MessagePool) getPendingMessages(curTs, ts *types.TipSet) (map[address.Address]map[uint64]*types.SignedMessage, error) { + start := time.Now() + + result := make(map[address.Address]map[uint64]*types.SignedMessage) + defer func() { + if dt := time.Since(start); dt > time.Millisecond { + log.Infow("get pending messages done", "took", dt) + } + }() + + // are we in sync? 
+ inSync := false + if curTs.Height() == ts.Height() && curTs.Equals(ts) { + inSync = true + } + + // first add our current pending messages + for a, mset := range mp.pending { + if inSync { + // no need to copy the map + result[a] = mset.msgs + } else { + // we need to copy the map to avoid clobbering it as we load more messages + msetCopy := make(map[uint64]*types.SignedMessage, len(mset.msgs)) + for nonce, m := range mset.msgs { + msetCopy[nonce] = m + } + result[a] = msetCopy + + } + } + + // we are in sync, that's the happy path + if inSync { + return result, nil + } + + if err := mp.runHeadChange(curTs, ts, result); err != nil { + return nil, xerrors.Errorf("failed to process difference between mpool head and given head: %w", err) + } + + return result, nil +} + +func (*MessagePool) getGasReward(msg *types.SignedMessage, baseFee types.BigInt) *big.Int { + maxPremium := types.BigSub(msg.Message.GasFeeCap, baseFee) + + if types.BigCmp(maxPremium, msg.Message.GasPremium) > 0 { + maxPremium = msg.Message.GasPremium + } + + gasReward := tbig.Mul(maxPremium, types.NewInt(uint64(msg.Message.GasLimit))) + return gasReward.Int +} + +func (*MessagePool) getGasPerf(gasReward *big.Int, gasLimit int64) float64 { + // gasPerf = gasReward * build.BlockGasLimit / gasLimit + a := new(big.Rat).SetInt(new(big.Int).Mul(gasReward, bigBlockGasLimit)) + b := big.NewRat(1, gasLimit) + c := new(big.Rat).Mul(a, b) + r, _ := c.Float64() + return r +} + +func (mp *MessagePool) createMessageChains(actor address.Address, mset map[uint64]*types.SignedMessage, baseFee types.BigInt, ts *types.TipSet) []*msgChain { + // collect all messages + msgs := make([]*types.SignedMessage, 0, len(mset)) + for _, m := range mset { + msgs = append(msgs, m) + } + + // sort by nonce + sort.Slice(msgs, func(i, j int) bool { + return msgs[i].Message.Nonce < msgs[j].Message.Nonce + }) + + // sanity checks: + // - there can be no gaps in nonces, starting from the current actor nonce + // if there is a gap, drop 
messages after the gap, we can't include them + // - all messages must have minimum gas and the total gas for the candidate messages + // cannot exceed the block limit; drop all messages that exceed the limit + // - the total gasReward cannot exceed the actor's balance; drop all messages that exceed + // the balance + a, err := mp.api.GetActorAfter(actor, ts) + if err != nil { + log.Errorf("failed to load actor state, not building chain for %s: %w", actor, err) + return nil + } + + curNonce := a.Nonce + balance := a.Balance.Int + gasLimit := int64(0) + skip := 0 + i := 0 + rewards := make([]*big.Int, 0, len(msgs)) + for i = 0; i < len(msgs); i++ { + m := msgs[i] + + if m.Message.Nonce < curNonce { + log.Warnf("encountered message from actor %s with nonce (%d) less than the current nonce (%d)", + actor, m.Message.Nonce, curNonce) + skip++ + continue + } + + if m.Message.Nonce != curNonce { + break + } + curNonce++ + + minGas := vm.PricelistByEpoch(ts.Height()).OnChainMessage(m.ChainLength()).Total() + if m.Message.GasLimit < minGas { + break + } + + gasLimit += m.Message.GasLimit + if gasLimit > build.BlockGasLimit { + break + } + + required := m.Message.RequiredFunds().Int + if balance.Cmp(required) < 0 { + break + } + balance = new(big.Int).Sub(balance, required) + + value := m.Message.Value.Int + if balance.Cmp(value) >= 0 { + // Note: we only account for the value if the balance doesn't drop below 0 + // otherwise the message will fail and the miner can reap the gas rewards + balance = new(big.Int).Sub(balance, value) + } + + gasReward := mp.getGasReward(m, baseFee) + rewards = append(rewards, gasReward) + } + + // check we have a sane set of messages to construct the chains + if i > skip { + msgs = msgs[skip:i] + } else { + return nil + } + + // ok, now we can construct the chains using the messages we have + // invariant: each chain has a bigger gasPerf than the next -- otherwise they can be merged + // and increase the gasPerf of the first chain + // We do 
this in two passes: + // - in the first pass we create chains that aggregate messages with non-decreasing gasPerf + // - in the second pass we merge chains to maintain the invariant. + var chains []*msgChain + var curChain *msgChain + + newChain := func(m *types.SignedMessage, i int) *msgChain { + chain := new(msgChain) + chain.msgs = []*types.SignedMessage{m} + chain.gasReward = rewards[i] + chain.gasLimit = m.Message.GasLimit + chain.gasPerf = mp.getGasPerf(chain.gasReward, chain.gasLimit) + chain.valid = true + return chain + } + + // create the individual chains + for i, m := range msgs { + if curChain == nil { + curChain = newChain(m, i) + continue + } + + gasReward := new(big.Int).Add(curChain.gasReward, rewards[i]) + gasLimit := curChain.gasLimit + m.Message.GasLimit + gasPerf := mp.getGasPerf(gasReward, gasLimit) + + // try to add the message to the current chain -- if it decreases the gasPerf, then make a + // new chain + if gasPerf < curChain.gasPerf { + chains = append(chains, curChain) + curChain = newChain(m, i) + } else { + curChain.msgs = append(curChain.msgs, m) + curChain.gasReward = gasReward + curChain.gasLimit = gasLimit + curChain.gasPerf = gasPerf + } + } + chains = append(chains, curChain) + + // merge chains to maintain the invariant + for { + merged := 0 + + for i := len(chains) - 1; i > 0; i-- { + if chains[i].gasPerf >= chains[i-1].gasPerf { + chains[i-1].msgs = append(chains[i-1].msgs, chains[i].msgs...) 
+ chains[i-1].gasReward = new(big.Int).Add(chains[i-1].gasReward, chains[i].gasReward) + chains[i-1].gasLimit += chains[i].gasLimit + chains[i-1].gasPerf = mp.getGasPerf(chains[i-1].gasReward, chains[i-1].gasLimit) + chains[i].valid = false + merged++ + } + } + + if merged == 0 { + break + } + + // drop invalidated chains + newChains := make([]*msgChain, 0, len(chains)-merged) + for _, c := range chains { + if c.valid { + newChains = append(newChains, c) + } + } + chains = newChains + } + + // link dependent chains + for i := 0; i < len(chains)-1; i++ { + chains[i].next = chains[i+1] + } + + for i := len(chains) - 1; i > 0; i-- { + chains[i].prev = chains[i-1] + } + + return chains +} + +func (mc *msgChain) Before(other *msgChain) bool { + return mc.gasPerf > other.gasPerf || + (mc.gasPerf == other.gasPerf && mc.gasReward.Cmp(other.gasReward) > 0) +} + +func (mc *msgChain) Trim(gasLimit int64, mp *MessagePool, baseFee types.BigInt, allowNegative bool) { + i := len(mc.msgs) - 1 + for i >= 0 && (mc.gasLimit > gasLimit || (!allowNegative && mc.gasPerf < 0)) { + gasReward := mp.getGasReward(mc.msgs[i], baseFee) + mc.gasReward = new(big.Int).Sub(mc.gasReward, gasReward) + mc.gasLimit -= mc.msgs[i].Message.GasLimit + if mc.gasLimit > 0 { + mc.gasPerf = mp.getGasPerf(mc.gasReward, mc.gasLimit) + if mc.bp != 0 { + mc.setEffPerf() + } + } else { + mc.gasPerf = 0 + mc.effPerf = 0 + } + i-- + } + + if i < 0 { + mc.msgs = nil + mc.valid = false + } else { + mc.msgs = mc.msgs[:i+1] + } + + if mc.next != nil { + mc.next.Invalidate() + mc.next = nil + } +} + +func (mc *msgChain) Invalidate() { + mc.valid = false + mc.msgs = nil + if mc.next != nil { + mc.next.Invalidate() + mc.next = nil + } +} + +func (mc *msgChain) SetEffectivePerf(bp float64) { + mc.bp = bp + mc.setEffPerf() +} + +func (mc *msgChain) setEffPerf() { + effPerf := mc.gasPerf * mc.bp + if effPerf > 0 && mc.prev != nil { + effPerfWithParent := (effPerf*float64(mc.gasLimit) + 
mc.prev.effPerf*float64(mc.prev.gasLimit)) / float64(mc.gasLimit+mc.prev.gasLimit) + mc.parentOffset = effPerf - effPerfWithParent + effPerf = effPerfWithParent + } + mc.effPerf = effPerf + +} + +func (mc *msgChain) SetNullEffectivePerf() { + if mc.gasPerf < 0 { + mc.effPerf = mc.gasPerf + } else { + mc.effPerf = 0 + } +} + +func (mc *msgChain) BeforeEffective(other *msgChain) bool { + // move merged chains to the front so we can discard them earlier + return (mc.merged && !other.merged) || + (mc.gasPerf >= 0 && other.gasPerf < 0) || + mc.effPerf > other.effPerf || + (mc.effPerf == other.effPerf && mc.gasPerf > other.gasPerf) || + (mc.effPerf == other.effPerf && mc.gasPerf == other.gasPerf && mc.gasReward.Cmp(other.gasReward) > 0) +} + +func shuffleChains(lst []*msgChain) { + for i := range lst { + j := rand.Intn(i + 1) + lst[i], lst[j] = lst[j], lst[i] + } +} diff --git a/chain/messagepool/selection_test.go b/chain/messagepool/selection_test.go new file mode 100644 index 000000000..ea19dad9c --- /dev/null +++ b/chain/messagepool/selection_test.go @@ -0,0 +1,1465 @@ +package messagepool + +import ( + "compress/gzip" + "context" + "encoding/json" + "fmt" + "io" + "math" + "math/big" + "math/rand" + "os" + "sort" + "testing" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/crypto" + "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/chain/messagepool/gasguess" + "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/chain/types/mock" + "github.com/filecoin-project/lotus/chain/wallet" + "github.com/filecoin-project/specs-actors/actors/builtin" + "github.com/ipfs/go-cid" + "github.com/ipfs/go-datastore" + + _ "github.com/filecoin-project/lotus/lib/sigs/bls" + _ "github.com/filecoin-project/lotus/lib/sigs/secp" + logging "github.com/ipfs/go-log" +) + +func init() { + // bump this for the selection tests + MaxActorPendingMessages = 1000000 +} + +func makeTestMessage(w 
*wallet.Wallet, from, to address.Address, nonce uint64, gasLimit int64, gasPrice uint64) *types.SignedMessage { + msg := &types.Message{ + From: from, + To: to, + Method: 2, + Value: types.FromFil(0), + Nonce: nonce, + GasLimit: gasLimit, + GasFeeCap: types.NewInt(100 + gasPrice), + GasPremium: types.NewInt(gasPrice), + } + sig, err := w.Sign(context.TODO(), from, msg.Cid().Bytes()) + if err != nil { + panic(err) + } + return &types.SignedMessage{ + Message: *msg, + Signature: *sig, + } +} + +func makeTestMpool() (*MessagePool, *testMpoolAPI) { + tma := newTestMpoolAPI() + ds := datastore.NewMapDatastore() + mp, err := New(tma, ds, "test") + if err != nil { + panic(err) + } + + return mp, tma +} + +func TestMessageChains(t *testing.T) { + mp, tma := makeTestMpool() + + // the actors + w1, err := wallet.NewWallet(wallet.NewMemKeyStore()) + if err != nil { + t.Fatal(err) + } + + a1, err := w1.GenerateKey(crypto.SigTypeSecp256k1) + if err != nil { + t.Fatal(err) + } + + w2, err := wallet.NewWallet(wallet.NewMemKeyStore()) + if err != nil { + t.Fatal(err) + } + + a2, err := w2.GenerateKey(crypto.SigTypeSecp256k1) + if err != nil { + t.Fatal(err) + } + + block := tma.nextBlock() + ts := mock.TipSet(block) + + gasLimit := gasguess.Costs[gasguess.CostKey{Code: builtin.StorageMarketActorCodeID, M: 2}] + + tma.setBalance(a1, 1) // in FIL + + // test chain aggregations + + // test1: 10 messages from a1 to a2, with increasing gasPerf; it should + // make a single chain with 10 messages given enough balance + mset := make(map[uint64]*types.SignedMessage) + for i := 0; i < 10; i++ { + m := makeTestMessage(w1, a1, a2, uint64(i), gasLimit, uint64(i+1)) + mset[uint64(i)] = m + } + baseFee := types.NewInt(0) + + chains := mp.createMessageChains(a1, mset, baseFee, ts) + if len(chains) != 1 { + t.Fatal("expected a single chain") + } + if len(chains[0].msgs) != 10 { + t.Fatalf("expected 10 messages in the chain but got %d", len(chains[0].msgs)) + } + for i, m := range chains[0].msgs { 
+ if m.Message.Nonce != uint64(i) { + t.Fatalf("expected nonce %d but got %d", i, m.Message.Nonce) + } + } + + // test2 : 10 messages from a1 to a2, with decreasing gasPerf; it should + // make 10 chains with 1 message each + mset = make(map[uint64]*types.SignedMessage) + for i := 0; i < 10; i++ { + m := makeTestMessage(w1, a1, a2, uint64(i), gasLimit, uint64(10-i)) + mset[uint64(i)] = m + } + + chains = mp.createMessageChains(a1, mset, baseFee, ts) + if len(chains) != 10 { + t.Fatal("expected 10 chains") + } + for i, chain := range chains { + if len(chain.msgs) != 1 { + t.Fatalf("expected 1 message in chain %d but got %d", i, len(chain.msgs)) + } + } + for i, chain := range chains { + m := chain.msgs[0] + if m.Message.Nonce != uint64(i) { + t.Fatalf("expected nonce %d but got %d", i, m.Message.Nonce) + } + } + + // test3a: 10 messages from a1 to a2, with gasPerf increasing in groups of 3; it should + // merge them in two chains, one with 9 messages and one with the last message + mset = make(map[uint64]*types.SignedMessage) + for i := 0; i < 10; i++ { + m := makeTestMessage(w1, a1, a2, uint64(i), gasLimit, uint64(1+i%3)) + mset[uint64(i)] = m + } + + chains = mp.createMessageChains(a1, mset, baseFee, ts) + if len(chains) != 2 { + t.Fatal("expected 1 chain") + } + + if len(chains[0].msgs) != 9 { + t.Fatalf("expected 9 messages in the chain but got %d", len(chains[0].msgs)) + } + if len(chains[1].msgs) != 1 { + t.Fatalf("expected 1 messages in the chain but got %d", len(chains[1].msgs)) + } + nextNonce := 0 + for _, chain := range chains { + for _, m := range chain.msgs { + if m.Message.Nonce != uint64(nextNonce) { + t.Fatalf("expected nonce %d but got %d", nextNonce, m.Message.Nonce) + } + nextNonce++ + } + } + + // test3b: 10 messages from a1 to a2, with gasPerf decreasing in groups of 3 with a bias for the + // earlier chains; it should make 4 chains, the first 3 with 3 messages and the last with + // a single message + mset = 
make(map[uint64]*types.SignedMessage) + for i := 0; i < 10; i++ { + bias := (12 - i) / 3 + m := makeTestMessage(w1, a1, a2, uint64(i), gasLimit, uint64(1+i%3+bias)) + mset[uint64(i)] = m + } + + chains = mp.createMessageChains(a1, mset, baseFee, ts) + if len(chains) != 4 { + t.Fatal("expected 4 chains") + } + for i, chain := range chains { + expectedLen := 3 + if i > 2 { + expectedLen = 1 + } + if len(chain.msgs) != expectedLen { + t.Fatalf("expected %d message in chain %d but got %d", expectedLen, i, len(chain.msgs)) + } + } + nextNonce = 0 + for _, chain := range chains { + for _, m := range chain.msgs { + if m.Message.Nonce != uint64(nextNonce) { + t.Fatalf("expected nonce %d but got %d", nextNonce, m.Message.Nonce) + } + nextNonce++ + } + } + + // test chain breaks + + // test4: 10 messages with non-consecutive nonces; it should make a single chain with just + // the first message + mset = make(map[uint64]*types.SignedMessage) + for i := 0; i < 10; i++ { + m := makeTestMessage(w1, a1, a2, uint64(i*2), gasLimit, uint64(i+1)) + mset[uint64(i)] = m + } + + chains = mp.createMessageChains(a1, mset, baseFee, ts) + if len(chains) != 1 { + t.Fatal("expected a single chain") + } + if len(chains[0].msgs) != 1 { + t.Fatalf("expected 1 message in the chain but got %d", len(chains[0].msgs)) + } + for i, m := range chains[0].msgs { + if m.Message.Nonce != uint64(i) { + t.Fatalf("expected nonce %d but got %d", i, m.Message.Nonce) + } + } + + // test5: 10 messages with increasing gasLimit, except for the 6th message which has less than + // the epoch gasLimit; it should create a single chain with the first 5 messages + mset = make(map[uint64]*types.SignedMessage) + for i := 0; i < 10; i++ { + var m *types.SignedMessage + if i != 5 { + m = makeTestMessage(w1, a1, a2, uint64(i), gasLimit, uint64(i+1)) + } else { + m = makeTestMessage(w1, a1, a2, uint64(i), 1, uint64(i+1)) + } + mset[uint64(i)] = m + } + + chains = mp.createMessageChains(a1, mset, baseFee, ts) + if len(chains) 
!= 1 { + t.Fatal("expected a single chain") + } + if len(chains[0].msgs) != 5 { + t.Fatalf("expected 5 message in the chain but got %d", len(chains[0].msgs)) + } + for i, m := range chains[0].msgs { + if m.Message.Nonce != uint64(i) { + t.Fatalf("expected nonce %d but got %d", i, m.Message.Nonce) + } + } + + // test6: one more message than what can fit in a block according to gas limit, with increasing + // gasPerf; it should create a single chain with the max messages + maxMessages := int(build.BlockGasLimit / gasLimit) + nMessages := maxMessages + 1 + + mset = make(map[uint64]*types.SignedMessage) + for i := 0; i < nMessages; i++ { + mset[uint64(i)] = makeTestMessage(w1, a1, a2, uint64(i), gasLimit, uint64(i+1)) + } + + chains = mp.createMessageChains(a1, mset, baseFee, ts) + if len(chains) != 1 { + t.Fatal("expected a single chain") + } + if len(chains[0].msgs) != maxMessages { + t.Fatalf("expected %d message in the chain but got %d", maxMessages, len(chains[0].msgs)) + } + for i, m := range chains[0].msgs { + if m.Message.Nonce != uint64(i) { + t.Fatalf("expected nonce %d but got %d", i, m.Message.Nonce) + } + } + + // test5: insufficient balance for all messages + tma.setBalanceRaw(a1, types.NewInt(uint64((300)*gasLimit+1))) + + mset = make(map[uint64]*types.SignedMessage) + for i := 0; i < 10; i++ { + mset[uint64(i)] = makeTestMessage(w1, a1, a2, uint64(i), gasLimit, uint64(i+1)) + } + + chains = mp.createMessageChains(a1, mset, baseFee, ts) + if len(chains) != 1 { + t.Fatalf("expected a single chain: got %d", len(chains)) + } + if len(chains[0].msgs) != 2 { + t.Fatalf("expected %d message in the chain but got %d", 2, len(chains[0].msgs)) + } + for i, m := range chains[0].msgs { + if m.Message.Nonce != uint64(i) { + t.Fatalf("expected nonce %d but got %d", i, m.Message.Nonce) + } + } + +} + +func TestMessageChainSkipping(t *testing.T) { + // regression test for chain skip bug + + mp, tma := makeTestMpool() + + // the actors + w1, err := 
wallet.NewWallet(wallet.NewMemKeyStore()) + if err != nil { + t.Fatal(err) + } + + a1, err := w1.GenerateKey(crypto.SigTypeSecp256k1) + if err != nil { + t.Fatal(err) + } + + w2, err := wallet.NewWallet(wallet.NewMemKeyStore()) + if err != nil { + t.Fatal(err) + } + + a2, err := w2.GenerateKey(crypto.SigTypeSecp256k1) + if err != nil { + t.Fatal(err) + } + + block := tma.nextBlock() + ts := mock.TipSet(block) + + gasLimit := gasguess.Costs[gasguess.CostKey{Code: builtin.StorageMarketActorCodeID, M: 2}] + baseFee := types.NewInt(0) + + tma.setBalance(a1, 1) // in FIL + tma.setStateNonce(a1, 10) + + mset := make(map[uint64]*types.SignedMessage) + for i := 0; i < 20; i++ { + bias := (20 - i) / 3 + m := makeTestMessage(w1, a1, a2, uint64(i), gasLimit, uint64(1+i%3+bias)) + mset[uint64(i)] = m + } + + chains := mp.createMessageChains(a1, mset, baseFee, ts) + if len(chains) != 4 { + t.Fatalf("expected 4 chains, got %d", len(chains)) + } + for i, chain := range chains { + var expectedLen int + switch { + case i == 0: + expectedLen = 2 + case i > 2: + expectedLen = 2 + default: + expectedLen = 3 + } + if len(chain.msgs) != expectedLen { + t.Fatalf("expected %d message in chain %d but got %d", expectedLen, i, len(chain.msgs)) + } + } + nextNonce := 10 + for _, chain := range chains { + for _, m := range chain.msgs { + if m.Message.Nonce != uint64(nextNonce) { + t.Fatalf("expected nonce %d but got %d", nextNonce, m.Message.Nonce) + } + nextNonce++ + } + } + +} + +func TestBasicMessageSelection(t *testing.T) { + oldMaxNonceGap := MaxNonceGap + MaxNonceGap = 1000 + defer func() { + MaxNonceGap = oldMaxNonceGap + }() + + mp, tma := makeTestMpool() + + // the actors + w1, err := wallet.NewWallet(wallet.NewMemKeyStore()) + if err != nil { + t.Fatal(err) + } + + a1, err := w1.GenerateKey(crypto.SigTypeSecp256k1) + if err != nil { + t.Fatal(err) + } + + w2, err := wallet.NewWallet(wallet.NewMemKeyStore()) + if err != nil { + t.Fatal(err) + } + + a2, err := 
w2.GenerateKey(crypto.SigTypeSecp256k1) + if err != nil { + t.Fatal(err) + } + + block := tma.nextBlock() + ts := mock.TipSet(block) + tma.applyBlock(t, block) + + gasLimit := gasguess.Costs[gasguess.CostKey{Code: builtin.StorageMarketActorCodeID, M: 2}] + + tma.setBalance(a1, 1) // in FIL + tma.setBalance(a2, 1) // in FIL + + // we create 10 messages from each actor to another, with the first actor paying higher + // gas prices than the second; we expect message selection to order his messages first + for i := 0; i < 10; i++ { + m := makeTestMessage(w1, a1, a2, uint64(i), gasLimit, uint64(2*i+1)) + mustAdd(t, mp, m) + } + + for i := 0; i < 10; i++ { + m := makeTestMessage(w2, a2, a1, uint64(i), gasLimit, uint64(i+1)) + mustAdd(t, mp, m) + } + + msgs, err := mp.SelectMessages(ts, 1.0) + if err != nil { + t.Fatal(err) + } + + if len(msgs) != 20 { + t.Fatalf("exptected 20 messages, got %d", len(msgs)) + } + + nextNonce := 0 + for i := 0; i < 10; i++ { + if msgs[i].Message.From != a1 { + t.Fatalf("expected message from actor a1") + } + if msgs[i].Message.Nonce != uint64(nextNonce) { + t.Fatalf("expected nonce %d, got %d", msgs[i].Message.Nonce, nextNonce) + } + nextNonce++ + } + + nextNonce = 0 + for i := 10; i < 20; i++ { + if msgs[i].Message.From != a2 { + t.Fatalf("expected message from actor a2") + } + if msgs[i].Message.Nonce != uint64(nextNonce) { + t.Fatalf("expected nonce %d, got %d", msgs[i].Message.Nonce, nextNonce) + } + nextNonce++ + } + + // now we make a block with all the messages and advance the chain + block2 := tma.nextBlock() + tma.setBlockMessages(block2, msgs...) 
+ tma.applyBlock(t, block2) + + // we should have no pending messages in the mpool + pend, _ := mp.Pending() + if len(pend) != 0 { + t.Fatalf("expected no pending messages, but got %d", len(pend)) + } + + // create a block and advance the chain without applying to the mpool + msgs = nil + for i := 10; i < 20; i++ { + m := makeTestMessage(w1, a1, a2, uint64(i), gasLimit, uint64(2*i+1)) + msgs = append(msgs, m) + m = makeTestMessage(w2, a2, a1, uint64(i), gasLimit, uint64(i+1)) + msgs = append(msgs, m) + } + block3 := tma.nextBlock() + tma.setBlockMessages(block3, msgs...) + ts3 := mock.TipSet(block3) + + // now create another set of messages and add them to the mpool + for i := 20; i < 30; i++ { + m := makeTestMessage(w1, a1, a2, uint64(i), gasLimit, uint64(2*i+200)) + mustAdd(t, mp, m) + m = makeTestMessage(w2, a2, a1, uint64(i), gasLimit, uint64(i+1)) + mustAdd(t, mp, m) + } + + // select messages in the last tipset; this should include the missed messages as well as + // the last messages we added, with the first actor's messages first + // first we need to update the nonce on the tma + tma.setStateNonce(a1, 10) + tma.setStateNonce(a2, 10) + + msgs, err = mp.SelectMessages(ts3, 1.0) + if err != nil { + t.Fatal(err) + } + if len(msgs) != 20 { + t.Fatalf("expected 20 messages, got %d", len(msgs)) + } + + nextNonce = 20 + for i := 0; i < 10; i++ { + if msgs[i].Message.From != a1 { + t.Fatalf("expected message from actor a1") + } + if msgs[i].Message.Nonce != uint64(nextNonce) { + t.Fatalf("expected nonce %d, got %d", msgs[i].Message.Nonce, nextNonce) + } + nextNonce++ + } + + nextNonce = 20 + for i := 10; i < 20; i++ { + if msgs[i].Message.From != a2 { + t.Fatalf("expected message from actor a2") + } + if msgs[i].Message.Nonce != uint64(nextNonce) { + t.Fatalf("expected nonce %d, got %d", msgs[i].Message.Nonce, nextNonce) + } + nextNonce++ + } +} + +func TestMessageSelectionTrimming(t *testing.T) { + mp, tma := makeTestMpool() + + // the actors + w1, err := 
wallet.NewWallet(wallet.NewMemKeyStore()) + if err != nil { + t.Fatal(err) + } + + a1, err := w1.GenerateKey(crypto.SigTypeSecp256k1) + if err != nil { + t.Fatal(err) + } + + w2, err := wallet.NewWallet(wallet.NewMemKeyStore()) + if err != nil { + t.Fatal(err) + } + + a2, err := w2.GenerateKey(crypto.SigTypeSecp256k1) + if err != nil { + t.Fatal(err) + } + + block := tma.nextBlock() + ts := mock.TipSet(block) + tma.applyBlock(t, block) + + gasLimit := gasguess.Costs[gasguess.CostKey{Code: builtin.StorageMarketActorCodeID, M: 2}] + + tma.setBalance(a1, 1) // in FIL + tma.setBalance(a2, 1) // in FIL + + // make many small chains for the two actors + nMessages := int((build.BlockGasLimit / gasLimit) + 1) + for i := 0; i < nMessages; i++ { + bias := (nMessages - i) / 3 + m := makeTestMessage(w1, a1, a2, uint64(i), gasLimit, uint64(1+i%3+bias)) + mustAdd(t, mp, m) + m = makeTestMessage(w2, a2, a1, uint64(i), gasLimit, uint64(1+i%3+bias)) + mustAdd(t, mp, m) + } + + msgs, err := mp.SelectMessages(ts, 1.0) + if err != nil { + t.Fatal(err) + } + + expected := int(build.BlockGasLimit / gasLimit) + if len(msgs) != expected { + t.Fatalf("expected %d messages, bug got %d", expected, len(msgs)) + } + + mGasLimit := int64(0) + for _, m := range msgs { + mGasLimit += m.Message.GasLimit + } + if mGasLimit > build.BlockGasLimit { + t.Fatal("selected messages gas limit exceeds block gas limit!") + } + +} + +func TestPriorityMessageSelection(t *testing.T) { + mp, tma := makeTestMpool() + + // the actors + w1, err := wallet.NewWallet(wallet.NewMemKeyStore()) + if err != nil { + t.Fatal(err) + } + + a1, err := w1.GenerateKey(crypto.SigTypeSecp256k1) + if err != nil { + t.Fatal(err) + } + + w2, err := wallet.NewWallet(wallet.NewMemKeyStore()) + if err != nil { + t.Fatal(err) + } + + a2, err := w2.GenerateKey(crypto.SigTypeSecp256k1) + if err != nil { + t.Fatal(err) + } + + block := tma.nextBlock() + ts := mock.TipSet(block) + tma.applyBlock(t, block) + + gasLimit := 
gasguess.Costs[gasguess.CostKey{Code: builtin.StorageMarketActorCodeID, M: 2}] + + tma.setBalance(a1, 1) // in FIL + tma.setBalance(a2, 1) // in FIL + + mp.cfg.PriorityAddrs = []address.Address{a1} + + nMessages := 10 + for i := 0; i < nMessages; i++ { + bias := (nMessages - i) / 3 + m := makeTestMessage(w1, a1, a2, uint64(i), gasLimit, uint64(1+i%3+bias)) + mustAdd(t, mp, m) + m = makeTestMessage(w2, a2, a1, uint64(i), gasLimit, uint64(1+i%3+bias)) + mustAdd(t, mp, m) + } + + msgs, err := mp.SelectMessages(ts, 1.0) + if err != nil { + t.Fatal(err) + } + + if len(msgs) != 20 { + t.Fatalf("expected 20 messages but got %d", len(msgs)) + } + + // messages from a1 must be first + nextNonce := uint64(0) + for i := 0; i < 10; i++ { + m := msgs[i] + if m.Message.From != a1 { + t.Fatal("expected messages from a1 before messages from a2") + } + if m.Message.Nonce != nextNonce { + t.Fatalf("expected nonce %d but got %d", nextNonce, m.Message.Nonce) + } + nextNonce++ + } + + nextNonce = 0 + for i := 10; i < 20; i++ { + m := msgs[i] + if m.Message.From != a2 { + t.Fatal("expected messages from a2 after messages from a1") + } + if m.Message.Nonce != nextNonce { + t.Fatalf("expected nonce %d but got %d", nextNonce, m.Message.Nonce) + } + nextNonce++ + } +} + +func TestPriorityMessageSelection2(t *testing.T) { + mp, tma := makeTestMpool() + + // the actors + w1, err := wallet.NewWallet(wallet.NewMemKeyStore()) + if err != nil { + t.Fatal(err) + } + + a1, err := w1.GenerateKey(crypto.SigTypeSecp256k1) + if err != nil { + t.Fatal(err) + } + + w2, err := wallet.NewWallet(wallet.NewMemKeyStore()) + if err != nil { + t.Fatal(err) + } + + a2, err := w2.GenerateKey(crypto.SigTypeSecp256k1) + if err != nil { + t.Fatal(err) + } + + block := tma.nextBlock() + ts := mock.TipSet(block) + tma.applyBlock(t, block) + + gasLimit := gasguess.Costs[gasguess.CostKey{Code: builtin.StorageMarketActorCodeID, M: 2}] + + tma.setBalance(a1, 1) // in FIL + tma.setBalance(a2, 1) // in FIL + + 
mp.cfg.PriorityAddrs = []address.Address{a1} + + nMessages := int(2 * build.BlockGasLimit / gasLimit) + for i := 0; i < nMessages; i++ { + bias := (nMessages - i) / 3 + m := makeTestMessage(w1, a1, a2, uint64(i), gasLimit, uint64(1+i%3+bias)) + mustAdd(t, mp, m) + m = makeTestMessage(w2, a2, a1, uint64(i), gasLimit, uint64(1+i%3+bias)) + mustAdd(t, mp, m) + } + + msgs, err := mp.SelectMessages(ts, 1.0) + if err != nil { + t.Fatal(err) + } + + expectedMsgs := int(build.BlockGasLimit / gasLimit) + if len(msgs) != expectedMsgs { + t.Fatalf("expected %d messages but got %d", expectedMsgs, len(msgs)) + } + + // all messages must be from a1 + nextNonce := uint64(0) + for _, m := range msgs { + if m.Message.From != a1 { + t.Fatal("expected messages from a1 before messages from a2") + } + if m.Message.Nonce != nextNonce { + t.Fatalf("expected nonce %d but got %d", nextNonce, m.Message.Nonce) + } + nextNonce++ + } +} + +func TestPriorityMessageSelection3(t *testing.T) { + t.Skip("reenable after removing allow negative") + + mp, tma := makeTestMpool() + + // the actors + w1, err := wallet.NewWallet(wallet.NewMemKeyStore()) + if err != nil { + t.Fatal(err) + } + + a1, err := w1.GenerateKey(crypto.SigTypeSecp256k1) + if err != nil { + t.Fatal(err) + } + + w2, err := wallet.NewWallet(wallet.NewMemKeyStore()) + if err != nil { + t.Fatal(err) + } + + a2, err := w2.GenerateKey(crypto.SigTypeSecp256k1) + if err != nil { + t.Fatal(err) + } + + block := tma.nextBlock() + ts := mock.TipSet(block) + tma.applyBlock(t, block) + + gasLimit := gasguess.Costs[gasguess.CostKey{Code: builtin.StorageMarketActorCodeID, M: 2}] + + tma.setBalance(a1, 1) // in FIL + tma.setBalance(a2, 1) // in FIL + + mp.cfg.PriorityAddrs = []address.Address{a1} + + tma.baseFee = types.NewInt(1000) + nMessages := 10 + for i := 0; i < nMessages; i++ { + bias := (nMessages - i) / 3 + m := makeTestMessage(w1, a1, a2, uint64(i), gasLimit, uint64(1000+i%3+bias)) + mustAdd(t, mp, m) + // messages from a2 have negative 
performance + m = makeTestMessage(w2, a2, a1, uint64(i), gasLimit, 100) + mustAdd(t, mp, m) + } + + // test greedy selection + msgs, err := mp.SelectMessages(ts, 1.0) + if err != nil { + t.Fatal(err) + } + + expectedMsgs := 10 + if len(msgs) != expectedMsgs { + t.Fatalf("expected %d messages but got %d", expectedMsgs, len(msgs)) + } + + // all messages must be from a1 + nextNonce := uint64(0) + for _, m := range msgs { + if m.Message.From != a1 { + t.Fatal("expected messages from a1 before messages from a2") + } + if m.Message.Nonce != nextNonce { + t.Fatalf("expected nonce %d but got %d", nextNonce, m.Message.Nonce) + } + nextNonce++ + } + + // test optimal selection + msgs, err = mp.SelectMessages(ts, 0.1) + if err != nil { + t.Fatal(err) + } + + expectedMsgs = 10 + if len(msgs) != expectedMsgs { + t.Fatalf("expected %d messages but got %d", expectedMsgs, len(msgs)) + } + + // all messages must be from a1 + nextNonce = uint64(0) + for _, m := range msgs { + if m.Message.From != a1 { + t.Fatal("expected messages from a1 before messages from a2") + } + if m.Message.Nonce != nextNonce { + t.Fatalf("expected nonce %d but got %d", nextNonce, m.Message.Nonce) + } + nextNonce++ + } + +} + +func TestOptimalMessageSelection1(t *testing.T) { + // this test uses just a single actor sending messages with a low tq + // the chain depenent merging algorithm should pick messages from the actor + // from the start + mp, tma := makeTestMpool() + + // the actors + w1, err := wallet.NewWallet(wallet.NewMemKeyStore()) + if err != nil { + t.Fatal(err) + } + + a1, err := w1.GenerateKey(crypto.SigTypeSecp256k1) + if err != nil { + t.Fatal(err) + } + + w2, err := wallet.NewWallet(wallet.NewMemKeyStore()) + if err != nil { + t.Fatal(err) + } + + a2, err := w2.GenerateKey(crypto.SigTypeSecp256k1) + if err != nil { + t.Fatal(err) + } + + block := tma.nextBlock() + ts := mock.TipSet(block) + tma.applyBlock(t, block) + + gasLimit := gasguess.Costs[gasguess.CostKey{Code: 
builtin.StorageMarketActorCodeID, M: 2}] + + tma.setBalance(a1, 1) // in FIL + tma.setBalance(a2, 1) // in FIL + + nMessages := int(10 * build.BlockGasLimit / gasLimit) + for i := 0; i < nMessages; i++ { + bias := (nMessages - i) / 3 + m := makeTestMessage(w1, a1, a2, uint64(i), gasLimit, uint64(1+i%3+bias)) + mustAdd(t, mp, m) + } + + msgs, err := mp.SelectMessages(ts, 0.25) + if err != nil { + t.Fatal(err) + } + + expectedMsgs := int(build.BlockGasLimit / gasLimit) + if len(msgs) != expectedMsgs { + t.Fatalf("expected %d messages, but got %d", expectedMsgs, len(msgs)) + } + + nextNonce := uint64(0) + for _, m := range msgs { + if m.Message.From != a1 { + t.Fatal("expected message from a1") + } + + if m.Message.Nonce != nextNonce { + t.Fatalf("expected nonce %d but got %d", nextNonce, m.Message.Nonce) + } + nextNonce++ + } +} + +func TestOptimalMessageSelection2(t *testing.T) { + // this test uses two actors sending messages to each other, with the first + // actor paying (much) higher gas premium than the second. 
+ // We select with a low ticket quality; the chain depenent merging algorithm should pick + // messages from the second actor from the start + mp, tma := makeTestMpool() + + // the actors + w1, err := wallet.NewWallet(wallet.NewMemKeyStore()) + if err != nil { + t.Fatal(err) + } + + a1, err := w1.GenerateKey(crypto.SigTypeSecp256k1) + if err != nil { + t.Fatal(err) + } + + w2, err := wallet.NewWallet(wallet.NewMemKeyStore()) + if err != nil { + t.Fatal(err) + } + + a2, err := w2.GenerateKey(crypto.SigTypeSecp256k1) + if err != nil { + t.Fatal(err) + } + + block := tma.nextBlock() + ts := mock.TipSet(block) + tma.applyBlock(t, block) + + gasLimit := gasguess.Costs[gasguess.CostKey{Code: builtin.StorageMarketActorCodeID, M: 2}] + + tma.setBalance(a1, 1) // in FIL + tma.setBalance(a2, 1) // in FIL + + nMessages := int(5 * build.BlockGasLimit / gasLimit) + for i := 0; i < nMessages; i++ { + bias := (nMessages - i) / 3 + m := makeTestMessage(w1, a1, a2, uint64(i), gasLimit, uint64(200000+i%3+bias)) + mustAdd(t, mp, m) + m = makeTestMessage(w2, a2, a1, uint64(i), gasLimit, uint64(190000+i%3+bias)) + mustAdd(t, mp, m) + } + + msgs, err := mp.SelectMessages(ts, 0.1) + if err != nil { + t.Fatal(err) + } + + expectedMsgs := int(build.BlockGasLimit / gasLimit) + if len(msgs) != expectedMsgs { + t.Fatalf("expected %d messages, but got %d", expectedMsgs, len(msgs)) + } + + var nFrom1, nFrom2 int + var nextNonce1, nextNonce2 uint64 + for _, m := range msgs { + if m.Message.From == a1 { + if m.Message.Nonce != nextNonce1 { + t.Fatalf("expected nonce %d but got %d", nextNonce1, m.Message.Nonce) + } + nextNonce1++ + nFrom1++ + } else { + if m.Message.Nonce != nextNonce2 { + t.Fatalf("expected nonce %d but got %d", nextNonce2, m.Message.Nonce) + } + nextNonce2++ + nFrom2++ + } + } + + if nFrom1 > nFrom2 { + t.Fatalf("expected more messages from a2 than a1; nFrom1=%d nFrom2=%d", nFrom1, nFrom2) + } +} + +func TestOptimalMessageSelection3(t *testing.T) { + // this test uses 10 actors 
sending a block of messages to each other, with the the first + // actors paying higher gas premium than the subsequent actors. + // We select with a low ticket quality; the chain depenent merging algorithm should pick + // messages from the median actor from the start + mp, tma := makeTestMpool() + + nActors := 10 + // the actors + var actors []address.Address + var wallets []*wallet.Wallet + + for i := 0; i < nActors; i++ { + w, err := wallet.NewWallet(wallet.NewMemKeyStore()) + if err != nil { + t.Fatal(err) + } + + a, err := w.GenerateKey(crypto.SigTypeSecp256k1) + if err != nil { + t.Fatal(err) + } + + actors = append(actors, a) + wallets = append(wallets, w) + } + + block := tma.nextBlock() + ts := mock.TipSet(block) + tma.applyBlock(t, block) + + gasLimit := gasguess.Costs[gasguess.CostKey{Code: builtin.StorageMarketActorCodeID, M: 2}] + + for _, a := range actors { + tma.setBalance(a, 1) // in FIL + } + + nMessages := int(build.BlockGasLimit/gasLimit) + 1 + for i := 0; i < nMessages; i++ { + for j := 0; j < nActors; j++ { + premium := 500000 + 10000*(nActors-j) + (nMessages+2-i)/(30*nActors) + i%3 + m := makeTestMessage(wallets[j], actors[j], actors[j%nActors], uint64(i), gasLimit, uint64(premium)) + mustAdd(t, mp, m) + } + } + + msgs, err := mp.SelectMessages(ts, 0.1) + if err != nil { + t.Fatal(err) + } + + expectedMsgs := int(build.BlockGasLimit / gasLimit) + if len(msgs) != expectedMsgs { + t.Fatalf("expected %d messages, but got %d", expectedMsgs, len(msgs)) + } + + whoIs := func(a address.Address) int { + for i, aa := range actors { + if a == aa { + return i + } + } + return -1 + } + + nonces := make([]uint64, nActors) + for _, m := range msgs { + who := whoIs(m.Message.From) + if who < 3 { + t.Fatalf("got message from %dth actor", who) + } + + nextNonce := nonces[who] + if m.Message.Nonce != nextNonce { + t.Fatalf("expected nonce %d but got %d", nextNonce, m.Message.Nonce) + } + nonces[who]++ + } +} + +func testCompetitiveMessageSelection(t 
*testing.T, rng *rand.Rand, getPremium func() uint64) (float64, float64, float64) { + // in this test we use 300 actors and send 10 blocks of messages. + // actors send with an randomly distributed premium dictated by the getPremium function. + // a number of miners select with varying ticket quality and we compare the + // capacity and rewards of greedy selection -vs- optimal selection + mp, tma := makeTestMpool() + + nActors := 300 + // the actors + var actors []address.Address + var wallets []*wallet.Wallet + + for i := 0; i < nActors; i++ { + w, err := wallet.NewWallet(wallet.NewMemKeyStore()) + if err != nil { + t.Fatal(err) + } + + a, err := w.GenerateKey(crypto.SigTypeSecp256k1) + if err != nil { + t.Fatal(err) + } + + actors = append(actors, a) + wallets = append(wallets, w) + } + + block := tma.nextBlock() + ts := mock.TipSet(block) + tma.applyBlock(t, block) + + gasLimit := gasguess.Costs[gasguess.CostKey{Code: builtin.StorageMarketActorCodeID, M: 2}] + baseFee := types.NewInt(0) + + for _, a := range actors { + tma.setBalance(a, 1) // in FIL + } + + nMessages := 10 * int(build.BlockGasLimit/gasLimit) + t.Log("nMessages", nMessages) + nonces := make([]uint64, nActors) + for i := 0; i < nMessages; i++ { + from := rng.Intn(nActors) + to := rng.Intn(nActors) + nonce := nonces[from] + nonces[from]++ + premium := getPremium() + m := makeTestMessage(wallets[from], actors[from], actors[to], nonce, gasLimit, premium) + mustAdd(t, mp, m) + } + + logging.SetLogLevel("messagepool", "error") + + // 1. greedy selection + greedyMsgs, err := mp.selectMessagesGreedy(ts, ts) + if err != nil { + t.Fatal(err) + } + + totalGreedyCapacity := 0.0 + totalGreedyReward := 0.0 + totalOptimalCapacity := 0.0 + totalOptimalReward := 0.0 + totalBestTQReward := 0.0 + const runs = 1 + for i := 0; i < runs; i++ { + // 2. 
optimal selection + minersRand := rng.Float64() + winerProba := noWinnersProb() + i := 0 + for ; i < MaxBlocks && minersRand > 0; i++ { + minersRand -= winerProba[i] + } + nMiners := i - 1 + if nMiners < 1 { + nMiners = 1 + } + + optMsgs := make(map[cid.Cid]*types.SignedMessage) + bestTq := 0.0 + var bestMsgs []*types.SignedMessage + for j := 0; j < nMiners; j++ { + tq := rng.Float64() + msgs, err := mp.SelectMessages(ts, tq) + if err != nil { + t.Fatal(err) + } + if tq > bestTq { + bestMsgs = msgs + } + + for _, m := range msgs { + optMsgs[m.Cid()] = m + } + } + + totalGreedyCapacity += float64(len(greedyMsgs)) + totalOptimalCapacity += float64(len(optMsgs)) + boost := float64(len(optMsgs)) / float64(len(greedyMsgs)) + + t.Logf("nMiners: %d", nMiners) + t.Logf("greedy capacity %d, optimal capacity %d (x %.1f )", len(greedyMsgs), + len(optMsgs), boost) + if len(greedyMsgs) > len(optMsgs) { + t.Errorf("greedy capacity higher than optimal capacity; wtf") + } + + greedyReward := big.NewInt(0) + for _, m := range greedyMsgs { + greedyReward.Add(greedyReward, mp.getGasReward(m, baseFee)) + } + + optReward := big.NewInt(0) + for _, m := range optMsgs { + optReward.Add(optReward, mp.getGasReward(m, baseFee)) + } + + bestTqReward := big.NewInt(0) + for _, m := range bestMsgs { + bestTqReward.Add(bestTqReward, mp.getGasReward(m, baseFee)) + } + + totalBestTQReward += float64(bestTqReward.Uint64()) + + nMinersBig := big.NewInt(int64(nMiners)) + greedyAvgReward, _ := new(big.Rat).SetFrac(greedyReward, nMinersBig).Float64() + totalGreedyReward += greedyAvgReward + optimalAvgReward, _ := new(big.Rat).SetFrac(optReward, nMinersBig).Float64() + totalOptimalReward += optimalAvgReward + + boost = optimalAvgReward / greedyAvgReward + t.Logf("greedy reward: %.0f, optimal reward: %.0f (x %.1f )", greedyAvgReward, + optimalAvgReward, boost) + + } + + capacityBoost := totalOptimalCapacity / totalGreedyCapacity + rewardBoost := totalOptimalReward / totalGreedyReward + t.Logf("Average 
capacity boost: %f", capacityBoost) + t.Logf("Average reward boost: %f", rewardBoost) + t.Logf("Average best tq reward: %f", totalBestTQReward/runs/1e12) + + logging.SetLogLevel("messagepool", "info") + + return capacityBoost, rewardBoost, totalBestTQReward / runs / 1e12 +} + +func makeExpPremiumDistribution(rng *rand.Rand) func() uint64 { + return func() uint64 { + premium := 20000*math.Exp(-3.*rng.Float64()) + 5000 + return uint64(premium) + } +} + +func makeZipfPremiumDistribution(rng *rand.Rand) func() uint64 { + zipf := rand.NewZipf(rng, 1.001, 1, 40000) + return func() uint64 { + return zipf.Uint64() + 10000 + } +} + +func TestCompetitiveMessageSelectionExp(t *testing.T) { + if testing.Short() { + t.Skip("skipping in short mode") + } + var capacityBoost, rewardBoost, tqReward float64 + seeds := []int64{1947, 1976, 2020, 2100, 10000, 143324, 432432, 131, 32, 45} + for _, seed := range seeds { + t.Log("running competitive message selection with Exponential premium distribution and seed", seed) + rng := rand.New(rand.NewSource(seed)) + cb, rb, tqR := testCompetitiveMessageSelection(t, rng, makeExpPremiumDistribution(rng)) + capacityBoost += cb + rewardBoost += rb + tqReward += tqR + } + + capacityBoost /= float64(len(seeds)) + rewardBoost /= float64(len(seeds)) + tqReward /= float64(len(seeds)) + t.Logf("Average capacity boost across all seeds: %f", capacityBoost) + t.Logf("Average reward boost across all seeds: %f", rewardBoost) + t.Logf("Average reward of best ticket across all seeds: %f", tqReward) +} + +func TestCompetitiveMessageSelectionZipf(t *testing.T) { + var capacityBoost, rewardBoost, tqReward float64 + seeds := []int64{1947, 1976, 2020, 2100, 10000, 143324, 432432, 131, 32, 45} + for _, seed := range seeds { + t.Log("running competitive message selection with Zipf premium distribution and seed", seed) + rng := rand.New(rand.NewSource(seed)) + cb, rb, tqR := testCompetitiveMessageSelection(t, rng, makeZipfPremiumDistribution(rng)) + capacityBoost += 
cb + rewardBoost += rb + tqReward += tqR + } + + tqReward /= float64(len(seeds)) + capacityBoost /= float64(len(seeds)) + rewardBoost /= float64(len(seeds)) + t.Logf("Average capacity boost across all seeds: %f", capacityBoost) + t.Logf("Average reward boost across all seeds: %f", rewardBoost) + t.Logf("Average reward of best ticket across all seeds: %f", tqReward) +} + +func TestGasReward(t *testing.T) { + tests := []struct { + Premium uint64 + FeeCap uint64 + BaseFee uint64 + GasReward int64 + }{ + {Premium: 100, FeeCap: 200, BaseFee: 100, GasReward: 100}, + {Premium: 100, FeeCap: 200, BaseFee: 210, GasReward: -10}, + {Premium: 200, FeeCap: 250, BaseFee: 210, GasReward: 40}, + {Premium: 200, FeeCap: 250, BaseFee: 2000, GasReward: -1750}, + } + + mp := new(MessagePool) + for _, test := range tests { + test := test + t.Run(fmt.Sprintf("%v", test), func(t *testing.T) { + msg := &types.SignedMessage{ + Message: types.Message{ + GasLimit: 10, + GasFeeCap: types.NewInt(test.FeeCap), + GasPremium: types.NewInt(test.Premium), + }, + } + rew := mp.getGasReward(msg, types.NewInt(test.BaseFee)) + if rew.Cmp(big.NewInt(test.GasReward*10)) != 0 { + t.Errorf("bad reward: expected %d, got %s", test.GasReward*10, rew) + } + }) + } +} + +func TestRealWorldSelection(t *testing.T) { + // load test-messages.json.gz and rewrite the messages so that + // 1) we map each real actor to a test actor so that we can sign the messages + // 2) adjust the nonces so that they start from 0 + file, err := os.Open("test-messages.json.gz") + if err != nil { + t.Fatal(err) + } + + gzr, err := gzip.NewReader(file) + if err != nil { + t.Fatal(err) + } + + dec := json.NewDecoder(gzr) + + var msgs []*types.SignedMessage + baseNonces := make(map[address.Address]uint64) + +readLoop: + for { + m := new(types.SignedMessage) + err := dec.Decode(m) + switch err { + case nil: + msgs = append(msgs, m) + nonce, ok := baseNonces[m.Message.From] + if !ok || m.Message.Nonce < nonce { + baseNonces[m.Message.From] = 
m.Message.Nonce + } + + case io.EOF: + break readLoop + + default: + t.Fatal(err) + } + } + + actorMap := make(map[address.Address]address.Address) + actorWallets := make(map[address.Address]*wallet.Wallet) + + for _, m := range msgs { + baseNonce := baseNonces[m.Message.From] + + localActor, ok := actorMap[m.Message.From] + if !ok { + w, err := wallet.NewWallet(wallet.NewMemKeyStore()) + if err != nil { + t.Fatal(err) + } + + a, err := w.GenerateKey(crypto.SigTypeSecp256k1) + if err != nil { + t.Fatal(err) + } + + actorMap[m.Message.From] = a + actorWallets[a] = w + localActor = a + } + + w, ok := actorWallets[localActor] + if !ok { + t.Fatalf("failed to lookup wallet for actor %s", localActor) + } + + m.Message.From = localActor + m.Message.Nonce -= baseNonce + + sig, err := w.Sign(context.TODO(), localActor, m.Message.Cid().Bytes()) + if err != nil { + t.Fatal(err) + } + + m.Signature = *sig + } + + mp, tma := makeTestMpool() + + block := tma.nextBlockWithHeight(build.UpgradeBreezeHeight + 10) + ts := mock.TipSet(block) + tma.applyBlock(t, block) + + for _, a := range actorMap { + tma.setBalance(a, 1000000) + } + + tma.baseFee = types.NewInt(800_000_000) + + sort.Slice(msgs, func(i, j int) bool { + return msgs[i].Message.Nonce < msgs[j].Message.Nonce + }) + + // add the messages + for _, m := range msgs { + mustAdd(t, mp, m) + } + + // do message selection and check block packing + minGasLimit := int64(0.9 * float64(build.BlockGasLimit)) + + // greedy first + selected, err := mp.SelectMessages(ts, 1.0) + if err != nil { + t.Fatal(err) + } + + gasLimit := int64(0) + for _, m := range selected { + gasLimit += m.Message.GasLimit + } + if gasLimit < minGasLimit { + t.Fatalf("failed to pack with tq=1.0; packed %d, minimum packing: %d", gasLimit, minGasLimit) + } + + // high quality ticket + selected, err = mp.SelectMessages(ts, .8) + if err != nil { + t.Fatal(err) + } + + gasLimit = int64(0) + for _, m := range selected { + gasLimit += m.Message.GasLimit + } + if 
gasLimit < minGasLimit { + t.Fatalf("failed to pack with tq=0.8; packed %d, minimum packing: %d", gasLimit, minGasLimit) + } + + // mid quality ticket + selected, err = mp.SelectMessages(ts, .4) + if err != nil { + t.Fatal(err) + } + + gasLimit = int64(0) + for _, m := range selected { + gasLimit += m.Message.GasLimit + } + if gasLimit < minGasLimit { + t.Fatalf("failed to pack with tq=0.4; packed %d, minimum packing: %d", gasLimit, minGasLimit) + } + + // low quality ticket + selected, err = mp.SelectMessages(ts, .1) + if err != nil { + t.Fatal(err) + } + + gasLimit = int64(0) + for _, m := range selected { + gasLimit += m.Message.GasLimit + } + if gasLimit < minGasLimit { + t.Fatalf("failed to pack with tq=0.1; packed %d, minimum packing: %d", gasLimit, minGasLimit) + } + + // very low quality ticket + selected, err = mp.SelectMessages(ts, .01) + if err != nil { + t.Fatal(err) + } + + gasLimit = int64(0) + for _, m := range selected { + gasLimit += m.Message.GasLimit + } + if gasLimit < minGasLimit { + t.Fatalf("failed to pack with tq=0.01; packed %d, minimum packing: %d", gasLimit, minGasLimit) + } + +} diff --git a/chain/messagepool/test-messages.json.gz b/chain/messagepool/test-messages.json.gz new file mode 100644 index 000000000..09481e1f8 Binary files /dev/null and b/chain/messagepool/test-messages.json.gz differ diff --git a/chain/metrics/consensus.go b/chain/metrics/consensus.go index b0352860e..c3c4a10d1 100644 --- a/chain/metrics/consensus.go +++ b/chain/metrics/consensus.go @@ -3,13 +3,14 @@ package metrics import ( "context" "encoding/json" - "time" + "github.com/filecoin-project/go-state-types/abi" "github.com/ipfs/go-cid" - logging "github.com/ipfs/go-log" + logging "github.com/ipfs/go-log/v2" pubsub "github.com/libp2p/go-libp2p-pubsub" "go.uber.org/fx" + "github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/node/impl/full" "github.com/filecoin-project/lotus/node/modules/helpers" 
@@ -43,7 +44,7 @@ func SendHeadNotifs(nickname string) func(mctx helpers.MetricsCtx, lc fx.Lifecyc } }() go func() { - sub, err := ps.Subscribe(topic) + sub, err := ps.Subscribe(topic) //nolint if err != nil { return } @@ -68,7 +69,7 @@ type message struct { // TipSet Cids []cid.Cid Blocks []*types.BlockHeader - Height uint64 + Height abi.ChainEpoch Weight types.BigInt Time uint64 Nonce uint64 @@ -88,14 +89,14 @@ func sendHeadNotifs(ctx context.Context, ps *pubsub.PubSub, topic string, chain } // using unix nano time makes very sure we pick a nonce higher than previous restart - nonce := uint64(time.Now().UnixNano()) + nonce := uint64(build.Clock.Now().UnixNano()) for { select { case notif := <-notifs: n := notif[len(notif)-1] - w, err := chain.ChainTipSetWeight(ctx, n.Val) + w, err := chain.ChainTipSetWeight(ctx, n.Val.Key()) if err != nil { return err } @@ -106,7 +107,7 @@ func sendHeadNotifs(ctx context.Context, ps *pubsub.PubSub, topic string, chain Height: n.Val.Height(), Weight: w, NodeName: nickname, - Time: uint64(time.Now().UnixNano() / 1000_000), + Time: uint64(build.Clock.Now().UnixNano() / 1000_000), Nonce: nonce, } @@ -115,6 +116,7 @@ func sendHeadNotifs(ctx context.Context, ps *pubsub.PubSub, topic string, chain return err } + //nolint if err := ps.Publish(topic, b); err != nil { return err } diff --git a/chain/state/statetree.go b/chain/state/statetree.go index fc3da47e4..fe932bfa1 100644 --- a/chain/state/statetree.go +++ b/chain/state/statetree.go @@ -1,48 +1,176 @@ package state import ( + "bytes" "context" "fmt" "github.com/ipfs/go-cid" - hamt "github.com/ipfs/go-hamt-ipld" - logging "github.com/ipfs/go-log" + cbor "github.com/ipfs/go-ipld-cbor" + logging "github.com/ipfs/go-log/v2" + "go.opencensus.io/trace" "golang.org/x/xerrors" "github.com/filecoin-project/go-address" - "github.com/filecoin-project/lotus/chain/actors" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/lotus/chain/actors/builtin" + init_ 
"github.com/filecoin-project/lotus/chain/actors/builtin/init" + cbg "github.com/whyrusleeping/cbor-gen" + + "github.com/filecoin-project/lotus/chain/actors/adt" "github.com/filecoin-project/lotus/chain/types" ) var log = logging.Logger("statetree") +// StateTree stores actors state by their ID. type StateTree struct { - root *hamt.Node - Store *hamt.CborIpldStore + root adt.Map + version builtin.Version // TODO + info cid.Cid + Store cbor.IpldStore - actorcache map[address.Address]*types.Actor - snapshot cid.Cid + snaps *stateSnaps } -func NewStateTree(cst *hamt.CborIpldStore) (*StateTree, error) { +type stateSnaps struct { + layers []*stateSnapLayer +} + +type stateSnapLayer struct { + actors map[address.Address]streeOp + resolveCache map[address.Address]address.Address +} + +func newStateSnapLayer() *stateSnapLayer { + return &stateSnapLayer{ + actors: make(map[address.Address]streeOp), + resolveCache: make(map[address.Address]address.Address), + } +} + +type streeOp struct { + Act types.Actor + Delete bool +} + +func newStateSnaps() *stateSnaps { + ss := &stateSnaps{} + ss.addLayer() + return ss +} + +func (ss *stateSnaps) addLayer() { + ss.layers = append(ss.layers, newStateSnapLayer()) +} + +func (ss *stateSnaps) dropLayer() { + ss.layers[len(ss.layers)-1] = nil // allow it to be GCed + ss.layers = ss.layers[:len(ss.layers)-1] +} + +func (ss *stateSnaps) mergeLastLayer() { + last := ss.layers[len(ss.layers)-1] + nextLast := ss.layers[len(ss.layers)-2] + + for k, v := range last.actors { + nextLast.actors[k] = v + } + + for k, v := range last.resolveCache { + nextLast.resolveCache[k] = v + } + + ss.dropLayer() +} + +func (ss *stateSnaps) resolveAddress(addr address.Address) (address.Address, bool) { + for i := len(ss.layers) - 1; i >= 0; i-- { + resa, ok := ss.layers[i].resolveCache[addr] + if ok { + return resa, true + } + } + return address.Undef, false +} + +func (ss *stateSnaps) cacheResolveAddress(addr, resa address.Address) { + 
ss.layers[len(ss.layers)-1].resolveCache[addr] = resa +} + +func (ss *stateSnaps) getActor(addr address.Address) (*types.Actor, error) { + for i := len(ss.layers) - 1; i >= 0; i-- { + act, ok := ss.layers[i].actors[addr] + if ok { + if act.Delete { + return nil, types.ErrActorNotFound + } + + return &act.Act, nil + } + } + return nil, nil +} + +func (ss *stateSnaps) setActor(addr address.Address, act *types.Actor) { + ss.layers[len(ss.layers)-1].actors[addr] = streeOp{Act: *act} +} + +func (ss *stateSnaps) deleteActor(addr address.Address) { + ss.layers[len(ss.layers)-1].actors[addr] = streeOp{Delete: true} +} + +func NewStateTree(cst cbor.IpldStore, version builtin.Version) (*StateTree, error) { + var info cid.Cid + switch version { + case builtin.Version0: + // info is undefined + default: + return nil, xerrors.Errorf("unsupported state tree version: %d", version) + } + root, err := adt.NewMap(adt.WrapStore(context.TODO(), cst), version) + if err != nil { + return nil, err + } + return &StateTree{ - root: hamt.NewNode(cst), - Store: cst, - actorcache: make(map[address.Address]*types.Actor), + root: root, + info: info, + version: version, + Store: cst, + snaps: newStateSnaps(), }, nil } -func LoadStateTree(cst *hamt.CborIpldStore, c cid.Cid) (*StateTree, error) { - nd, err := hamt.LoadNode(context.Background(), cst, c) +func LoadStateTree(cst cbor.IpldStore, c cid.Cid) (*StateTree, error) { + var root types.StateRoot + // Try loading as a new-style state-tree (version/actors tuple). + if err := cst.Get(context.TODO(), c, &root); err != nil { + // We failed to decode as the new version, must be an old version. + root.Actors = c + root.Version = builtin.Version0 + } + + // If that fails, load as an old-style state-tree (direct hampt, version 0. 
+ nd, err := adt.AsMap(adt.WrapStore(context.TODO(), cst), root.Actors, builtin.Version(root.Version)) if err != nil { log.Errorf("loading hamt node %s failed: %s", c, err) return nil, err } + switch root.Version { + case builtin.Version0: + // supported + default: + return nil, xerrors.Errorf("unsupported state tree version: %d", root.Version) + } + return &StateTree{ - root: nd, - Store: cst, - actorcache: make(map[address.Address]*types.Actor), + root: nd, + info: root.Info, + version: builtin.Version(root.Version), + Store: cst, + snaps: newStateSnaps(), }, nil } @@ -53,109 +181,165 @@ func (st *StateTree) SetActor(addr address.Address, act *types.Actor) error { } addr = iaddr - cact, ok := st.actorcache[addr] - if ok { - if act == cact { - return nil - } - } - - st.actorcache[addr] = act - - return st.root.Set(context.TODO(), string(addr.Bytes()), act) + st.snaps.setActor(addr, act) + return nil } +// LookupID gets the ID address of this actor's `addr` stored in the `InitActor`. func (st *StateTree) LookupID(addr address.Address) (address.Address, error) { if addr.Protocol() == address.ID { return addr, nil } - act, err := st.GetActor(actors.InitAddress) + resa, ok := st.snaps.resolveAddress(addr) + if ok { + return resa, nil + } + + act, err := st.GetActor(init_.Address) if err != nil { return address.Undef, xerrors.Errorf("getting init actor: %w", err) } - var ias actors.InitActorState - if err := st.Store.Get(context.TODO(), act.Head, &ias); err != nil { + ias, err := init_.Load(&AdtStore{st.Store}, act) + if err != nil { return address.Undef, xerrors.Errorf("loading init actor state: %w", err) } - return ias.Lookup(st.Store, addr) + a, found, err := ias.ResolveAddress(addr) + if err == nil && !found { + err = types.ErrActorNotFound + } + if err != nil { + return address.Undef, xerrors.Errorf("resolve address %s: %w", addr, err) + } + + st.snaps.cacheResolveAddress(addr, a) + + return a, nil } +// GetActor returns the actor from any type of `addr` provided. 
func (st *StateTree) GetActor(addr address.Address) (*types.Actor, error) { if addr == address.Undef { return nil, fmt.Errorf("GetActor called on undefined address") } + // Transform `addr` to its ID format. iaddr, err := st.LookupID(addr) if err != nil { - if xerrors.Is(err, hamt.ErrNotFound) { - return nil, xerrors.Errorf("resolution lookup failed (%s): %w", addr, types.ErrActorNotFound) + if xerrors.Is(err, types.ErrActorNotFound) { + return nil, xerrors.Errorf("resolution lookup failed (%s): %w", addr, err) } return nil, xerrors.Errorf("address resolution: %w", err) } addr = iaddr - cact, ok := st.actorcache[addr] - if ok { - return cact, nil + snapAct, err := st.snaps.getActor(addr) + if err != nil { + return nil, err + } + + if snapAct != nil { + return snapAct, nil } var act types.Actor - err = st.root.Find(context.TODO(), string(addr.Bytes()), &act) - if err != nil { - if err == hamt.ErrNotFound { - return nil, types.ErrActorNotFound - } + if found, err := st.root.Get(abi.AddrKey(addr), &act); err != nil { return nil, xerrors.Errorf("hamt find failed: %w", err) + } else if !found { + return nil, types.ErrActorNotFound } - st.actorcache[addr] = &act + st.snaps.setActor(addr, &act) return &act, nil } -func (st *StateTree) Flush() (cid.Cid, error) { - for addr, act := range st.actorcache { - if err := st.root.Set(context.TODO(), string(addr.Bytes()), act); err != nil { - return cid.Undef, err +func (st *StateTree) DeleteActor(addr address.Address) error { + if addr == address.Undef { + return xerrors.Errorf("DeleteActor called on undefined address") + } + + iaddr, err := st.LookupID(addr) + if err != nil { + if xerrors.Is(err, types.ErrActorNotFound) { + return xerrors.Errorf("resolution lookup failed (%s): %w", addr, err) } - } - st.actorcache = make(map[address.Address]*types.Actor) - - if err := st.root.Flush(context.TODO()); err != nil { - return cid.Undef, err + return xerrors.Errorf("address resolution: %w", err) } - return st.Store.Put(context.TODO(), 
st.root) -} + addr = iaddr -func (st *StateTree) Snapshot() error { - ss, err := st.Flush() + _, err = st.GetActor(addr) if err != nil { return err } - st.snapshot = ss + st.snaps.deleteActor(addr) + return nil } -func (st *StateTree) RegisterNewAddress(addr address.Address, act *types.Actor) (address.Address, error) { +func (st *StateTree) Flush(ctx context.Context) (cid.Cid, error) { + ctx, span := trace.StartSpan(ctx, "stateTree.Flush") //nolint:staticcheck + defer span.End() + if len(st.snaps.layers) != 1 { + return cid.Undef, xerrors.Errorf("tried to flush state tree with snapshots on the stack") + } + + for addr, sto := range st.snaps.layers[0].actors { + if sto.Delete { + if err := st.root.Delete(abi.AddrKey(addr)); err != nil { + return cid.Undef, err + } + } else { + if err := st.root.Put(abi.AddrKey(addr), &sto.Act); err != nil { + return cid.Undef, err + } + } + } + + root, err := st.root.Root() + if err != nil { + return cid.Undef, xerrors.Errorf("failed to flush state-tree hamt: %w", err) + } + // If we're version 0, return a raw tree. + if st.version == builtin.Version0 { + return root, nil + } + // Otherwise, return a versioned tree. 
+ return st.Store.Put(ctx, &types.StateRoot{Version: uint64(st.version), Actors: root, Info: st.info}) +} + +func (st *StateTree) Snapshot(ctx context.Context) error { + ctx, span := trace.StartSpan(ctx, "stateTree.SnapShot") //nolint:staticcheck + defer span.End() + + st.snaps.addLayer() + + return nil +} + +func (st *StateTree) ClearSnapshot() { + st.snaps.mergeLastLayer() +} + +func (st *StateTree) RegisterNewAddress(addr address.Address) (address.Address, error) { var out address.Address - err := st.MutateActor(actors.InitAddress, func(initact *types.Actor) error { - var ias actors.InitActorState - if err := st.Store.Get(context.TODO(), initact.Head, &ias); err != nil { + err := st.MutateActor(init_.Address, func(initact *types.Actor) error { + ias, err := init_.Load(&AdtStore{st.Store}, initact) + if err != nil { return err } - oaddr, err := ias.AddActor(st.Store, addr) + oaddr, err := ias.MapAddressToNewID(addr) if err != nil { return err } out = oaddr - ncid, err := st.Store.Put(context.TODO(), &ias) + ncid, err := st.Store.Put(context.TODO(), ias) if err != nil { return err } @@ -167,20 +351,21 @@ func (st *StateTree) RegisterNewAddress(addr address.Address, act *types.Actor) return address.Undef, err } - if err := st.SetActor(out, act); err != nil { - return address.Undef, err - } - return out, nil } -func (st *StateTree) Revert() error { - nd, err := hamt.LoadNode(context.Background(), st.Store, st.snapshot) - if err != nil { - return err - } +type AdtStore struct{ cbor.IpldStore } + +func (a *AdtStore) Context() context.Context { + return context.TODO() +} + +var _ adt.Store = (*AdtStore)(nil) + +func (st *StateTree) Revert() error { + st.snaps.dropLayer() + st.snaps.addLayer() - st.root = nd return nil } @@ -196,3 +381,62 @@ func (st *StateTree) MutateActor(addr address.Address, f func(*types.Actor) erro return st.SetActor(addr, act) } + +func (st *StateTree) ForEach(f func(address.Address, *types.Actor) error) error { + var act types.Actor + return 
st.root.ForEach(&act, func(k string) error { + act := act // copy + addr, err := address.NewFromBytes([]byte(k)) + if err != nil { + return xerrors.Errorf("invalid address (%x) found in state tree key: %w", []byte(k), err) + } + + return f(addr, &act) + }) +} + +// Version returns the version of the StateTree data structure in use. +func (st *StateTree) Version() builtin.Version { + return st.version +} + +func Diff(oldTree, newTree *StateTree) (map[string]types.Actor, error) { + out := map[string]types.Actor{} + + var ( + ncval, ocval cbg.Deferred + buf = bytes.NewReader(nil) + ) + if err := newTree.root.ForEach(&ncval, func(k string) error { + var act types.Actor + + addr, err := address.NewFromBytes([]byte(k)) + if err != nil { + return xerrors.Errorf("address in state tree was not valid: %w", err) + } + + found, err := oldTree.root.Get(abi.AddrKey(addr), &ocval) + if err != nil { + return err + } + + if found && bytes.Equal(ocval.Raw, ncval.Raw) { + return nil // not changed + } + + buf.Reset(ncval.Raw) + err = act.UnmarshalCBOR(buf) + buf.Reset(nil) + + if err != nil { + return err + } + + out[addr.String()] = act + + return nil + }); err != nil { + return nil, err + } + return out, nil +} diff --git a/chain/state/statetree_test.go b/chain/state/statetree_test.go index a12bbbeca..79ab20606 100644 --- a/chain/state/statetree_test.go +++ b/chain/state/statetree_test.go @@ -1,17 +1,24 @@ package state import ( + "context" + "fmt" "testing" + "github.com/ipfs/go-cid" + cbor "github.com/ipfs/go-ipld-cbor" + address "github.com/filecoin-project/go-address" - actors "github.com/filecoin-project/lotus/chain/actors" + "github.com/filecoin-project/specs-actors/actors/builtin" + + "github.com/filecoin-project/lotus/build" + builtin2 "github.com/filecoin-project/lotus/chain/actors/builtin" "github.com/filecoin-project/lotus/chain/types" - hamt "github.com/ipfs/go-hamt-ipld" ) func BenchmarkStateTreeSet(b *testing.B) { - cst := hamt.NewCborStore() - st, err := 
NewStateTree(cst) + cst := cbor.NewMemCborStore() + st, err := NewStateTree(cst, builtin2.VersionForNetwork(build.NewestNetworkVersion)) if err != nil { b.Fatal(err) } @@ -26,8 +33,8 @@ func BenchmarkStateTreeSet(b *testing.B) { } err = st.SetActor(a, &types.Actor{ Balance: types.NewInt(1258812523), - Code: actors.StorageMinerCodeCid, - Head: actors.AccountCodeCid, + Code: builtin.StorageMinerActorCodeID, + Head: builtin.AccountActorCodeID, Nonce: uint64(i), }) if err != nil { @@ -37,8 +44,8 @@ func BenchmarkStateTreeSet(b *testing.B) { } func BenchmarkStateTreeSetFlush(b *testing.B) { - cst := hamt.NewCborStore() - st, err := NewStateTree(cst) + cst := cbor.NewMemCborStore() + st, err := NewStateTree(cst, builtin2.VersionForNetwork(build.NewestNetworkVersion)) if err != nil { b.Fatal(err) } @@ -53,22 +60,22 @@ func BenchmarkStateTreeSetFlush(b *testing.B) { } err = st.SetActor(a, &types.Actor{ Balance: types.NewInt(1258812523), - Code: actors.StorageMinerCodeCid, - Head: actors.AccountCodeCid, + Code: builtin.StorageMinerActorCodeID, + Head: builtin.AccountActorCodeID, Nonce: uint64(i), }) if err != nil { b.Fatal(err) } - if _, err := st.Flush(); err != nil { + if _, err := st.Flush(context.TODO()); err != nil { b.Fatal(err) } } } func BenchmarkStateTree10kGetActor(b *testing.B) { - cst := hamt.NewCborStore() - st, err := NewStateTree(cst) + cst := cbor.NewMemCborStore() + st, err := NewStateTree(cst, builtin2.VersionForNetwork(build.NewestNetworkVersion)) if err != nil { b.Fatal(err) } @@ -79,8 +86,8 @@ func BenchmarkStateTree10kGetActor(b *testing.B) { } err = st.SetActor(a, &types.Actor{ Balance: types.NewInt(1258812523 + uint64(i)), - Code: actors.StorageMinerCodeCid, - Head: actors.AccountCodeCid, + Code: builtin.StorageMinerActorCodeID, + Head: builtin.AccountActorCodeID, Nonce: uint64(i), }) if err != nil { @@ -88,7 +95,7 @@ func BenchmarkStateTree10kGetActor(b *testing.B) { } } - if _, err := st.Flush(); err != nil { + if _, err := 
st.Flush(context.TODO()); err != nil { b.Fatal(err) } @@ -109,8 +116,8 @@ func BenchmarkStateTree10kGetActor(b *testing.B) { } func TestSetCache(t *testing.T) { - cst := hamt.NewCborStore() - st, err := NewStateTree(cst) + cst := cbor.NewMemCborStore() + st, err := NewStateTree(cst, builtin2.VersionForNetwork(build.NewestNetworkVersion)) if err != nil { t.Fatal(err) } @@ -122,8 +129,8 @@ func TestSetCache(t *testing.T) { act := &types.Actor{ Balance: types.NewInt(0), - Code: actors.StorageMinerCodeCid, - Head: actors.AccountCodeCid, + Code: builtin.StorageMinerActorCodeID, + Head: builtin.AccountActorCodeID, Nonce: 0, } @@ -139,7 +146,136 @@ func TestSetCache(t *testing.T) { t.Fatal(err) } - if outact.Nonce != act.Nonce { - t.Error("nonce didn't match") + if outact.Nonce == 1 { + t.Error("nonce should not have updated") + } +} + +func TestSnapshots(t *testing.T) { + ctx := context.Background() + cst := cbor.NewMemCborStore() + st, err := NewStateTree(cst, builtin2.VersionForNetwork(build.NewestNetworkVersion)) + if err != nil { + t.Fatal(err) + } + + var addrs []address.Address + //for _, a := range []string{"t15ocrptbu4i5qucjvvwecihd7fqqgzb27pz5l5zy", "t1dpyvgavvl3f4ujlk6odedss54z6rt5gyuknsuva", "t1feiejbkcvozy7iltt2pxzuoq4d2kpbsusugan7a", "t3rgjfqybjx7bahuhfv7nwfg3tlm4i4zyvldfirjvzm5z5xwjoqbj3rfi2mpmlxpqwxxxafgpkjilqzpg7cefa"} { + for _, a := range []string{"t0100", "t0101", "t0102", "t0103"} { + addr, err := address.NewFromString(a) + if err != nil { + t.Fatal(err) + } + addrs = append(addrs, addr) + } + + if err := st.Snapshot(ctx); err != nil { + t.Fatal(err) + } + + if err := st.SetActor(addrs[0], &types.Actor{Code: builtin.AccountActorCodeID, Head: builtin.AccountActorCodeID, Balance: types.NewInt(55)}); err != nil { + t.Fatal(err) + } + + { // sub call that will fail + if err := st.Snapshot(ctx); err != nil { + t.Fatal(err) + } + + if err := st.SetActor(addrs[1], &types.Actor{Code: builtin.AccountActorCodeID, Head: builtin.AccountActorCodeID, Balance: 
types.NewInt(77)}); err != nil { + t.Fatal(err) + } + + if err := st.Revert(); err != nil { + t.Fatal(err) + } + st.ClearSnapshot() + } + + // more operations in top level call... + if err := st.SetActor(addrs[2], &types.Actor{Code: builtin.AccountActorCodeID, Head: builtin.AccountActorCodeID, Balance: types.NewInt(123)}); err != nil { + t.Fatal(err) + } + + { // sub call that succeeds + if err := st.Snapshot(ctx); err != nil { + t.Fatal(err) + } + + if err := st.SetActor(addrs[3], &types.Actor{Code: builtin.AccountActorCodeID, Head: builtin.AccountActorCodeID, Balance: types.NewInt(5)}); err != nil { + t.Fatal(err) + } + + st.ClearSnapshot() + } + + st.ClearSnapshot() + + if _, err := st.Flush(ctx); err != nil { + t.Fatal(err) + } + + assertHas(t, st, addrs[0]) + assertNotHas(t, st, addrs[1]) + assertHas(t, st, addrs[2]) + assertHas(t, st, addrs[3]) +} + +func assertHas(t *testing.T, st *StateTree, addr address.Address) { + _, err := st.GetActor(addr) + if err != nil { + t.Fatal(err) + } +} + +func assertNotHas(t *testing.T, st *StateTree, addr address.Address) { + _, err := st.GetActor(addr) + if err == nil { + t.Fatal("shouldnt have found actor", addr) + } +} + +func TestStateTreeConsistency(t *testing.T) { + cst := cbor.NewMemCborStore() + st, err := NewStateTree(cst, builtin2.VersionForNetwork(build.NewestNetworkVersion)) + if err != nil { + t.Fatal(err) + } + + var addrs []address.Address + for i := 100; i < 150; i++ { + a, err := address.NewIDAddress(uint64(i)) + if err != nil { + t.Fatal(err) + } + + addrs = append(addrs, a) + } + + randomCid, err := cid.Decode("bafy2bzacecu7n7wbtogznrtuuvf73dsz7wasgyneqasksdblxupnyovmtwxxu") + if err != nil { + t.Fatal(err) + } + + for i, a := range addrs { + err := st.SetActor(a, &types.Actor{ + Code: randomCid, + Head: randomCid, + Balance: types.NewInt(uint64(10000 + i)), + Nonce: uint64(1000 - i), + }) + if err != nil { + t.Fatalf("while setting actor: %+v", err) + } + } + + root, err := st.Flush(context.TODO()) + if 
err != nil { + t.Fatal(err) + } + + fmt.Println("root is: ", root) + if root.String() != "bafy2bzaceb2bhqw75pqp44efoxvlnm73lnctq6djair56bfn5x3gw56epcxbi" { + t.Fatal("MISMATCH!") } } diff --git a/chain/stmgr/call.go b/chain/stmgr/call.go index cfbe729c0..f4dfc7115 100644 --- a/chain/stmgr/call.go +++ b/chain/stmgr/call.go @@ -4,39 +4,58 @@ import ( "context" "fmt" + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/crypto" "github.com/ipfs/go-cid" "go.opencensus.io/trace" "golang.org/x/xerrors" - "github.com/filecoin-project/lotus/chain/actors" + "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/chain/store" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/vm" ) -func (sm *StateManager) CallRaw(ctx context.Context, msg *types.Message, bstate cid.Cid, r vm.Rand, bheight uint64) (*types.MessageReceipt, error) { +func (sm *StateManager) CallRaw(ctx context.Context, msg *types.Message, bstate cid.Cid, r vm.Rand, bheight abi.ChainEpoch) (*api.InvocResult, error) { ctx, span := trace.StartSpan(ctx, "statemanager.CallRaw") defer span.End() - vmi, err := vm.NewVM(bstate, bheight, r, actors.NetworkAddress, sm.cs.Blockstore()) + vmopt := &vm.VMOpts{ + StateBase: bstate, + Epoch: bheight, + Rand: r, + Bstore: sm.cs.Blockstore(), + Syscalls: sm.cs.VMSys(), + CircSupplyCalc: sm.GetCirculatingSupply, + NtwkVersion: sm.GetNtwkVersion, + BaseFee: types.NewInt(0), + } + + vmi, err := vm.NewVM(ctx, vmopt) if err != nil { return nil, xerrors.Errorf("failed to set up vm: %w", err) } - if msg.GasLimit == types.EmptyInt { - msg.GasLimit = types.NewInt(10000000000) + if msg.GasLimit == 0 { + msg.GasLimit = build.BlockGasLimit } - if msg.GasPrice == types.EmptyInt { - msg.GasPrice = types.NewInt(0) + if msg.GasFeeCap == types.EmptyInt { + msg.GasFeeCap = types.NewInt(0) } + if msg.GasPremium == 
types.EmptyInt { + msg.GasPremium = types.NewInt(0) + } + if msg.Value == types.EmptyInt { msg.Value = types.NewInt(0) } if span.IsRecordingEvents() { span.AddAttributes( - trace.Int64Attribute("gas_limit", int64(msg.GasLimit.Uint64())), - trace.Int64Attribute("gas_price", int64(msg.GasPrice.Uint64())), + trace.Int64Attribute("gas_limit", msg.GasLimit), + trace.StringAttribute("gas_feecap", msg.GasFeeCap.String()), trace.StringAttribute("value", msg.Value.String()), ) } @@ -49,37 +68,137 @@ func (sm *StateManager) CallRaw(ctx context.Context, msg *types.Message, bstate msg.Nonce = fromActor.Nonce // TODO: maybe just use the invoker directly? - ret, err := vmi.ApplyMessage(ctx, msg) + ret, err := vmi.ApplyImplicitMessage(ctx, msg) if err != nil { return nil, xerrors.Errorf("apply message failed: %w", err) } + var errs string if ret.ActorErr != nil { + errs = ret.ActorErr.Error() log.Warnf("chain call failed: %s", ret.ActorErr) } - return &ret.MessageReceipt, nil + + return &api.InvocResult{ + Msg: msg, + MsgRct: &ret.MessageReceipt, + ExecutionTrace: ret.ExecutionTrace, + Error: errs, + Duration: ret.Duration, + }, nil } -func (sm *StateManager) Call(ctx context.Context, msg *types.Message, ts *types.TipSet) (*types.MessageReceipt, error) { +func (sm *StateManager) Call(ctx context.Context, msg *types.Message, ts *types.TipSet) (*api.InvocResult, error) { if ts == nil { ts = sm.cs.GetHeaviestTipSet() } state := ts.ParentState() - r := store.NewChainRand(sm.cs, ts.Cids(), ts.Height()) + r := store.NewChainRand(sm.cs, ts.Cids()) return sm.CallRaw(ctx, msg, state, r, ts.Height()) } +func (sm *StateManager) CallWithGas(ctx context.Context, msg *types.Message, priorMsgs []types.ChainMsg, ts *types.TipSet) (*api.InvocResult, error) { + ctx, span := trace.StartSpan(ctx, "statemanager.CallWithGas") + defer span.End() + + if ts == nil { + ts = sm.cs.GetHeaviestTipSet() + } + + state, _, err := sm.TipSetState(ctx, ts) + if err != nil { + return nil, xerrors.Errorf("computing 
tipset state: %w", err) + } + + r := store.NewChainRand(sm.cs, ts.Cids()) + + if span.IsRecordingEvents() { + span.AddAttributes( + trace.Int64Attribute("gas_limit", msg.GasLimit), + trace.StringAttribute("gas_feecap", msg.GasFeeCap.String()), + trace.StringAttribute("value", msg.Value.String()), + ) + } + + vmopt := &vm.VMOpts{ + StateBase: state, + Epoch: ts.Height() + 1, + Rand: r, + Bstore: sm.cs.Blockstore(), + Syscalls: sm.cs.VMSys(), + CircSupplyCalc: sm.GetCirculatingSupply, + NtwkVersion: sm.GetNtwkVersion, + BaseFee: ts.Blocks()[0].ParentBaseFee, + } + vmi, err := vm.NewVM(ctx, vmopt) + if err != nil { + return nil, xerrors.Errorf("failed to set up vm: %w", err) + } + for i, m := range priorMsgs { + _, err := vmi.ApplyMessage(ctx, m) + if err != nil { + return nil, xerrors.Errorf("applying prior message (%d, %s): %w", i, m.Cid(), err) + } + } + + fromActor, err := vmi.StateTree().GetActor(msg.From) + if err != nil { + return nil, xerrors.Errorf("call raw get actor: %s", err) + } + + msg.Nonce = fromActor.Nonce + + fromKey, err := sm.ResolveToKeyAddress(ctx, msg.From, ts) + if err != nil { + return nil, xerrors.Errorf("could not resolve key: %w", err) + } + + var msgApply types.ChainMsg + + switch fromKey.Protocol() { + case address.BLS: + msgApply = msg + case address.SECP256K1: + msgApply = &types.SignedMessage{ + Message: *msg, + Signature: crypto.Signature{ + Type: crypto.SigTypeSecp256k1, + Data: make([]byte, 65), + }, + } + + } + + ret, err := vmi.ApplyMessage(ctx, msgApply) + if err != nil { + return nil, xerrors.Errorf("apply message failed: %w", err) + } + + var errs string + if ret.ActorErr != nil { + errs = ret.ActorErr.Error() + } + + return &api.InvocResult{ + Msg: msg, + MsgRct: &ret.MessageReceipt, + ExecutionTrace: ret.ExecutionTrace, + Error: errs, + Duration: ret.Duration, + }, nil +} + var errHaltExecution = fmt.Errorf("halt") func (sm *StateManager) Replay(ctx context.Context, ts *types.TipSet, mcid cid.Cid) (*types.Message, 
*vm.ApplyRet, error) { var outm *types.Message var outr *vm.ApplyRet - _, _, err := sm.computeTipSetState(ctx, ts.Blocks(), func(c cid.Cid, m *types.Message, ret *vm.ApplyRet) error { + _, _, err := sm.computeTipSetState(ctx, ts, func(c cid.Cid, m *types.Message, ret *vm.ApplyRet) error { if c == mcid { outm = m outr = ret @@ -91,5 +210,9 @@ func (sm *StateManager) Replay(ctx context.Context, ts *types.TipSet, mcid cid.C return nil, nil, xerrors.Errorf("unexpected error during execution: %w", err) } + if outr == nil { + return nil, nil, xerrors.Errorf("given message not found in tipset") + } + return outm, outr, nil } diff --git a/chain/stmgr/fork_no_p_eps.go b/chain/stmgr/fork_no_p_eps.go deleted file mode 100644 index 2eb112ca6..000000000 --- a/chain/stmgr/fork_no_p_eps.go +++ /dev/null @@ -1,127 +0,0 @@ -package stmgr - -import ( - "context" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-amt-ipld" - "github.com/ipfs/go-cid" - "github.com/ipfs/go-hamt-ipld" - cbor "github.com/ipfs/go-ipld-cbor" - "github.com/whyrusleeping/cbor-gen" - "golang.org/x/xerrors" - - "github.com/filecoin-project/lotus/build" - "github.com/filecoin-project/lotus/chain/actors" - "github.com/filecoin-project/lotus/chain/state" - "github.com/filecoin-project/lotus/chain/types" -) - -func (sm *StateManager) forkNoPowerEPS(ctx context.Context, pstate cid.Cid) (cid.Cid, error) { - cst := hamt.CSTFromBstore(sm.cs.Blockstore()) - st, err := state.LoadStateTree(cst, pstate) - if err != nil { - return cid.Undef, xerrors.Errorf("loading parent state tree: %w", err) - } - - if err := st.MutateActor(actors.StoragePowerAddress, func(spa *types.Actor) error { - var head actors.StoragePowerState - if err := cst.Get(ctx, spa.Head, &head); err != nil { - return xerrors.Errorf("reading StoragePower state: %w", err) - } - - buckets, err := amt.LoadAMT(amt.WrapBlockstore(sm.cs.Blockstore()), head.ProvingBuckets) - if err != nil { - return xerrors.Errorf("opening proving 
buckets AMT: %w", err) - } - - fixedBuckets := map[uint64]map[address.Address]struct{}{} - - if err := buckets.ForEach(func(bucketId uint64, ent *typegen.Deferred) error { - var bcid cid.Cid - if err := cbor.DecodeInto(ent.Raw, &bcid); err != nil { - return xerrors.Errorf("decoding bucket cid: %w", err) - } - - bucket, err := hamt.LoadNode(ctx, cst, bcid) - if err != nil { - return xerrors.Errorf("loading bucket hamt: %w", err) - } - - return bucket.ForEach(ctx, func(abytes string, _ interface{}) error { - addr, err := address.NewFromBytes([]byte(abytes)) - if err != nil { - return xerrors.Errorf("parsing address in proving bucket: %w", err) - } - - // now find the correct bucket - miner, err := st.GetActor(addr) - if err != nil { - return xerrors.Errorf("getting miner %s: %w", addr, err) - } - - var minerHead actors.StorageMinerActorState - if err := cst.Get(ctx, miner.Head, &minerHead); err != nil { - return xerrors.Errorf("reading miner %s state: %w", addr, err) - } - - correctBucket := minerHead.ElectionPeriodStart % build.SlashablePowerDelay - if correctBucket != bucketId { - log.Warnf("miner %s was in wrong proving bucket %d, putting in %d (eps: %d)", addr, bucketId, correctBucket, minerHead.ElectionPeriodStart) - } - - if _, ok := fixedBuckets[correctBucket]; !ok { - fixedBuckets[correctBucket] = map[address.Address]struct{}{} - } - fixedBuckets[correctBucket][addr] = struct{}{} - - return nil - }) - }); err != nil { - return err - } - - // ///// - // Write fixed buckets - - fixed := amt.NewAMT(amt.WrapBlockstore(sm.cs.Blockstore())) - - for bucketId, addrss := range fixedBuckets { - bucket := hamt.NewNode(cst) - for addr := range addrss { - if err := bucket.Set(ctx, string(addr.Bytes()), actors.CborNull); err != nil { - return xerrors.Errorf("setting address in bucket: %w", err) - } - } - - if err := bucket.Flush(ctx); err != nil { - return xerrors.Errorf("flushing bucket amt: %w", err) - } - - bcid, err := cst.Put(context.TODO(), bucket) - if err != nil { 
- return xerrors.Errorf("put bucket: %w", err) - } - - if err := fixed.Set(bucketId, bcid); err != nil { - return xerrors.Errorf("set bucket: %w", err) - } - } - - head.ProvingBuckets, err = fixed.Flush() - if err != nil { - return xerrors.Errorf("flushing bucket amt: %w", err) - } - - spa.Head, err = cst.Put(ctx, &head) - if err != nil { - return xerrors.Errorf("putting actor head: %w", err) - } - - return nil - }); err != nil { - return cid.Undef, err - } - - return st.Flush() -} diff --git a/chain/stmgr/forks.go b/chain/stmgr/forks.go index 0f92f3438..252b731d7 100644 --- a/chain/stmgr/forks.go +++ b/chain/stmgr/forks.go @@ -1,25 +1,625 @@ package stmgr import ( + "bytes" "context" + "encoding/binary" + "math" + + multisig0 "github.com/filecoin-project/specs-actors/actors/builtin/multisig" + + "github.com/filecoin-project/lotus/chain/actors/builtin/multisig" + + "github.com/filecoin-project/lotus/chain/state" + + "github.com/filecoin-project/specs-actors/actors/migration/nv3" + "github.com/ipfs/go-cid" - "golang.org/x/xerrors" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + + builtin0 "github.com/filecoin-project/specs-actors/actors/builtin" + init0 "github.com/filecoin-project/specs-actors/actors/builtin/init" + miner0 "github.com/filecoin-project/specs-actors/actors/builtin/miner" + power0 "github.com/filecoin-project/specs-actors/actors/builtin/power" + adt0 "github.com/filecoin-project/specs-actors/actors/util/adt" "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/chain/vm" + cbor "github.com/ipfs/go-ipld-cbor" + "golang.org/x/xerrors" ) -func (sm *StateManager) handleStateForks(ctx context.Context, pstate cid.Cid, height, parentH uint64) (_ cid.Cid, err error) { - for i := parentH; i < height; i++ { - switch i { - case build.ForkNoPowerEPSUpdates: - pstate, err = 
sm.forkNoPowerEPS(ctx, pstate) - if err != nil { - return cid.Undef, xerrors.Errorf("executing state fork in epoch %d: %w", i, err) - } +var ForksAtHeight = map[abi.ChainEpoch]func(context.Context, *StateManager, ExecCallback, cid.Cid, *types.TipSet) (cid.Cid, error){ + build.UpgradeBreezeHeight: UpgradeFaucetBurnRecovery, + build.UpgradeIgnitionHeight: UpgradeIgnition, + build.UpgradeLiftoffHeight: UpgradeLiftoff, +} - log.Infof("forkNoPowerEPS state: %s", pstate) +func (sm *StateManager) handleStateForks(ctx context.Context, root cid.Cid, height abi.ChainEpoch, cb ExecCallback, ts *types.TipSet) (cid.Cid, error) { + retCid := root + var err error + f, ok := ForksAtHeight[height] + if ok { + retCid, err = f(ctx, sm, cb, root, ts) + if err != nil { + return cid.Undef, err } } - return pstate, nil + return retCid, nil +} + +func doTransfer(cb ExecCallback, tree types.StateTree, from, to address.Address, amt abi.TokenAmount) error { + fromAct, err := tree.GetActor(from) + if err != nil { + return xerrors.Errorf("failed to get 'from' actor for transfer: %w", err) + } + + fromAct.Balance = types.BigSub(fromAct.Balance, amt) + if fromAct.Balance.Sign() < 0 { + return xerrors.Errorf("(sanity) deducted more funds from target account than it had (%s, %s)", from, types.FIL(amt)) + } + + if err := tree.SetActor(from, fromAct); err != nil { + return xerrors.Errorf("failed to persist from actor: %w", err) + } + + toAct, err := tree.GetActor(to) + if err != nil { + return xerrors.Errorf("failed to get 'to' actor for transfer: %w", err) + } + + toAct.Balance = types.BigAdd(toAct.Balance, amt) + + if err := tree.SetActor(to, toAct); err != nil { + return xerrors.Errorf("failed to persist to actor: %w", err) + } + + if cb != nil { + // record the transfer in execution traces + + fakeMsg := &types.Message{ + From: from, + To: to, + Value: amt, + Nonce: math.MaxUint64, + } + fakeRct := &types.MessageReceipt{ + ExitCode: 0, + Return: nil, + GasUsed: 0, + } + + if err := 
cb(fakeMsg.Cid(), fakeMsg, &vm.ApplyRet{ + MessageReceipt: *fakeRct, + ActorErr: nil, + ExecutionTrace: types.ExecutionTrace{ + Msg: fakeMsg, + MsgRct: fakeRct, + Error: "", + Duration: 0, + GasCharges: nil, + Subcalls: nil, + }, + Duration: 0, + GasCosts: vm.ZeroGasOutputs(), + }); err != nil { + return xerrors.Errorf("recording transfer: %w", err) + } + } + + return nil +} + +func UpgradeFaucetBurnRecovery(ctx context.Context, sm *StateManager, cb ExecCallback, root cid.Cid, ts *types.TipSet) (cid.Cid, error) { + // Some initial parameters + FundsForMiners := types.FromFil(1_000_000) + LookbackEpoch := abi.ChainEpoch(32000) + AccountCap := types.FromFil(0) + BaseMinerBalance := types.FromFil(20) + DesiredReimbursementBalance := types.FromFil(5_000_000) + + isSystemAccount := func(addr address.Address) (bool, error) { + id, err := address.IDFromAddress(addr) + if err != nil { + return false, xerrors.Errorf("id address: %w", err) + } + + if id < 1000 { + return true, nil + } + return false, nil + } + + minerFundsAlloc := func(pow, tpow abi.StoragePower) abi.TokenAmount { + return types.BigDiv(types.BigMul(pow, FundsForMiners), tpow) + } + + // Grab lookback state for account checks + lbts, err := sm.ChainStore().GetTipsetByHeight(ctx, LookbackEpoch, ts, false) + if err != nil { + return cid.Undef, xerrors.Errorf("failed to get tipset at lookback height: %w", err) + } + + lbtree, err := sm.ParentState(lbts) + if err != nil { + return cid.Undef, xerrors.Errorf("loading state tree failed: %w", err) + } + + ReserveAddress, err := address.NewFromString("t090") + if err != nil { + return cid.Undef, xerrors.Errorf("failed to parse reserve address: %w", err) + } + + tree, err := sm.StateTree(root) + if err != nil { + return cid.Undef, xerrors.Errorf("getting state tree: %w", err) + } + + type transfer struct { + From address.Address + To address.Address + Amt abi.TokenAmount + } + + var transfers []transfer + + // Take all excess funds away, put them into the reserve 
account + err = tree.ForEach(func(addr address.Address, act *types.Actor) error { + switch act.Code { + case builtin0.AccountActorCodeID, builtin0.MultisigActorCodeID, builtin0.PaymentChannelActorCodeID: + sysAcc, err := isSystemAccount(addr) + if err != nil { + return xerrors.Errorf("checking system account: %w", err) + } + + if !sysAcc { + transfers = append(transfers, transfer{ + From: addr, + To: ReserveAddress, + Amt: act.Balance, + }) + } + case builtin0.StorageMinerActorCodeID: + var st miner0.State + if err := sm.ChainStore().Store(ctx).Get(ctx, act.Head, &st); err != nil { + return xerrors.Errorf("failed to load miner state: %w", err) + } + + var available abi.TokenAmount + { + defer func() { + if err := recover(); err != nil { + log.Warnf("Get available balance failed (%s, %s, %s): %s", addr, act.Head, act.Balance, err) + } + available = abi.NewTokenAmount(0) + }() + // this panics if the miner doesnt have enough funds to cover their locked pledge + available = st.GetAvailableBalance(act.Balance) + } + + transfers = append(transfers, transfer{ + From: addr, + To: ReserveAddress, + Amt: available, + }) + } + return nil + }) + if err != nil { + return cid.Undef, xerrors.Errorf("foreach over state tree failed: %w", err) + } + + // Execute transfers from previous step + for _, t := range transfers { + if err := doTransfer(cb, tree, t.From, t.To, t.Amt); err != nil { + return cid.Undef, xerrors.Errorf("transfer %s %s->%s failed: %w", t.Amt, t.From, t.To, err) + } + } + + // pull up power table to give miners back some funds proportional to their power + var ps power0.State + powAct, err := tree.GetActor(builtin0.StoragePowerActorAddr) + if err != nil { + return cid.Undef, xerrors.Errorf("failed to load power actor: %w", err) + } + + cst := cbor.NewCborStore(sm.ChainStore().Blockstore()) + if err := cst.Get(ctx, powAct.Head, &ps); err != nil { + return cid.Undef, xerrors.Errorf("failed to get power actor state: %w", err) + } + + totalPower := 
ps.TotalBytesCommitted + + var transfersBack []transfer + // Now, we return some funds to places where they are needed + err = tree.ForEach(func(addr address.Address, act *types.Actor) error { + lbact, err := lbtree.GetActor(addr) + if err != nil { + if !xerrors.Is(err, types.ErrActorNotFound) { + return xerrors.Errorf("failed to get actor in lookback state") + } + } + + prevBalance := abi.NewTokenAmount(0) + if lbact != nil { + prevBalance = lbact.Balance + } + + switch act.Code { + case builtin0.AccountActorCodeID, builtin0.MultisigActorCodeID, builtin0.PaymentChannelActorCodeID: + nbalance := big.Min(prevBalance, AccountCap) + if nbalance.Sign() != 0 { + transfersBack = append(transfersBack, transfer{ + From: ReserveAddress, + To: addr, + Amt: nbalance, + }) + } + case builtin0.StorageMinerActorCodeID: + var st miner0.State + if err := sm.ChainStore().Store(ctx).Get(ctx, act.Head, &st); err != nil { + return xerrors.Errorf("failed to load miner state: %w", err) + } + + var minfo miner0.MinerInfo + if err := cst.Get(ctx, st.Info, &minfo); err != nil { + return xerrors.Errorf("failed to get miner info: %w", err) + } + + sectorsArr, err := adt0.AsArray(sm.ChainStore().Store(ctx), st.Sectors) + if err != nil { + return xerrors.Errorf("failed to load sectors array: %w", err) + } + + slen := sectorsArr.Length() + + power := types.BigMul(types.NewInt(slen), types.NewInt(uint64(minfo.SectorSize))) + + mfunds := minerFundsAlloc(power, totalPower) + transfersBack = append(transfersBack, transfer{ + From: ReserveAddress, + To: minfo.Worker, + Amt: mfunds, + }) + + // Now make sure to give each miner who had power at the lookback some FIL + lbact, err := lbtree.GetActor(addr) + if err == nil { + var lbst miner0.State + if err := sm.ChainStore().Store(ctx).Get(ctx, lbact.Head, &lbst); err != nil { + return xerrors.Errorf("failed to load miner state: %w", err) + } + + lbsectors, err := adt0.AsArray(sm.ChainStore().Store(ctx), lbst.Sectors) + if err != nil { + return 
xerrors.Errorf("failed to load lb sectors array: %w", err) + } + + if lbsectors.Length() > 0 { + transfersBack = append(transfersBack, transfer{ + From: ReserveAddress, + To: minfo.Worker, + Amt: BaseMinerBalance, + }) + } + + } else { + log.Warnf("failed to get miner in lookback state: %s", err) + } + } + return nil + }) + if err != nil { + return cid.Undef, xerrors.Errorf("foreach over state tree failed: %w", err) + } + + for _, t := range transfersBack { + if err := doTransfer(cb, tree, t.From, t.To, t.Amt); err != nil { + return cid.Undef, xerrors.Errorf("transfer %s %s->%s failed: %w", t.Amt, t.From, t.To, err) + } + } + + // transfer all burnt funds back to the reserve account + burntAct, err := tree.GetActor(builtin0.BurntFundsActorAddr) + if err != nil { + return cid.Undef, xerrors.Errorf("failed to load burnt funds actor: %w", err) + } + if err := doTransfer(cb, tree, builtin0.BurntFundsActorAddr, ReserveAddress, burntAct.Balance); err != nil { + return cid.Undef, xerrors.Errorf("failed to unburn funds: %w", err) + } + + // Top up the reimbursement service + reimbAddr, err := address.NewFromString("t0111") + if err != nil { + return cid.Undef, xerrors.Errorf("failed to parse reimbursement service address") + } + + reimb, err := tree.GetActor(reimbAddr) + if err != nil { + return cid.Undef, xerrors.Errorf("failed to load reimbursement account actor: %w", err) + } + + difference := types.BigSub(DesiredReimbursementBalance, reimb.Balance) + if err := doTransfer(cb, tree, ReserveAddress, reimbAddr, difference); err != nil { + return cid.Undef, xerrors.Errorf("failed to top up reimbursement account: %w", err) + } + + // Now, a final sanity check to make sure the balances all check out + total := abi.NewTokenAmount(0) + err = tree.ForEach(func(addr address.Address, act *types.Actor) error { + total = types.BigAdd(total, act.Balance) + return nil + }) + if err != nil { + return cid.Undef, xerrors.Errorf("checking final state balance failed: %w", err) + } + + exp 
:= types.FromFil(build.FilBase) + if !exp.Equals(total) { + return cid.Undef, xerrors.Errorf("resultant state tree account balance was not correct: %s", total) + } + + return tree.Flush(ctx) +} + +func UpgradeIgnition(ctx context.Context, sm *StateManager, cb ExecCallback, root cid.Cid, ts *types.TipSet) (cid.Cid, error) { + store := sm.cs.Store(ctx) + + nst, err := nv3.MigrateStateTree(ctx, store, root, build.UpgradeIgnitionHeight) + if err != nil { + return cid.Undef, xerrors.Errorf("migrating actors state: %w", err) + } + + tree, err := sm.StateTree(nst) + if err != nil { + return cid.Undef, xerrors.Errorf("getting state tree: %w", err) + } + + err = setNetworkName(ctx, store, tree, "ignition") + if err != nil { + return cid.Undef, xerrors.Errorf("setting network name: %w", err) + } + + split1, err := address.NewFromString("t0115") + if err != nil { + return cid.Undef, xerrors.Errorf("first split address: %w", err) + } + + split2, err := address.NewFromString("t0116") + if err != nil { + return cid.Undef, xerrors.Errorf("second split address: %w", err) + } + + err = resetGenesisMsigs(ctx, sm, store, tree) + if err != nil { + return cid.Undef, xerrors.Errorf("resetting genesis msig start epochs: %w", err) + } + + err = splitGenesisMultisig(ctx, cb, split1, store, tree, 50) + if err != nil { + return cid.Undef, xerrors.Errorf("splitting first msig: %w", err) + } + + err = splitGenesisMultisig(ctx, cb, split2, store, tree, 50) + if err != nil { + return cid.Undef, xerrors.Errorf("splitting second msig: %w", err) + } + + err = nv3.CheckStateTree(ctx, store, nst, build.UpgradeIgnitionHeight, builtin0.TotalFilecoin) + if err != nil { + return cid.Undef, xerrors.Errorf("sanity check after ignition upgrade failed: %w", err) + } + + return tree.Flush(ctx) +} + +func UpgradeLiftoff(ctx context.Context, sm *StateManager, cb ExecCallback, root cid.Cid, ts *types.TipSet) (cid.Cid, error) { + tree, err := sm.StateTree(root) + if err != nil { + return cid.Undef, 
xerrors.Errorf("getting state tree: %w", err) + } + + err = setNetworkName(ctx, sm.cs.Store(ctx), tree, "mainnet") + if err != nil { + return cid.Undef, xerrors.Errorf("setting network name: %w", err) + } + + return tree.Flush(ctx) +} + +func setNetworkName(ctx context.Context, store adt0.Store, tree *state.StateTree, name string) error { + ia, err := tree.GetActor(builtin0.InitActorAddr) + if err != nil { + return xerrors.Errorf("getting init actor: %w", err) + } + + var initState init0.State + if err := store.Get(ctx, ia.Head, &initState); err != nil { + return xerrors.Errorf("reading init state: %w", err) + } + + initState.NetworkName = name + + ia.Head, err = store.Put(ctx, &initState) + if err != nil { + return xerrors.Errorf("writing new init state: %w", err) + } + + if err := tree.SetActor(builtin0.InitActorAddr, ia); err != nil { + return xerrors.Errorf("setting init actor: %w", err) + } + + return nil +} + +func splitGenesisMultisig(ctx context.Context, cb ExecCallback, addr address.Address, store adt0.Store, tree *state.StateTree, portions uint64) error { + if portions < 1 { + return xerrors.Errorf("cannot split into 0 portions") + } + + mact, err := tree.GetActor(addr) + if err != nil { + return xerrors.Errorf("getting msig actor: %w", err) + } + + mst, err := multisig.Load(store, mact) + if err != nil { + return xerrors.Errorf("getting msig state: %w", err) + } + + signers, err := mst.Signers() + if err != nil { + return xerrors.Errorf("getting msig signers: %w", err) + } + + thresh, err := mst.Threshold() + if err != nil { + return xerrors.Errorf("getting msig threshold: %w", err) + } + + ibal, err := mst.InitialBalance() + if err != nil { + return xerrors.Errorf("getting msig initial balance: %w", err) + } + + se, err := mst.StartEpoch() + if err != nil { + return xerrors.Errorf("getting msig start epoch: %w", err) + } + + ud, err := mst.UnlockDuration() + if err != nil { + return xerrors.Errorf("getting msig unlock duration: %w", err) + } + + 
pending, err := adt0.MakeEmptyMap(store).Root() + if err != nil { + return xerrors.Errorf("failed to create empty map: %w", err) + } + + newIbal := big.Div(ibal, types.NewInt(portions)) + newState := &multisig0.State{ + Signers: signers, + NumApprovalsThreshold: thresh, + NextTxnID: 0, + InitialBalance: newIbal, + StartEpoch: se, + UnlockDuration: ud, + PendingTxns: pending, + } + + scid, err := store.Put(ctx, newState) + if err != nil { + return xerrors.Errorf("storing new state: %w", err) + } + + newActor := types.Actor{ + Code: builtin0.MultisigActorCodeID, + Head: scid, + Nonce: 0, + Balance: big.Zero(), + } + + i := uint64(0) + for i < portions { + keyAddr, err := makeKeyAddr(addr, i) + if err != nil { + return xerrors.Errorf("creating key address: %w", err) + } + + idAddr, err := tree.RegisterNewAddress(keyAddr) + if err != nil { + return xerrors.Errorf("registering new address: %w", err) + } + + err = tree.SetActor(idAddr, &newActor) + if err != nil { + return xerrors.Errorf("setting new msig actor state: %w", err) + } + + if err := doTransfer(cb, tree, addr, idAddr, newIbal); err != nil { + return xerrors.Errorf("transferring split msig balance: %w", err) + } + + i++ + } + + return nil +} + +func makeKeyAddr(splitAddr address.Address, count uint64) (address.Address, error) { + var b bytes.Buffer + if err := splitAddr.MarshalCBOR(&b); err != nil { + return address.Undef, xerrors.Errorf("marshalling split address: %w", err) + } + + if err := binary.Write(&b, binary.BigEndian, count); err != nil { + return address.Undef, xerrors.Errorf("writing count into a buffer: %w", err) + } + + if err := binary.Write(&b, binary.BigEndian, []byte("Ignition upgrade")); err != nil { + return address.Undef, xerrors.Errorf("writing fork name into a buffer: %w", err) + } + + addr, err := address.NewActorAddress(b.Bytes()) + if err != nil { + return address.Undef, xerrors.Errorf("create actor address: %w", err) + } + + return addr, nil +} + +func resetGenesisMsigs(ctx 
context.Context, sm *StateManager, store adt0.Store, tree *state.StateTree) error { + gb, err := sm.cs.GetGenesis() + if err != nil { + return xerrors.Errorf("getting genesis block: %w", err) + } + + gts, err := types.NewTipSet([]*types.BlockHeader{gb}) + if err != nil { + return xerrors.Errorf("getting genesis tipset: %w", err) + } + + cst := cbor.NewCborStore(sm.cs.Blockstore()) + genesisTree, err := state.LoadStateTree(cst, gts.ParentState()) + if err != nil { + return xerrors.Errorf("loading state tree: %w", err) + } + + err = genesisTree.ForEach(func(addr address.Address, genesisActor *types.Actor) error { + if genesisActor.Code == builtin0.MultisigActorCodeID { + currActor, err := tree.GetActor(addr) + if err != nil { + return xerrors.Errorf("loading actor: %w", err) + } + + var currState multisig0.State + if err := store.Get(ctx, currActor.Head, &currState); err != nil { + return xerrors.Errorf("reading multisig state: %w", err) + } + + currState.StartEpoch = build.UpgradeLiftoffHeight + + currActor.Head, err = store.Put(ctx, &currState) + if err != nil { + return xerrors.Errorf("writing new multisig state: %w", err) + } + + if err := tree.SetActor(addr, currActor); err != nil { + return xerrors.Errorf("setting multisig actor: %w", err) + } + } + return nil + }) + + if err != nil { + return xerrors.Errorf("iterating over genesis actors: %w", err) + } + + return nil } diff --git a/chain/stmgr/forks_test.go b/chain/stmgr/forks_test.go new file mode 100644 index 000000000..a3423ccdd --- /dev/null +++ b/chain/stmgr/forks_test.go @@ -0,0 +1,225 @@ +package stmgr_test + +import ( + "context" + "fmt" + "io" + "testing" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/specs-actors/actors/builtin" + init_ "github.com/filecoin-project/specs-actors/actors/builtin/init" + "github.com/filecoin-project/specs-actors/actors/runtime" + "golang.org/x/xerrors" + + 
"github.com/filecoin-project/lotus/chain/actors" + "github.com/filecoin-project/lotus/chain/actors/aerrors" + lotusinit "github.com/filecoin-project/lotus/chain/actors/builtin/init" + "github.com/filecoin-project/lotus/chain/actors/policy" + "github.com/filecoin-project/lotus/chain/gen" + "github.com/filecoin-project/lotus/chain/stmgr" + . "github.com/filecoin-project/lotus/chain/stmgr" + "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/chain/vm" + _ "github.com/filecoin-project/lotus/lib/sigs/bls" + _ "github.com/filecoin-project/lotus/lib/sigs/secp" + + "github.com/ipfs/go-cid" + cbor "github.com/ipfs/go-ipld-cbor" + logging "github.com/ipfs/go-log" + cbg "github.com/whyrusleeping/cbor-gen" +) + +func init() { + policy.SetSupportedProofTypes(abi.RegisteredSealProof_StackedDrg2KiBV1) + policy.SetConsensusMinerMinPower(abi.NewStoragePower(2048)) + policy.SetMinVerifiedDealSize(abi.NewStoragePower(256)) +} + +const testForkHeight = 40 + +type testActor struct { +} + +type testActorState struct { + HasUpgraded uint64 +} + +func (tas *testActorState) MarshalCBOR(w io.Writer) error { + return cbg.CborWriteHeader(w, cbg.MajUnsignedInt, tas.HasUpgraded) +} + +func (tas *testActorState) UnmarshalCBOR(r io.Reader) error { + t, v, err := cbg.CborReadHeader(r) + if err != nil { + return err + } + if t != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type in test actor state (got %d)", t) + } + tas.HasUpgraded = v + return nil +} + +func (ta *testActor) Exports() []interface{} { + return []interface{}{ + 1: ta.Constructor, + 2: ta.TestMethod, + } +} + +func (ta *testActor) Constructor(rt runtime.Runtime, params *abi.EmptyValue) *abi.EmptyValue { + rt.ValidateImmediateCallerAcceptAny() + rt.StateCreate(&testActorState{11}) + fmt.Println("NEW ACTOR ADDRESS IS: ", rt.Receiver()) + + return abi.Empty +} + +func (ta *testActor) TestMethod(rt runtime.Runtime, params *abi.EmptyValue) *abi.EmptyValue { + rt.ValidateImmediateCallerAcceptAny() + 
var st testActorState + rt.StateReadonly(&st) + + if rt.CurrEpoch() > testForkHeight { + if st.HasUpgraded != 55 { + panic(aerrors.Fatal("fork updating applied in wrong order")) + } + } else { + if st.HasUpgraded != 11 { + panic(aerrors.Fatal("fork updating happened too early")) + } + } + + return abi.Empty +} + +func TestForkHeightTriggers(t *testing.T) { + logging.SetAllLoggers(logging.LevelInfo) + + ctx := context.TODO() + + cg, err := gen.NewGenerator() + if err != nil { + t.Fatal(err) + } + + sm := NewStateManager(cg.ChainStore()) + + inv := vm.NewInvoker() + + // predicting the address here... may break if other assumptions change + taddr, err := address.NewIDAddress(1002) + if err != nil { + t.Fatal(err) + } + + stmgr.ForksAtHeight[testForkHeight] = func(ctx context.Context, sm *StateManager, cb ExecCallback, root cid.Cid, ts *types.TipSet) (cid.Cid, error) { + cst := cbor.NewCborStore(sm.ChainStore().Blockstore()) + + st, err := sm.StateTree(root) + if err != nil { + return cid.Undef, xerrors.Errorf("getting state tree: %w", err) + } + + act, err := st.GetActor(taddr) + if err != nil { + return cid.Undef, err + } + + var tas testActorState + if err := cst.Get(ctx, act.Head, &tas); err != nil { + return cid.Undef, xerrors.Errorf("in fork handler, failed to run get: %w", err) + } + + tas.HasUpgraded = 55 + + ns, err := cst.Put(ctx, &tas) + if err != nil { + return cid.Undef, err + } + + act.Head = ns + + if err := st.SetActor(taddr, act); err != nil { + return cid.Undef, err + } + + return st.Flush(ctx) + } + + inv.Register(builtin.PaymentChannelActorCodeID, &testActor{}, &testActorState{}) + sm.SetVMConstructor(func(ctx context.Context, vmopt *vm.VMOpts) (*vm.VM, error) { + nvm, err := vm.NewVM(ctx, vmopt) + if err != nil { + return nil, err + } + nvm.SetInvoker(inv) + return nvm, nil + }) + + cg.SetStateManager(sm) + + var msgs []*types.SignedMessage + + enc, err := actors.SerializeParams(&init_.ExecParams{CodeCID: builtin.PaymentChannelActorCodeID}) + if 
err != nil { + t.Fatal(err) + } + + m := &types.Message{ + From: cg.Banker(), + To: lotusinit.Address, + Method: builtin.MethodsInit.Exec, + Params: enc, + GasLimit: types.TestGasLimit, + } + sig, err := cg.Wallet().Sign(ctx, cg.Banker(), m.Cid().Bytes()) + if err != nil { + t.Fatal(err) + } + msgs = append(msgs, &types.SignedMessage{ + Signature: *sig, + Message: *m, + }) + + nonce := uint64(1) + cg.GetMessages = func(cg *gen.ChainGen) ([]*types.SignedMessage, error) { + if len(msgs) > 0 { + fmt.Println("added construct method") + m := msgs + msgs = nil + return m, nil + } + + m := &types.Message{ + From: cg.Banker(), + To: taddr, + Method: 2, + Params: nil, + Nonce: nonce, + GasLimit: types.TestGasLimit, + } + nonce++ + + sig, err := cg.Wallet().Sign(ctx, cg.Banker(), m.Cid().Bytes()) + if err != nil { + return nil, err + } + + return []*types.SignedMessage{ + { + Signature: *sig, + Message: *m, + }, + }, nil + } + + for i := 0; i < 50; i++ { + _, err = cg.NextTipSet() + if err != nil { + t.Fatal(err) + } + } +} diff --git a/chain/stmgr/read.go b/chain/stmgr/read.go new file mode 100644 index 000000000..9a9b80265 --- /dev/null +++ b/chain/stmgr/read.go @@ -0,0 +1,66 @@ +package stmgr + +import ( + "context" + + "golang.org/x/xerrors" + + "github.com/ipfs/go-cid" + cbor "github.com/ipfs/go-ipld-cbor" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/lotus/chain/state" + "github.com/filecoin-project/lotus/chain/types" +) + +func (sm *StateManager) ParentStateTsk(tsk types.TipSetKey) (*state.StateTree, error) { + ts, err := sm.cs.GetTipSetFromKey(tsk) + if err != nil { + return nil, xerrors.Errorf("loading tipset %s: %w", tsk, err) + } + return sm.ParentState(ts) +} + +func (sm *StateManager) ParentState(ts *types.TipSet) (*state.StateTree, error) { + cst := cbor.NewCborStore(sm.cs.Blockstore()) + state, err := state.LoadStateTree(cst, sm.parentState(ts)) + if err != nil { + return nil, xerrors.Errorf("load state tree: %w", err) + } + + 
return state, nil +} + +func (sm *StateManager) StateTree(st cid.Cid) (*state.StateTree, error) { + cst := cbor.NewCborStore(sm.cs.Blockstore()) + state, err := state.LoadStateTree(cst, st) + if err != nil { + return nil, xerrors.Errorf("load state tree: %w", err) + } + + return state, nil +} + +func (sm *StateManager) LoadActor(_ context.Context, addr address.Address, ts *types.TipSet) (*types.Actor, error) { + state, err := sm.ParentState(ts) + if err != nil { + return nil, err + } + return state.GetActor(addr) +} + +func (sm *StateManager) LoadActorTsk(_ context.Context, addr address.Address, tsk types.TipSetKey) (*types.Actor, error) { + state, err := sm.ParentStateTsk(tsk) + if err != nil { + return nil, err + } + return state.GetActor(addr) +} + +func (sm *StateManager) LoadActorRaw(_ context.Context, addr address.Address, st cid.Cid) (*types.Actor, error) { + state, err := sm.StateTree(st) + if err != nil { + return nil, err + } + return state.GetActor(addr) +} diff --git a/chain/stmgr/stmgr.go b/chain/stmgr/stmgr.go index b880492f2..e800ce665 100644 --- a/chain/stmgr/stmgr.go +++ b/chain/stmgr/stmgr.go @@ -5,21 +5,35 @@ import ( "fmt" "sync" + builtin0 "github.com/filecoin-project/specs-actors/actors/builtin" + msig0 "github.com/filecoin-project/specs-actors/actors/builtin/multisig" + + "github.com/ipfs/go-cid" + cbor "github.com/ipfs/go-ipld-cbor" + logging "github.com/ipfs/go-log/v2" + cbg "github.com/whyrusleeping/cbor-gen" + "go.opencensus.io/trace" + "golang.org/x/xerrors" + "github.com/filecoin-project/go-address" - amt "github.com/filecoin-project/go-amt-ipld" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/go-state-types/network" + + "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/chain/actors" + "github.com/filecoin-project/lotus/chain/actors/adt" + 
"github.com/filecoin-project/lotus/chain/actors/builtin" + "github.com/filecoin-project/lotus/chain/actors/builtin/market" + "github.com/filecoin-project/lotus/chain/actors/builtin/multisig" + "github.com/filecoin-project/lotus/chain/actors/builtin/paych" + "github.com/filecoin-project/lotus/chain/actors/builtin/power" + "github.com/filecoin-project/lotus/chain/actors/builtin/reward" "github.com/filecoin-project/lotus/chain/state" "github.com/filecoin-project/lotus/chain/store" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/vm" - cbg "github.com/whyrusleeping/cbor-gen" - "golang.org/x/xerrors" - - bls "github.com/filecoin-project/filecoin-ffi" - "github.com/ipfs/go-cid" - hamt "github.com/ipfs/go-hamt-ipld" - logging "github.com/ipfs/go-log" - "go.opencensus.io/trace" ) var log = logging.Logger("statemgr") @@ -27,13 +41,18 @@ var log = logging.Logger("statemgr") type StateManager struct { cs *store.ChainStore - stCache map[string][]cid.Cid - compWait map[string]chan struct{} - stlk sync.Mutex + stCache map[string][]cid.Cid + compWait map[string]chan struct{} + stlk sync.Mutex + genesisMsigLk sync.Mutex + newVM func(context.Context, *vm.VMOpts) (*vm.VM, error) + preIgnitionGenInfos *genesisInfo + postIgnitionGenInfos *genesisInfo } func NewStateManager(cs *store.ChainStore) *StateManager { return &StateManager{ + newVM: vm.NewVM, cs: cs, stCache: make(map[string][]cid.Cid), compWait: make(map[string]chan struct{}), @@ -97,7 +116,7 @@ func (sm *StateManager) TipSetState(ctx context.Context, ts *types.TipSet) (st c return ts.Blocks()[0].ParentStateRoot, ts.Blocks()[0].ParentMessageReceipts, nil } - st, rec, err = sm.computeTipSetState(ctx, ts.Blocks(), nil) + st, rec, err = sm.computeTipSetState(ctx, ts, nil) if err != nil { return cid.Undef, cid.Undef, err } @@ -105,180 +124,205 @@ func (sm *StateManager) TipSetState(ctx context.Context, ts *types.TipSet) (st c return st, rec, nil } -func (sm *StateManager) 
computeTipSetState(ctx context.Context, blks []*types.BlockHeader, cb func(cid.Cid, *types.Message, *vm.ApplyRet) error) (cid.Cid, cid.Cid, error) { - ctx, span := trace.StartSpan(ctx, "computeTipSetState") - defer span.End() +func traceFunc(trace *[]*api.InvocResult) func(mcid cid.Cid, msg *types.Message, ret *vm.ApplyRet) error { + return func(mcid cid.Cid, msg *types.Message, ret *vm.ApplyRet) error { + ir := &api.InvocResult{ + Msg: msg, + MsgRct: &ret.MessageReceipt, + ExecutionTrace: ret.ExecutionTrace, + Duration: ret.Duration, + } + if ret.ActorErr != nil { + ir.Error = ret.ActorErr.Error() + } + *trace = append(*trace, ir) + return nil + } +} - for i := 0; i < len(blks); i++ { - for j := i + 1; j < len(blks); j++ { - if blks[i].Miner == blks[j].Miner { - return cid.Undef, cid.Undef, - xerrors.Errorf("duplicate miner in a tipset (%s %s)", - blks[i].Miner, blks[j].Miner) +func (sm *StateManager) ExecutionTrace(ctx context.Context, ts *types.TipSet) (cid.Cid, []*api.InvocResult, error) { + var trace []*api.InvocResult + st, _, err := sm.computeTipSetState(ctx, ts, traceFunc(&trace)) + if err != nil { + return cid.Undef, nil, err + } + + return st, trace, nil +} + +type ExecCallback func(cid.Cid, *types.Message, *vm.ApplyRet) error + +func (sm *StateManager) ApplyBlocks(ctx context.Context, parentEpoch abi.ChainEpoch, pstate cid.Cid, bms []store.BlockMessages, epoch abi.ChainEpoch, r vm.Rand, cb ExecCallback, baseFee abi.TokenAmount, ts *types.TipSet) (cid.Cid, cid.Cid, error) { + + makeVmWithBaseState := func(base cid.Cid) (*vm.VM, error) { + vmopt := &vm.VMOpts{ + StateBase: base, + Epoch: epoch, + Rand: r, + Bstore: sm.cs.Blockstore(), + Syscalls: sm.cs.VMSys(), + CircSupplyCalc: sm.GetCirculatingSupply, + NtwkVersion: sm.GetNtwkVersion, + BaseFee: baseFee, + } + + return sm.newVM(ctx, vmopt) + } + + vmi, err := makeVmWithBaseState(pstate) + if err != nil { + return cid.Undef, cid.Undef, xerrors.Errorf("making vm: %w", err) + } + + runCron := func() error { 
+ // TODO: this nonce-getting is a tiny bit ugly + ca, err := vmi.StateTree().GetActor(builtin0.SystemActorAddr) + if err != nil { + return err + } + + cronMsg := &types.Message{ + To: builtin0.CronActorAddr, + From: builtin0.SystemActorAddr, + Nonce: ca.Nonce, + Value: types.NewInt(0), + GasFeeCap: types.NewInt(0), + GasPremium: types.NewInt(0), + GasLimit: build.BlockGasLimit * 10000, // Make super sure this is never too little + Method: builtin0.MethodsCron.EpochTick, + Params: nil, + } + ret, err := vmi.ApplyImplicitMessage(ctx, cronMsg) + if err != nil { + return err + } + if cb != nil { + if err := cb(cronMsg.Cid(), cronMsg, ret); err != nil { + return xerrors.Errorf("callback failed on cron message: %w", err) } } - } - - pstate := blks[0].ParentStateRoot - if len(blks[0].Parents) > 0 { // don't support forks on genesis - parent, err := sm.cs.GetBlock(blks[0].Parents[0]) - if err != nil { - return cid.Undef, cid.Undef, xerrors.Errorf("getting parent block: %w", err) - } - - pstate, err = sm.handleStateForks(ctx, blks[0].ParentStateRoot, blks[0].Height, parent.Height) - if err != nil { - return cid.Undef, cid.Undef, xerrors.Errorf("error handling state forks: %w", err) - } - } - - cids := make([]cid.Cid, len(blks)) - for i, v := range blks { - cids[i] = v.Cid() - } - - r := store.NewChainRand(sm.cs, cids, blks[0].Height) - - vmi, err := vm.NewVM(pstate, blks[0].Height, r, address.Undef, sm.cs.Blockstore()) - if err != nil { - return cid.Undef, cid.Undef, xerrors.Errorf("instantiating VM failed: %w", err) - } - - netact, err := vmi.StateTree().GetActor(actors.NetworkAddress) - if err != nil { - return cid.Undef, cid.Undef, xerrors.Errorf("failed to get network actor: %w", err) - } - reward := vm.MiningReward(netact.Balance) - for tsi, b := range blks { - netact, err = vmi.StateTree().GetActor(actors.NetworkAddress) - if err != nil { - return cid.Undef, cid.Undef, xerrors.Errorf("failed to get network actor: %w", err) - } - vmi.SetBlockMiner(b.Miner) - - owner, 
err := GetMinerOwner(ctx, sm, pstate, b.Miner) - if err != nil { - return cid.Undef, cid.Undef, xerrors.Errorf("failed to get owner for miner %s: %w", b.Miner, err) - } - - act, err := vmi.StateTree().GetActor(owner) - if err != nil { - return cid.Undef, cid.Undef, xerrors.Errorf("failed to get miner owner actor") - } - - if err := vm.Transfer(netact, act, reward); err != nil { - return cid.Undef, cid.Undef, xerrors.Errorf("failed to deduct funds from network actor: %w", err) - } - - // all block miners created a valid post, go update the actor state - postSubmitMsg := &types.Message{ - From: actors.NetworkAddress, - Nonce: netact.Nonce, - To: b.Miner, - Method: actors.MAMethods.SubmitElectionPoSt, - GasPrice: types.NewInt(0), - GasLimit: types.NewInt(10000000000), - Value: types.NewInt(0), - } - ret, err := vmi.ApplyMessage(ctx, postSubmitMsg) - if err != nil { - return cid.Undef, cid.Undef, xerrors.Errorf("submit election post message for block %s (miner %s) invocation failed: %w", b.Cid(), b.Miner, err) - } if ret.ExitCode != 0 { - return cid.Undef, cid.Undef, xerrors.Errorf("submit election post invocation returned nonzero exit code: %d (err = %s, block = %s, miner = %s, tsi = %d)", ret.ExitCode, ret.ActorErr, b.Cid(), b.Miner, tsi) + return xerrors.Errorf("CheckProofSubmissions exit was non-zero: %d", ret.ExitCode) } - } - // TODO: can't use method from chainstore because it doesnt let us know who the block miners were - applied := make(map[address.Address]uint64) - balances := make(map[address.Address]types.BigInt) - - preloadAddr := func(a address.Address) error { - if _, ok := applied[a]; !ok { - act, err := vmi.StateTree().GetActor(a) - if err != nil { - return err - } - - applied[a] = act.Nonce - balances[a] = act.Balance - } return nil } - var receipts []cbg.CBORMarshaler - for _, b := range blks { - vmi.SetBlockMiner(b.Miner) - - bms, sms, err := sm.cs.MessagesForBlock(b) + for i := parentEpoch; i < epoch; i++ { + // handle state forks + // XXX: The 
state tree + newState, err := sm.handleStateForks(ctx, pstate, i, cb, ts) if err != nil { - return cid.Undef, cid.Undef, xerrors.Errorf("failed to get messages for block: %w", err) + return cid.Undef, cid.Undef, xerrors.Errorf("error handling state forks: %w", err) } - cmsgs := make([]store.ChainMsg, 0, len(bms)+len(sms)) - for _, m := range bms { - cmsgs = append(cmsgs, m) - } - for _, sm := range sms { - cmsgs = append(cmsgs, sm) + if pstate != newState { + vmi, err = makeVmWithBaseState(newState) + if err != nil { + return cid.Undef, cid.Undef, xerrors.Errorf("making vm: %w", err) + } } - for _, cm := range cmsgs { + if i > parentEpoch { + // run cron for null rounds if any + if err := runCron(); err != nil { + return cid.Cid{}, cid.Cid{}, err + } + + newState, err = vmi.Flush(ctx) + if err != nil { + return cid.Undef, cid.Undef, xerrors.Errorf("flushing vm: %w", err) + } + } + + vmi.SetBlockHeight(i + 1) + pstate = newState + } + + var receipts []cbg.CBORMarshaler + processedMsgs := map[cid.Cid]bool{} + for _, b := range bms { + penalty := types.NewInt(0) + gasReward := big.Zero() + + for _, cm := range append(b.BlsMessages, b.SecpkMessages...) 
{ m := cm.VMMessage() - if err := preloadAddr(m.From); err != nil { - return cid.Undef, cid.Undef, err - } - - if applied[m.From] != m.Nonce { + if _, found := processedMsgs[m.Cid()]; found { continue } - applied[m.From]++ - - if balances[m.From].LessThan(m.RequiredFunds()) { - continue - } - balances[m.From] = types.BigSub(balances[m.From], m.RequiredFunds()) - - r, err := vmi.ApplyMessage(ctx, m) + r, err := vmi.ApplyMessage(ctx, cm) if err != nil { return cid.Undef, cid.Undef, err } receipts = append(receipts, &r.MessageReceipt) + gasReward = big.Add(gasReward, r.GasCosts.MinerTip) + penalty = big.Add(penalty, r.GasCosts.MinerPenalty) if cb != nil { if err := cb(cm.Cid(), m, r); err != nil { return cid.Undef, cid.Undef, err } } + processedMsgs[m.Cid()] = true + } + + params, err := actors.SerializeParams(&reward.AwardBlockRewardParams{ + Miner: b.Miner, + Penalty: penalty, + GasReward: gasReward, + WinCount: b.WinCount, + }) + if err != nil { + return cid.Undef, cid.Undef, xerrors.Errorf("failed to serialize award params: %w", err) + } + + sysAct, actErr := vmi.StateTree().GetActor(builtin0.SystemActorAddr) + if actErr != nil { + return cid.Undef, cid.Undef, xerrors.Errorf("failed to get system actor: %w", err) + } + + rwMsg := &types.Message{ + From: builtin0.SystemActorAddr, + To: reward.Address, + Nonce: sysAct.Nonce, + Value: types.NewInt(0), + GasFeeCap: types.NewInt(0), + GasPremium: types.NewInt(0), + GasLimit: 1 << 30, + Method: builtin0.MethodsReward.AwardBlockReward, + Params: params, + } + ret, actErr := vmi.ApplyImplicitMessage(ctx, rwMsg) + if actErr != nil { + return cid.Undef, cid.Undef, xerrors.Errorf("failed to apply reward message for miner %s: %w", b.Miner, err) + } + if cb != nil { + if err := cb(rwMsg.Cid(), rwMsg, ret); err != nil { + return cid.Undef, cid.Undef, xerrors.Errorf("callback failed on reward message: %w", err) + } + } + + if ret.ExitCode != 0 { + return cid.Undef, cid.Undef, xerrors.Errorf("reward application message failed 
(exit %d): %s", ret.ExitCode, ret.ActorErr) } } - // TODO: this nonce-getting is a tiny bit ugly - ca, err := vmi.StateTree().GetActor(actors.CronAddress) - if err != nil { - return cid.Undef, cid.Undef, err + if err := runCron(); err != nil { + return cid.Cid{}, cid.Cid{}, err } - ret, err := vmi.ApplyMessage(ctx, &types.Message{ - To: actors.CronAddress, - From: actors.CronAddress, - Nonce: ca.Nonce, - Value: types.NewInt(0), - GasPrice: types.NewInt(0), - GasLimit: types.NewInt(1 << 30), // Make super sure this is never too little - Method: actors.CAMethods.EpochTick, - Params: nil, - }) + // XXX: Is the height correct? Or should it be epoch-1? + rectarr, err := adt.NewArray(sm.cs.Store(ctx), builtin.VersionForNetwork(sm.GetNtwkVersion(ctx, epoch))) if err != nil { - return cid.Undef, cid.Undef, err + return cid.Undef, cid.Undef, xerrors.Errorf("failed to create receipts amt: %w", err) } - if ret.ExitCode != 0 { - return cid.Undef, cid.Undef, xerrors.Errorf("CheckProofSubmissions exit was non-zero: %d", ret.ExitCode) + for i, receipt := range receipts { + if err := rectarr.Set(uint64(i), receipt); err != nil { + return cid.Undef, cid.Undef, xerrors.Errorf("failed to build receipts amt: %w", err) + } } - - bs := amt.WrapBlockstore(sm.cs.Blockstore()) - rectroot, err := amt.FromArray(bs, receipts) + rectroot, err := rectarr.Root() if err != nil { return cid.Undef, cid.Undef, xerrors.Errorf("failed to build receipts amt: %w", err) } @@ -291,51 +335,64 @@ func (sm *StateManager) computeTipSetState(ctx context.Context, blks []*types.Bl return st, rectroot, nil } -func (sm *StateManager) GetActor(addr address.Address, ts *types.TipSet) (*types.Actor, error) { +func (sm *StateManager) computeTipSetState(ctx context.Context, ts *types.TipSet, cb ExecCallback) (cid.Cid, cid.Cid, error) { + ctx, span := trace.StartSpan(ctx, "computeTipSetState") + defer span.End() + + blks := ts.Blocks() + + for i := 0; i < len(blks); i++ { + for j := i + 1; j < len(blks); j++ { + if 
blks[i].Miner == blks[j].Miner { + return cid.Undef, cid.Undef, + xerrors.Errorf("duplicate miner in a tipset (%s %s)", + blks[i].Miner, blks[j].Miner) + } + } + } + + var parentEpoch abi.ChainEpoch + pstate := blks[0].ParentStateRoot + if blks[0].Height > 0 { + parent, err := sm.cs.GetBlock(blks[0].Parents[0]) + if err != nil { + return cid.Undef, cid.Undef, xerrors.Errorf("getting parent block: %w", err) + } + + parentEpoch = parent.Height + } + + cids := make([]cid.Cid, len(blks)) + for i, v := range blks { + cids[i] = v.Cid() + } + + r := store.NewChainRand(sm.cs, cids) + + blkmsgs, err := sm.cs.BlockMsgsForTipset(ts) + if err != nil { + return cid.Undef, cid.Undef, xerrors.Errorf("getting block messages for tipset: %w", err) + } + + baseFee := blks[0].ParentBaseFee + + return sm.ApplyBlocks(ctx, parentEpoch, pstate, blkmsgs, blks[0].Height, r, cb, baseFee, ts) +} + +func (sm *StateManager) parentState(ts *types.TipSet) cid.Cid { if ts == nil { ts = sm.cs.GetHeaviestTipSet() } - stcid := ts.ParentState() - - cst := hamt.CSTFromBstore(sm.cs.Blockstore()) - state, err := state.LoadStateTree(cst, stcid) - if err != nil { - return nil, xerrors.Errorf("load state tree: %w", err) - } - - return state.GetActor(addr) -} - -func (sm *StateManager) GetBalance(addr address.Address, ts *types.TipSet) (types.BigInt, error) { - act, err := sm.GetActor(addr, ts) - if err != nil { - if xerrors.Is(err, types.ErrActorNotFound) { - return types.NewInt(0), nil - } - return types.EmptyInt, xerrors.Errorf("get actor: %w", err) - } - - return act.Balance, nil + return ts.ParentState() } func (sm *StateManager) ChainStore() *store.ChainStore { return sm.cs } -func (sm *StateManager) LoadActorState(ctx context.Context, a address.Address, out interface{}, ts *types.TipSet) (*types.Actor, error) { - act, err := sm.GetActor(a, ts) - if err != nil { - return nil, err - } - - cst := hamt.CSTFromBstore(sm.cs.Blockstore()) - if err := cst.Get(ctx, act.Head, out); err != nil { - return nil, 
err - } - - return act, nil -} +// ResolveToKeyAddress is similar to `vm.ResolveToKeyAddr` but does not allow `Actor` type of addresses. +// Uses the `TipSet` `ts` to generate the VM state. func (sm *StateManager) ResolveToKeyAddress(ctx context.Context, addr address.Address, ts *types.TipSet) (address.Address, error) { switch addr.Protocol() { case address.BLS, address.SECP256K1: @@ -354,7 +411,7 @@ func (sm *StateManager) ResolveToKeyAddress(ctx context.Context, addr address.Ad return address.Undef, xerrors.Errorf("resolve address failed to get tipset state: %w", err) } - cst := hamt.CSTFromBstore(sm.cs.Blockstore()) + cst := cbor.NewCborStore(sm.cs.Blockstore()) tree, err := state.LoadStateTree(cst, st) if err != nil { return address.Undef, xerrors.Errorf("failed to load state tree") @@ -363,7 +420,7 @@ func (sm *StateManager) ResolveToKeyAddress(ctx context.Context, addr address.Ad return vm.ResolveToKeyAddr(tree, cst, addr) } -func (sm *StateManager) GetBlsPublicKey(ctx context.Context, addr address.Address, ts *types.TipSet) (pubk bls.PublicKey, err error) { +func (sm *StateManager) GetBlsPublicKey(ctx context.Context, addr address.Address, ts *types.TipSet) (pubk []byte, err error) { kaddr, err := sm.ResolveToKeyAddress(ctx, addr, ts) if err != nil { return pubk, xerrors.Errorf("failed to resolve address to key address: %w", err) @@ -373,8 +430,16 @@ func (sm *StateManager) GetBlsPublicKey(ctx context.Context, addr address.Addres return pubk, xerrors.Errorf("address must be BLS address to load bls public key") } - copy(pubk[:], kaddr.Payload()) - return pubk, nil + return kaddr.Payload(), nil +} + +func (sm *StateManager) LookupID(ctx context.Context, addr address.Address, ts *types.TipSet) (address.Address, error) { + cst := cbor.NewCborStore(sm.cs.Blockstore()) + state, err := state.LoadStateTree(cst, sm.parentState(ts)) + if err != nil { + return address.Undef, xerrors.Errorf("load state tree: %w", err) + } + return state.LookupID(addr) } func (sm 
*StateManager) GetReceipt(ctx context.Context, msg cid.Cid, ts *types.TipSet) (*types.MessageReceipt, error) { @@ -383,7 +448,7 @@ func (sm *StateManager) GetReceipt(ctx context.Context, msg cid.Cid, ts *types.T return nil, fmt.Errorf("failed to load message: %w", err) } - r, err := sm.tipsetExecutedMessage(ts, msg, m.VMMessage()) + r, _, err := sm.tipsetExecutedMessage(ts, msg, m.VMMessage()) if err != nil { return nil, err } @@ -392,7 +457,7 @@ func (sm *StateManager) GetReceipt(ctx context.Context, msg cid.Cid, ts *types.T return r, nil } - _, r, err = sm.searchBackForMsg(ctx, ts, m) + _, r, _, err = sm.searchBackForMsg(ctx, ts, m) if err != nil { return nil, fmt.Errorf("failed to look back through chain for message: %w", err) } @@ -400,44 +465,48 @@ func (sm *StateManager) GetReceipt(ctx context.Context, msg cid.Cid, ts *types.T return r, nil } -func (sm *StateManager) WaitForMessage(ctx context.Context, mcid cid.Cid) (*types.TipSet, *types.MessageReceipt, error) { +// WaitForMessage blocks until a message appears on chain. It looks backwards in the chain to see if this has already +// happened. It guarantees that the message has been on chain for at least confidence epochs without being reverted +// before returning. 
+func (sm *StateManager) WaitForMessage(ctx context.Context, mcid cid.Cid, confidence uint64) (*types.TipSet, *types.MessageReceipt, cid.Cid, error) { ctx, cancel := context.WithCancel(ctx) defer cancel() msg, err := sm.cs.GetCMessage(mcid) if err != nil { - return nil, nil, fmt.Errorf("failed to load message: %w", err) + return nil, nil, cid.Undef, fmt.Errorf("failed to load message: %w", err) } tsub := sm.cs.SubHeadChanges(ctx) head, ok := <-tsub if !ok { - return nil, nil, fmt.Errorf("SubHeadChanges stream was invalid") + return nil, nil, cid.Undef, fmt.Errorf("SubHeadChanges stream was invalid") } if len(head) != 1 { - return nil, nil, fmt.Errorf("SubHeadChanges first entry should have been one item") + return nil, nil, cid.Undef, fmt.Errorf("SubHeadChanges first entry should have been one item") } if head[0].Type != store.HCCurrent { - return nil, nil, fmt.Errorf("expected current head on SHC stream (got %s)", head[0].Type) + return nil, nil, cid.Undef, fmt.Errorf("expected current head on SHC stream (got %s)", head[0].Type) } - r, err := sm.tipsetExecutedMessage(head[0].Val, mcid, msg.VMMessage()) + r, foundMsg, err := sm.tipsetExecutedMessage(head[0].Val, mcid, msg.VMMessage()) if err != nil { - return nil, nil, err + return nil, nil, cid.Undef, err } if r != nil { - return head[0].Val, r, nil + return head[0].Val, r, foundMsg, nil } var backTs *types.TipSet var backRcp *types.MessageReceipt + var backFm cid.Cid backSearchWait := make(chan struct{}) go func() { - fts, r, err := sm.searchBackForMsg(ctx, head[0].Val, msg) + fts, r, foundMsg, err := sm.searchBackForMsg(ctx, head[0].Val, msg) if err != nil { log.Warnf("failed to look back through chain for message: %w", err) return @@ -445,98 +514,162 @@ func (sm *StateManager) WaitForMessage(ctx context.Context, mcid cid.Cid) (*type backTs = fts backRcp = r + backFm = foundMsg close(backSearchWait) }() + var candidateTs *types.TipSet + var candidateRcp *types.MessageReceipt + var candidateFm cid.Cid + 
heightOfHead := head[0].Val.Height() + reverts := map[types.TipSetKey]bool{} + for { select { case notif, ok := <-tsub: if !ok { - return nil, nil, ctx.Err() + return nil, nil, cid.Undef, ctx.Err() } for _, val := range notif { switch val.Type { case store.HCRevert: - continue + if val.Val.Equals(candidateTs) { + candidateTs = nil + candidateRcp = nil + candidateFm = cid.Undef + } + if backSearchWait != nil { + reverts[val.Val.Key()] = true + } case store.HCApply: - r, err := sm.tipsetExecutedMessage(val.Val, mcid, msg.VMMessage()) + if candidateTs != nil && val.Val.Height() >= candidateTs.Height()+abi.ChainEpoch(confidence) { + return candidateTs, candidateRcp, candidateFm, nil + } + r, foundMsg, err := sm.tipsetExecutedMessage(val.Val, mcid, msg.VMMessage()) if err != nil { - return nil, nil, err + return nil, nil, cid.Undef, err } if r != nil { - return val.Val, r, nil + if confidence == 0 { + return val.Val, r, foundMsg, err + } + candidateTs = val.Val + candidateRcp = r + candidateFm = foundMsg } + heightOfHead = val.Val.Height() } } case <-backSearchWait: - if backTs != nil { - return backTs, backRcp, nil + // check if we found the message in the chain and that is hasn't been reverted since we started searching + if backTs != nil && !reverts[backTs.Key()] { + // if head is at or past confidence interval, return immediately + if heightOfHead >= backTs.Height()+abi.ChainEpoch(confidence) { + return backTs, backRcp, backFm, nil + } + + // wait for confidence interval + candidateTs = backTs + candidateRcp = backRcp + candidateFm = backFm } + reverts = nil backSearchWait = nil case <-ctx.Done(): - return nil, nil, ctx.Err() + return nil, nil, cid.Undef, ctx.Err() } } } -func (sm *StateManager) searchBackForMsg(ctx context.Context, from *types.TipSet, m store.ChainMsg) (*types.TipSet, *types.MessageReceipt, error) { +func (sm *StateManager) SearchForMessage(ctx context.Context, mcid cid.Cid) (*types.TipSet, *types.MessageReceipt, cid.Cid, error) { + msg, err := 
sm.cs.GetCMessage(mcid) + if err != nil { + return nil, nil, cid.Undef, fmt.Errorf("failed to load message: %w", err) + } + + head := sm.cs.GetHeaviestTipSet() + + r, foundMsg, err := sm.tipsetExecutedMessage(head, mcid, msg.VMMessage()) + if err != nil { + return nil, nil, cid.Undef, err + } + + if r != nil { + return head, r, foundMsg, nil + } + + fts, r, foundMsg, err := sm.searchBackForMsg(ctx, head, msg) + + if err != nil { + log.Warnf("failed to look back through chain for message %s", mcid) + return nil, nil, cid.Undef, err + } + + if fts == nil { + return nil, nil, cid.Undef, nil + } + + return fts, r, foundMsg, nil +} + +func (sm *StateManager) searchBackForMsg(ctx context.Context, from *types.TipSet, m types.ChainMsg) (*types.TipSet, *types.MessageReceipt, cid.Cid, error) { cur := from for { if cur.Height() == 0 { // it ain't here! - return nil, nil, nil + return nil, nil, cid.Undef, nil } select { case <-ctx.Done(): - return nil, nil, nil + return nil, nil, cid.Undef, nil default: } - act, err := sm.GetActor(m.VMMessage().From, cur) + act, err := sm.LoadActor(ctx, m.VMMessage().From, cur) if err != nil { - return nil, nil, err + return nil, nil, cid.Cid{}, err } - if act.Nonce < m.VMMessage().Nonce { - // nonce on chain is before message nonce we're looking for, its - // not going to be here - return nil, nil, nil + // we either have no messages from the sender, or the latest message we found has a lower nonce than the one being searched for, + // either way, no reason to lookback, it ain't there + if act.Nonce == 0 || act.Nonce < m.VMMessage().Nonce { + return nil, nil, cid.Undef, nil } ts, err := sm.cs.LoadTipSet(cur.Parents()) if err != nil { - return nil, nil, fmt.Errorf("failed to load tipset during msg wait searchback: %w", err) + return nil, nil, cid.Undef, fmt.Errorf("failed to load tipset during msg wait searchback: %w", err) } - r, err := sm.tipsetExecutedMessage(ts, m.Cid(), m.VMMessage()) + r, foundMsg, err := sm.tipsetExecutedMessage(ts, 
m.Cid(), m.VMMessage()) if err != nil { - return nil, nil, fmt.Errorf("checking for message execution during lookback: %w", err) + return nil, nil, cid.Undef, fmt.Errorf("checking for message execution during lookback: %w", err) } if r != nil { - return ts, r, nil + return ts, r, foundMsg, nil } cur = ts } } -func (sm *StateManager) tipsetExecutedMessage(ts *types.TipSet, msg cid.Cid, vmm *types.Message) (*types.MessageReceipt, error) { +func (sm *StateManager) tipsetExecutedMessage(ts *types.TipSet, msg cid.Cid, vmm *types.Message) (*types.MessageReceipt, cid.Cid, error) { // The genesis block did not execute any messages if ts.Height() == 0 { - return nil, nil + return nil, cid.Undef, nil } pts, err := sm.cs.LoadTipSet(ts.Parents()) if err != nil { - return nil, err + return nil, cid.Undef, err } cm, err := sm.cs.MessagesForTipset(pts) if err != nil { - return nil, err + return nil, cid.Undef, err } for ii := range cm { @@ -546,21 +679,30 @@ func (sm *StateManager) tipsetExecutedMessage(ts *types.TipSet, msg cid.Cid, vmm if m.VMMessage().From == vmm.From { // cheaper to just check origin first if m.VMMessage().Nonce == vmm.Nonce { - if m.Cid() == msg { - return sm.cs.GetParentReceipt(ts.Blocks()[0], i) + if m.VMMessage().EqualCall(vmm) { + if m.Cid() != msg { + log.Warnw("found message with equal nonce and call params but different CID", + "wanted", msg, "found", m.Cid(), "nonce", vmm.Nonce, "from", vmm.From) + } + + pr, err := sm.cs.GetParentReceipt(ts.Blocks()[0], i) + if err != nil { + return nil, cid.Undef, err + } + return pr, m.Cid(), nil } // this should be that message - return nil, xerrors.Errorf("found message with equal nonce as the one we are looking for (F:%s n %d, TS: %s n%d)", + return nil, cid.Undef, xerrors.Errorf("found message with equal nonce as the one we are looking for (F:%s n %d, TS: %s n%d)", msg, vmm.Nonce, m.Cid(), m.VMMessage().Nonce) } if m.VMMessage().Nonce < vmm.Nonce { - return nil, nil // don't bother looking further + return nil, 
cid.Undef, nil // don't bother looking further } } } - return nil, nil + return nil, cid.Undef, nil } func (sm *StateManager) ListAllActors(ctx context.Context, ts *types.TipSet) ([]address.Address, error) { @@ -572,18 +714,13 @@ func (sm *StateManager) ListAllActors(ctx context.Context, ts *types.TipSet) ([] return nil, err } - cst := hamt.CSTFromBstore(sm.cs.Blockstore()) - r, err := hamt.LoadNode(ctx, cst, st) + stateTree, err := sm.StateTree(st) if err != nil { return nil, err } var out []address.Address - err = r.ForEach(ctx, func(k string, val interface{}) error { - addr, err := address.NewFromBytes([]byte(k)) - if err != nil { - return xerrors.Errorf("address in state tree was not valid: %w", err) - } + err = stateTree.ForEach(func(addr address.Address, act *types.Actor) error { out = append(out, addr) return nil }) @@ -594,16 +731,586 @@ func (sm *StateManager) ListAllActors(ctx context.Context, ts *types.TipSet) ([] return out, nil } -func (sm *StateManager) MarketBalance(ctx context.Context, addr address.Address, ts *types.TipSet) (actors.StorageParticipantBalance, error) { - var state actors.StorageMarketState - if _, err := sm.LoadActorState(ctx, actors.StorageMarketAddress, &state, ts); err != nil { - return actors.StorageParticipantBalance{}, err - } - cst := hamt.CSTFromBstore(sm.cs.Blockstore()) - b, _, err := actors.GetMarketBalances(ctx, cst, state.Balances, addr) +func (sm *StateManager) MarketBalance(ctx context.Context, addr address.Address, ts *types.TipSet) (api.MarketBalance, error) { + st, err := sm.ParentState(ts) if err != nil { - return actors.StorageParticipantBalance{}, err + return api.MarketBalance{}, err } - return b[0], nil + act, err := st.GetActor(market.Address) + if err != nil { + return api.MarketBalance{}, err + } + + mstate, err := market.Load(sm.cs.Store(ctx), act) + if err != nil { + return api.MarketBalance{}, err + } + + addr, err = sm.LookupID(ctx, addr, ts) + if err != nil { + return api.MarketBalance{}, err + } + + 
var out api.MarketBalance + + et, err := mstate.EscrowTable() + if err != nil { + return api.MarketBalance{}, err + } + out.Escrow, err = et.Get(addr) + if err != nil { + return api.MarketBalance{}, xerrors.Errorf("getting escrow balance: %w", err) + } + + lt, err := mstate.LockedTable() + if err != nil { + return api.MarketBalance{}, err + } + out.Locked, err = lt.Get(addr) + if err != nil { + return api.MarketBalance{}, xerrors.Errorf("getting locked balance: %w", err) + } + + return out, nil +} + +func (sm *StateManager) ValidateChain(ctx context.Context, ts *types.TipSet) error { + tschain := []*types.TipSet{ts} + for ts.Height() != 0 { + next, err := sm.cs.LoadTipSet(ts.Parents()) + if err != nil { + return err + } + + tschain = append(tschain, next) + ts = next + } + + lastState := tschain[len(tschain)-1].ParentState() + for i := len(tschain) - 1; i >= 0; i-- { + cur := tschain[i] + log.Infof("computing state (height: %d, ts=%s)", cur.Height(), cur.Cids()) + if cur.ParentState() != lastState { + return xerrors.Errorf("tipset chain had state mismatch at height %d", cur.Height()) + } + st, _, err := sm.TipSetState(ctx, cur) + if err != nil { + return err + } + lastState = st + } + + return nil +} + +func (sm *StateManager) SetVMConstructor(nvm func(context.Context, *vm.VMOpts) (*vm.VM, error)) { + sm.newVM = nvm +} + +type genesisInfo struct { + genesisMsigs []msig0.State + // info about the Accounts in the genesis state + genesisActors []genesisActor + genesisPledge abi.TokenAmount + genesisMarketFunds abi.TokenAmount +} + +type genesisActor struct { + addr address.Address + initBal abi.TokenAmount +} + +// sets up information about the actors in the genesis state +func (sm *StateManager) setupGenesisActors(ctx context.Context) error { + + gi := genesisInfo{} + + gb, err := sm.cs.GetGenesis() + if err != nil { + return xerrors.Errorf("getting genesis block: %w", err) + } + + gts, err := types.NewTipSet([]*types.BlockHeader{gb}) + if err != nil { + return 
xerrors.Errorf("getting genesis tipset: %w", err) + } + + st, _, err := sm.TipSetState(ctx, gts) + if err != nil { + return xerrors.Errorf("getting genesis tipset state: %w", err) + } + + cst := cbor.NewCborStore(sm.cs.Blockstore()) + sTree, err := state.LoadStateTree(cst, st) + if err != nil { + return xerrors.Errorf("loading state tree: %w", err) + } + + gi.genesisMarketFunds, err = getFilMarketLocked(ctx, sTree) + if err != nil { + return xerrors.Errorf("setting up genesis market funds: %w", err) + } + + gi.genesisPledge, err = getFilPowerLocked(ctx, sTree) + if err != nil { + return xerrors.Errorf("setting up genesis pledge: %w", err) + } + + totalsByEpoch := make(map[abi.ChainEpoch]abi.TokenAmount) + err = sTree.ForEach(func(kaddr address.Address, act *types.Actor) error { + if act.IsMultisigActor() { + s, err := multisig.Load(sm.cs.Store(ctx), act) + if err != nil { + return err + } + + se, err := s.StartEpoch() + if err != nil { + return err + } + + if se != 0 { + return xerrors.New("genesis multisig doesn't start vesting at epoch 0!") + } + + ud, err := s.UnlockDuration() + if err != nil { + return err + } + + ib, err := s.InitialBalance() + if err != nil { + return err + } + + ot, f := totalsByEpoch[ud] + if f { + totalsByEpoch[ud] = big.Add(ot, ib) + } else { + totalsByEpoch[ud] = ib + } + + } else if act.IsAccountActor() { + // should exclude burnt funds actor and "remainder account actor" + // should only ever be "faucet" accounts in testnets + if kaddr == builtin0.BurntFundsActorAddr { + return nil + } + + kid, err := sTree.LookupID(kaddr) + if err != nil { + return xerrors.Errorf("resolving address: %w", err) + } + + gi.genesisActors = append(gi.genesisActors, genesisActor{ + addr: kid, + initBal: act.Balance, + }) + } + return nil + }) + + if err != nil { + return xerrors.Errorf("error setting up genesis infos: %w", err) + } + + // TODO: use network upgrade abstractions or always start at actors v0? 
+ gi.genesisMsigs = make([]msig0.State, 0, len(totalsByEpoch)) + for k, v := range totalsByEpoch { + ns := msig0.State{ + InitialBalance: v, + UnlockDuration: k, + PendingTxns: cid.Undef, + } + gi.genesisMsigs = append(gi.genesisMsigs, ns) + } + + sm.preIgnitionGenInfos = &gi + + return nil +} + +// sets up information about the actors in the genesis state +// For testnet we use a hardcoded set of multisig states, instead of what's actually in the genesis multisigs +// We also do not consider ANY account actors (including the faucet) +func (sm *StateManager) setupPreIgnitionGenesisActorsTestnet(ctx context.Context) error { + + gi := genesisInfo{} + + gb, err := sm.cs.GetGenesis() + if err != nil { + return xerrors.Errorf("getting genesis block: %w", err) + } + + gts, err := types.NewTipSet([]*types.BlockHeader{gb}) + if err != nil { + return xerrors.Errorf("getting genesis tipset: %w", err) + } + + st, _, err := sm.TipSetState(ctx, gts) + if err != nil { + return xerrors.Errorf("getting genesis tipset state: %w", err) + } + + cst := cbor.NewCborStore(sm.cs.Blockstore()) + sTree, err := state.LoadStateTree(cst, st) + if err != nil { + return xerrors.Errorf("loading state tree: %w", err) + } + + gi.genesisMarketFunds, err = getFilMarketLocked(ctx, sTree) + if err != nil { + return xerrors.Errorf("setting up genesis market funds: %w", err) + } + + gi.genesisPledge, err = getFilPowerLocked(ctx, sTree) + if err != nil { + return xerrors.Errorf("setting up genesis pledge: %w", err) + } + + totalsByEpoch := make(map[abi.ChainEpoch]abi.TokenAmount) + + // 6 months + sixMonths := abi.ChainEpoch(183 * builtin0.EpochsInDay) + totalsByEpoch[sixMonths] = big.NewInt(49_929_341) + totalsByEpoch[sixMonths] = big.Add(totalsByEpoch[sixMonths], big.NewInt(32_787_700)) + + // 1 year + oneYear := abi.ChainEpoch(365 * builtin0.EpochsInDay) + totalsByEpoch[oneYear] = big.NewInt(22_421_712) + + // 2 years + twoYears := abi.ChainEpoch(2 * 365 * builtin0.EpochsInDay) + 
totalsByEpoch[twoYears] = big.NewInt(7_223_364) + + // 3 years + threeYears := abi.ChainEpoch(3 * 365 * builtin0.EpochsInDay) + totalsByEpoch[threeYears] = big.NewInt(87_637_883) + + // 6 years + sixYears := abi.ChainEpoch(6 * 365 * builtin0.EpochsInDay) + totalsByEpoch[sixYears] = big.NewInt(100_000_000) + totalsByEpoch[sixYears] = big.Add(totalsByEpoch[sixYears], big.NewInt(300_000_000)) + + gi.genesisMsigs = make([]msig0.State, 0, len(totalsByEpoch)) + for k, v := range totalsByEpoch { + ns := msig0.State{ + InitialBalance: v, + UnlockDuration: k, + PendingTxns: cid.Undef, + } + gi.genesisMsigs = append(gi.genesisMsigs, ns) + } + + sm.preIgnitionGenInfos = &gi + + return nil +} + +// sets up information about the actors in the genesis state, post the ignition fork +func (sm *StateManager) setupPostIgnitionGenesisActors(ctx context.Context) error { + + gi := genesisInfo{} + + gb, err := sm.cs.GetGenesis() + if err != nil { + return xerrors.Errorf("getting genesis block: %w", err) + } + + gts, err := types.NewTipSet([]*types.BlockHeader{gb}) + if err != nil { + return xerrors.Errorf("getting genesis tipset: %w", err) + } + + st, _, err := sm.TipSetState(ctx, gts) + if err != nil { + return xerrors.Errorf("getting genesis tipset state: %w", err) + } + + cst := cbor.NewCborStore(sm.cs.Blockstore()) + sTree, err := state.LoadStateTree(cst, st) + if err != nil { + return xerrors.Errorf("loading state tree: %w", err) + } + + // Unnecessary, should be removed + gi.genesisMarketFunds, err = getFilMarketLocked(ctx, sTree) + if err != nil { + return xerrors.Errorf("setting up genesis market funds: %w", err) + } + + // Unnecessary, should be removed + gi.genesisPledge, err = getFilPowerLocked(ctx, sTree) + if err != nil { + return xerrors.Errorf("setting up genesis pledge: %w", err) + } + + totalsByEpoch := make(map[abi.ChainEpoch]abi.TokenAmount) + + // 6 months + sixMonths := abi.ChainEpoch(183 * builtin0.EpochsInDay) + totalsByEpoch[sixMonths] = big.NewInt(49_929_341) + 
totalsByEpoch[sixMonths] = big.Add(totalsByEpoch[sixMonths], big.NewInt(32_787_700)) + + // 1 year + oneYear := abi.ChainEpoch(365 * builtin0.EpochsInDay) + totalsByEpoch[oneYear] = big.NewInt(22_421_712) + + // 2 years + twoYears := abi.ChainEpoch(2 * 365 * builtin0.EpochsInDay) + totalsByEpoch[twoYears] = big.NewInt(7_223_364) + + // 3 years + threeYears := abi.ChainEpoch(3 * 365 * builtin0.EpochsInDay) + totalsByEpoch[threeYears] = big.NewInt(87_637_883) + + // 6 years + sixYears := abi.ChainEpoch(6 * 365 * builtin0.EpochsInDay) + totalsByEpoch[sixYears] = big.NewInt(100_000_000) + totalsByEpoch[sixYears] = big.Add(totalsByEpoch[sixYears], big.NewInt(300_000_000)) + + gi.genesisMsigs = make([]msig0.State, 0, len(totalsByEpoch)) + for k, v := range totalsByEpoch { + ns := msig0.State{ + // In the pre-ignition logic, we incorrectly set this value in Fil, not attoFil, an off-by-10^18 error + InitialBalance: big.Mul(v, big.NewInt(int64(build.FilecoinPrecision))), + UnlockDuration: k, + PendingTxns: cid.Undef, + // In the pre-ignition logic, the start epoch was 0. This changes in the fork logic of the Ignition upgrade itself. + StartEpoch: build.UpgradeLiftoffHeight, + } + gi.genesisMsigs = append(gi.genesisMsigs, ns) + } + + sm.postIgnitionGenInfos = &gi + + return nil +} + +// GetVestedFunds returns all funds that have "left" actors that are in the genesis state: +// - For Multisigs, it counts the actual amounts that have vested at the given epoch +// - For Accounts, it counts max(currentBalance - genesisBalance, 0). 
+func (sm *StateManager) GetFilVested(ctx context.Context, height abi.ChainEpoch, st *state.StateTree) (abi.TokenAmount, error) { + vf := big.Zero() + if height <= build.UpgradeIgnitionHeight { + for _, v := range sm.preIgnitionGenInfos.genesisMsigs { + au := big.Sub(v.InitialBalance, v.AmountLocked(height)) + vf = big.Add(vf, au) + } + } else { + for _, v := range sm.postIgnitionGenInfos.genesisMsigs { + // In the pre-ignition logic, we simply called AmountLocked(height), assuming startEpoch was 0. + // The start epoch changed in the Ignition upgrade. + au := big.Sub(v.InitialBalance, v.AmountLocked(height-v.StartEpoch)) + vf = big.Add(vf, au) + } + } + + // there should not be any such accounts in testnet (and also none in mainnet?) + // continue to use preIgnitionGenInfos, nothing changed at the Ignition epoch + for _, v := range sm.preIgnitionGenInfos.genesisActors { + act, err := st.GetActor(v.addr) + if err != nil { + return big.Zero(), xerrors.Errorf("failed to get actor: %w", err) + } + + diff := big.Sub(v.initBal, act.Balance) + if diff.GreaterThan(big.Zero()) { + vf = big.Add(vf, diff) + } + } + + // continue to use preIgnitionGenInfos, nothing changed at the Ignition epoch + vf = big.Add(vf, sm.preIgnitionGenInfos.genesisPledge) + // continue to use preIgnitionGenInfos, nothing changed at the Ignition epoch + vf = big.Add(vf, sm.preIgnitionGenInfos.genesisMarketFunds) + + return vf, nil +} + +func GetFilMined(ctx context.Context, st *state.StateTree) (abi.TokenAmount, error) { + ractor, err := st.GetActor(reward.Address) + if err != nil { + return big.Zero(), xerrors.Errorf("failed to load reward actor state: %w", err) + } + + rst, err := reward.Load(adt.WrapStore(ctx, st.Store), ractor) + if err != nil { + return big.Zero(), err + } + + return rst.TotalStoragePowerReward() +} + +func getFilMarketLocked(ctx context.Context, st *state.StateTree) (abi.TokenAmount, error) { + act, err := st.GetActor(market.Address) + if err != nil { + return big.Zero(), 
xerrors.Errorf("failed to load market actor: %w", err) + } + + mst, err := market.Load(adt.WrapStore(ctx, st.Store), act) + if err != nil { + return big.Zero(), xerrors.Errorf("failed to load market state: %w", err) + } + + return mst.TotalLocked() +} + +func getFilPowerLocked(ctx context.Context, st *state.StateTree) (abi.TokenAmount, error) { + pactor, err := st.GetActor(power.Address) + if err != nil { + return big.Zero(), xerrors.Errorf("failed to load power actor: %w", err) + } + + pst, err := power.Load(adt.WrapStore(ctx, st.Store), pactor) + if err != nil { + return big.Zero(), xerrors.Errorf("failed to load power state: %w", err) + } + + return pst.TotalLocked() +} + +func (sm *StateManager) GetFilLocked(ctx context.Context, st *state.StateTree) (abi.TokenAmount, error) { + + filMarketLocked, err := getFilMarketLocked(ctx, st) + if err != nil { + return big.Zero(), xerrors.Errorf("failed to get filMarketLocked: %w", err) + } + + filPowerLocked, err := getFilPowerLocked(ctx, st) + if err != nil { + return big.Zero(), xerrors.Errorf("failed to get filPowerLocked: %w", err) + } + + return types.BigAdd(filMarketLocked, filPowerLocked), nil +} + +func GetFilBurnt(ctx context.Context, st *state.StateTree) (abi.TokenAmount, error) { + burnt, err := st.GetActor(builtin0.BurntFundsActorAddr) + if err != nil { + return big.Zero(), xerrors.Errorf("failed to load burnt actor: %w", err) + } + + return burnt.Balance, nil +} + +func (sm *StateManager) GetCirculatingSupplyDetailed(ctx context.Context, height abi.ChainEpoch, st *state.StateTree) (api.CirculatingSupply, error) { + sm.genesisMsigLk.Lock() + defer sm.genesisMsigLk.Unlock() + if sm.preIgnitionGenInfos == nil { + err := sm.setupPreIgnitionGenesisActorsTestnet(ctx) + if err != nil { + return api.CirculatingSupply{}, xerrors.Errorf("failed to setup pre-ignition genesis information: %w", err) + } + } + if sm.postIgnitionGenInfos == nil { + err := sm.setupPostIgnitionGenesisActors(ctx) + if err != nil { + return 
api.CirculatingSupply{}, xerrors.Errorf("failed to setup post-ignition genesis information: %w", err) + } + } + + filVested, err := sm.GetFilVested(ctx, height, st) + if err != nil { + return api.CirculatingSupply{}, xerrors.Errorf("failed to calculate filVested: %w", err) + } + + filMined, err := GetFilMined(ctx, st) + if err != nil { + return api.CirculatingSupply{}, xerrors.Errorf("failed to calculate filMined: %w", err) + } + + filBurnt, err := GetFilBurnt(ctx, st) + if err != nil { + return api.CirculatingSupply{}, xerrors.Errorf("failed to calculate filBurnt: %w", err) + } + + filLocked, err := sm.GetFilLocked(ctx, st) + if err != nil { + return api.CirculatingSupply{}, xerrors.Errorf("failed to calculate filLocked: %w", err) + } + + ret := types.BigAdd(filVested, filMined) + ret = types.BigSub(ret, filBurnt) + ret = types.BigSub(ret, filLocked) + + if ret.LessThan(big.Zero()) { + ret = big.Zero() + } + + return api.CirculatingSupply{ + FilVested: filVested, + FilMined: filMined, + FilBurnt: filBurnt, + FilLocked: filLocked, + FilCirculating: ret, + }, nil +} + +func (sm *StateManager) GetCirculatingSupply(ctx context.Context, height abi.ChainEpoch, st *state.StateTree) (abi.TokenAmount, error) { + csi, err := sm.GetCirculatingSupplyDetailed(ctx, height, st) + if err != nil { + return big.Zero(), err + } + + return csi.FilCirculating, nil +} + +func (sm *StateManager) GetNtwkVersion(ctx context.Context, height abi.ChainEpoch) network.Version { + // TODO: move hard fork epoch checks to a schedule defined in build/ + + if build.UseNewestNetwork() { + return build.NewestNetworkVersion + } + + if height <= build.UpgradeBreezeHeight { + return network.Version0 + } + + if height <= build.UpgradeSmokeHeight { + return network.Version1 + } + + if height <= build.UpgradeIgnitionHeight { + return network.Version2 + } + + return build.NewestNetworkVersion +} + +func (sm *StateManager) GetPaychState(ctx context.Context, addr address.Address, ts *types.TipSet) 
(*types.Actor, paych.State, error) { + st, err := sm.ParentState(ts) + if err != nil { + return nil, nil, err + } + + act, err := st.GetActor(addr) + if err != nil { + return nil, nil, err + } + + actState, err := paych.Load(sm.cs.Store(ctx), act) + if err != nil { + return nil, nil, err + } + return act, actState, nil +} + +func (sm *StateManager) GetMarketState(ctx context.Context, ts *types.TipSet) (market.State, error) { + st, err := sm.ParentState(ts) + if err != nil { + return nil, err + } + + act, err := st.GetActor(market.Address) + if err != nil { + return nil, err + } + + actState, err := market.Load(sm.cs.Store(ctx), act) + if err != nil { + return nil, err + } + return actState, nil } diff --git a/chain/stmgr/utils.go b/chain/stmgr/utils.go index 109848f8e..58e7f480f 100644 --- a/chain/stmgr/utils.go +++ b/chain/stmgr/utils.go @@ -1,313 +1,656 @@ package stmgr import ( + "bytes" "context" + "fmt" + "os" + "reflect" + "runtime" + "strings" - ffi "github.com/filecoin-project/filecoin-ffi" - sectorbuilder "github.com/filecoin-project/go-sectorbuilder" - - "github.com/filecoin-project/go-address" - - "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/chain/actors" - "github.com/filecoin-project/lotus/chain/types" - - amt "github.com/filecoin-project/go-amt-ipld" cid "github.com/ipfs/go-cid" - hamt "github.com/ipfs/go-hamt-ipld" - blockstore "github.com/ipfs/go-ipfs-blockstore" - cbor "github.com/ipfs/go-ipld-cbor" - "github.com/libp2p/go-libp2p-core/peer" cbg "github.com/whyrusleeping/cbor-gen" "golang.org/x/xerrors" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-bitfield" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/crypto" + + builtin0 "github.com/filecoin-project/specs-actors/actors/builtin" + account0 "github.com/filecoin-project/specs-actors/actors/builtin/account" + cron0 "github.com/filecoin-project/specs-actors/actors/builtin/cron" + init0 
"github.com/filecoin-project/specs-actors/actors/builtin/init" + market0 "github.com/filecoin-project/specs-actors/actors/builtin/market" + miner0 "github.com/filecoin-project/specs-actors/actors/builtin/miner" + msig0 "github.com/filecoin-project/specs-actors/actors/builtin/multisig" + paych0 "github.com/filecoin-project/specs-actors/actors/builtin/paych" + power0 "github.com/filecoin-project/specs-actors/actors/builtin/power" + reward0 "github.com/filecoin-project/specs-actors/actors/builtin/reward" + verifreg0 "github.com/filecoin-project/specs-actors/actors/builtin/verifreg" + proof0 "github.com/filecoin-project/specs-actors/actors/runtime/proof" + + "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/build" + init_ "github.com/filecoin-project/lotus/chain/actors/builtin/init" + "github.com/filecoin-project/lotus/chain/actors/builtin/market" + "github.com/filecoin-project/lotus/chain/actors/builtin/miner" + "github.com/filecoin-project/lotus/chain/actors/builtin/power" + "github.com/filecoin-project/lotus/chain/beacon" + "github.com/filecoin-project/lotus/chain/state" + "github.com/filecoin-project/lotus/chain/store" + "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/chain/vm" + "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper" + "github.com/filecoin-project/lotus/node/modules/dtypes" ) +func GetNetworkName(ctx context.Context, sm *StateManager, st cid.Cid) (dtypes.NetworkName, error) { + act, err := sm.LoadActorRaw(ctx, init_.Address, st) + if err != nil { + return "", err + } + ias, err := init_.Load(sm.cs.Store(ctx), act) + if err != nil { + return "", err + } + + return ias.NetworkName() +} + func GetMinerWorkerRaw(ctx context.Context, sm *StateManager, st cid.Cid, maddr address.Address) (address.Address, error) { - recp, err := sm.CallRaw(ctx, &types.Message{ - To: maddr, - From: maddr, - Method: actors.MAMethods.GetWorkerAddr, - }, st, nil, 0) + state, err := sm.StateTree(st) 
if err != nil { - return address.Undef, xerrors.Errorf("callRaw failed: %w", err) + return address.Undef, xerrors.Errorf("(get sset) failed to load state tree: %w", err) } - - if recp.ExitCode != 0 { - return address.Undef, xerrors.Errorf("getting miner worker addr failed (exit code %d)", recp.ExitCode) - } - - worker, err := address.NewFromBytes(recp.Return) + act, err := state.GetActor(maddr) if err != nil { - return address.Undef, err + return address.Undef, xerrors.Errorf("(get sset) failed to load miner actor: %w", err) + } + mas, err := miner.Load(sm.cs.Store(ctx), act) + if err != nil { + return address.Undef, xerrors.Errorf("(get sset) failed to load miner actor state: %w", err) } - if worker.Protocol() == address.ID { - return address.Undef, xerrors.Errorf("need to resolve worker address to a pubkeyaddr") + info, err := mas.Info() + if err != nil { + return address.Undef, xerrors.Errorf("failed to load actor info: %w", err) } - return worker, nil + return vm.ResolveToKeyAddr(state, sm.cs.Store(ctx), info.Worker) } -func GetMinerOwner(ctx context.Context, sm *StateManager, st cid.Cid, maddr address.Address) (address.Address, error) { - recp, err := sm.CallRaw(ctx, &types.Message{ - To: maddr, - From: maddr, - Method: actors.MAMethods.GetOwner, - }, st, nil, 0) - if err != nil { - return address.Undef, xerrors.Errorf("callRaw failed: %w", err) - } - - if recp.ExitCode != 0 { - return address.Undef, xerrors.Errorf("getting miner owner addr failed (exit code %d)", recp.ExitCode) - } - - owner, err := address.NewFromBytes(recp.Return) - if err != nil { - return address.Undef, err - } - - if owner.Protocol() == address.ID { - return address.Undef, xerrors.Errorf("need to resolve owner address to a pubkeyaddr") - } - - return owner, nil +func GetPower(ctx context.Context, sm *StateManager, ts *types.TipSet, maddr address.Address) (power.Claim, power.Claim, bool, error) { + return GetPowerRaw(ctx, sm, ts.ParentState(), maddr) } -func GetPower(ctx context.Context, 
sm *StateManager, ts *types.TipSet, maddr address.Address) (types.BigInt, types.BigInt, error) { - var err error +func GetPowerRaw(ctx context.Context, sm *StateManager, st cid.Cid, maddr address.Address) (power.Claim, power.Claim, bool, error) { + act, err := sm.LoadActorRaw(ctx, power.Address, st) + if err != nil { + return power.Claim{}, power.Claim{}, false, xerrors.Errorf("(get sset) failed to load power actor state: %w", err) + } - var mpow types.BigInt + pas, err := power.Load(sm.cs.Store(ctx), act) + if err != nil { + return power.Claim{}, power.Claim{}, false, err + } + tpow, err := pas.TotalPower() + if err != nil { + return power.Claim{}, power.Claim{}, false, err + } + + var mpow power.Claim if maddr != address.Undef { - enc, aerr := actors.SerializeParams(&actors.PowerLookupParams{maddr}) - if aerr != nil { - return types.EmptyInt, types.EmptyInt, aerr + var found bool + mpow, found, err = pas.MinerPower(maddr) + if err != nil || !found { + // TODO: return an error when not found? 
+ return power.Claim{}, power.Claim{}, false, err } - ret, err := sm.Call(ctx, &types.Message{ - From: maddr, - To: actors.StoragePowerAddress, - Method: actors.SPAMethods.PowerLookup, - Params: enc, - }, ts) - if err != nil { - return types.EmptyInt, types.EmptyInt, xerrors.Errorf("failed to get miner power from chain: %w", err) - } - if ret.ExitCode != 0 { - return types.EmptyInt, types.EmptyInt, xerrors.Errorf("failed to get miner power from chain (exit code %d)", ret.ExitCode) - } - - mpow = types.BigFromBytes(ret.Return) } - ret, err := sm.Call(ctx, &types.Message{ - From: actors.StoragePowerAddress, - To: actors.StoragePowerAddress, - Method: actors.SPAMethods.GetTotalStorage, - }, ts) + minpow, err := pas.MinerNominalPowerMeetsConsensusMinimum(maddr) if err != nil { - return types.EmptyInt, types.EmptyInt, xerrors.Errorf("failed to get total power from chain: %w", err) - } - if ret.ExitCode != 0 { - return types.EmptyInt, types.EmptyInt, xerrors.Errorf("failed to get total power from chain (exit code %d)", ret.ExitCode) + return power.Claim{}, power.Claim{}, false, err } - tpow := types.BigFromBytes(ret.Return) - - return mpow, tpow, nil + return mpow, tpow, minpow, nil } -func GetMinerPeerID(ctx context.Context, sm *StateManager, ts *types.TipSet, maddr address.Address) (peer.ID, error) { - recp, err := sm.Call(ctx, &types.Message{ - To: maddr, - From: maddr, - Method: actors.MAMethods.GetPeerID, - }, ts) +func PreCommitInfo(ctx context.Context, sm *StateManager, maddr address.Address, sid abi.SectorNumber, ts *types.TipSet) (*miner.SectorPreCommitOnChainInfo, error) { + act, err := sm.LoadActor(ctx, maddr, ts) if err != nil { - return "", xerrors.Errorf("call failed: %w", err) + return nil, xerrors.Errorf("(get sset) failed to load miner actor: %w", err) } - if recp.ExitCode != 0 { - return "", xerrors.Errorf("getting miner peer ID failed (exit code %d)", recp.ExitCode) - } - - return peer.IDFromBytes(recp.Return) -} - -func GetMinerWorker(ctx 
context.Context, sm *StateManager, ts *types.TipSet, maddr address.Address) (address.Address, error) { - recp, err := sm.Call(ctx, &types.Message{ - To: maddr, - From: maddr, - Method: actors.MAMethods.GetWorkerAddr, - }, ts) - if err != nil { - return address.Undef, xerrors.Errorf("call failed: %w", err) - } - - if recp.ExitCode != 0 { - return address.Undef, xerrors.Errorf("getting miner peer ID failed (exit code %d)", recp.ExitCode) - } - - return address.NewFromBytes(recp.Return) -} - -func GetMinerElectionPeriodStart(ctx context.Context, sm *StateManager, ts *types.TipSet, maddr address.Address) (uint64, error) { - var mas actors.StorageMinerActorState - _, err := sm.LoadActorState(ctx, maddr, &mas, ts) - if err != nil { - return 0, xerrors.Errorf("(get eps) failed to load miner actor state: %w", err) - } - - return mas.ElectionPeriodStart, nil -} - -func SectorSetSizes(ctx context.Context, sm *StateManager, maddr address.Address, ts *types.TipSet) (api.MinerSectors, error) { - var mas actors.StorageMinerActorState - _, err := sm.LoadActorState(ctx, maddr, &mas, ts) - if err != nil { - return api.MinerSectors{}, xerrors.Errorf("(get sset) failed to load miner actor state: %w", err) - } - - blks := amt.WrapBlockstore(sm.ChainStore().Blockstore()) - ss, err := amt.LoadAMT(blks, mas.Sectors) - if err != nil { - return api.MinerSectors{}, err - } - - ps, err := amt.LoadAMT(blks, mas.ProvingSet) - if err != nil { - return api.MinerSectors{}, err - } - - return api.MinerSectors{ - Pset: ps.Count, - Sset: ss.Count, - }, nil -} - -func GetMinerProvingSet(ctx context.Context, sm *StateManager, ts *types.TipSet, maddr address.Address) ([]*api.ChainSectorInfo, error) { - var mas actors.StorageMinerActorState - _, err := sm.LoadActorState(ctx, maddr, &mas, ts) - if err != nil { - return nil, xerrors.Errorf("(get pset) failed to load miner actor state: %w", err) - } - - return LoadSectorsFromSet(ctx, sm.ChainStore().Blockstore(), mas.ProvingSet) -} - -func 
GetMinerSectorSet(ctx context.Context, sm *StateManager, ts *types.TipSet, maddr address.Address) ([]*api.ChainSectorInfo, error) { - var mas actors.StorageMinerActorState - _, err := sm.LoadActorState(ctx, maddr, &mas, ts) + mas, err := miner.Load(sm.cs.Store(ctx), act) if err != nil { return nil, xerrors.Errorf("(get sset) failed to load miner actor state: %w", err) } - return LoadSectorsFromSet(ctx, sm.ChainStore().Blockstore(), mas.Sectors) + return mas.GetPrecommittedSector(sid) } -func GetSectorsForElectionPost(ctx context.Context, sm *StateManager, ts *types.TipSet, maddr address.Address) (*sectorbuilder.SortedPublicSectorInfo, error) { - sectors, err := GetMinerProvingSet(ctx, sm, ts, maddr) +func MinerSectorInfo(ctx context.Context, sm *StateManager, maddr address.Address, sid abi.SectorNumber, ts *types.TipSet) (*miner.SectorOnChainInfo, error) { + act, err := sm.LoadActor(ctx, maddr, ts) if err != nil { - return nil, xerrors.Errorf("failed to get sector set for miner: %w", err) + return nil, xerrors.Errorf("(get sset) failed to load miner actor: %w", err) } - var uselessOtherArray []ffi.PublicSectorInfo - for _, s := range sectors { - var uselessBuffer [32]byte - copy(uselessBuffer[:], s.CommR) - uselessOtherArray = append(uselessOtherArray, ffi.PublicSectorInfo{ - SectorID: s.SectorID, - CommR: uselessBuffer, - }) - } - - ssi := sectorbuilder.NewSortedPublicSectorInfo(uselessOtherArray) - return &ssi, nil -} - -func GetMinerSectorSize(ctx context.Context, sm *StateManager, ts *types.TipSet, maddr address.Address) (uint64, error) { - var mas actors.StorageMinerActorState - _, err := sm.LoadActorState(ctx, maddr, &mas, ts) + mas, err := miner.Load(sm.cs.Store(ctx), act) if err != nil { - return 0, xerrors.Errorf("(get ssize) failed to load miner actor state: %w", err) + return nil, xerrors.Errorf("(get sset) failed to load miner actor state: %w", err) } - cst := hamt.CSTFromBstore(sm.cs.Blockstore()) - var minfo actors.MinerInfo - if err := cst.Get(ctx, 
mas.Info, &minfo); err != nil { - return 0, xerrors.Errorf("failed to read miner info: %w", err) - } - - return minfo.SectorSize, nil + return mas.GetSector(sid) } -func GetMinerSlashed(ctx context.Context, sm *StateManager, ts *types.TipSet, maddr address.Address) (uint64, error) { - var mas actors.StorageMinerActorState - _, err := sm.LoadActorState(ctx, maddr, &mas, ts) +func GetMinerSectorSet(ctx context.Context, sm *StateManager, ts *types.TipSet, maddr address.Address, snos *bitfield.BitField) ([]*miner.SectorOnChainInfo, error) { + act, err := sm.LoadActor(ctx, maddr, ts) if err != nil { - return 0, xerrors.Errorf("(get mslash) failed to load miner actor state: %w", err) + return nil, xerrors.Errorf("(get sset) failed to load miner actor: %w", err) } - return mas.SlashedAt, nil + mas, err := miner.Load(sm.cs.Store(ctx), act) + if err != nil { + return nil, xerrors.Errorf("(get sset) failed to load miner actor state: %w", err) + } + + return mas.LoadSectors(snos) } -func GetStorageDeal(ctx context.Context, sm *StateManager, dealId uint64, ts *types.TipSet) (*actors.OnChainDeal, error) { - var state actors.StorageMarketState - if _, err := sm.LoadActorState(ctx, actors.StorageMarketAddress, &state, ts); err != nil { - return nil, err +func GetSectorsForWinningPoSt(ctx context.Context, pv ffiwrapper.Verifier, sm *StateManager, st cid.Cid, maddr address.Address, rand abi.PoStRandomness) ([]proof0.SectorInfo, error) { + act, err := sm.LoadActorRaw(ctx, maddr, st) + if err != nil { + return nil, xerrors.Errorf("failed to load miner actor: %w", err) } - blks := amt.WrapBlockstore(sm.ChainStore().Blockstore()) - da, err := amt.LoadAMT(blks, state.Deals) + mas, err := miner.Load(sm.cs.Store(ctx), act) + if err != nil { + return nil, xerrors.Errorf("failed to load miner actor state: %w", err) + } + + // TODO (!!): Actor Update: Make this active sectors + + allSectors, err := miner.AllPartSectors(mas, miner.Partition.AllSectors) + if err != nil { + return nil, 
xerrors.Errorf("get all sectors: %w", err) + } + + faultySectors, err := miner.AllPartSectors(mas, miner.Partition.FaultySectors) + if err != nil { + return nil, xerrors.Errorf("get faulty sectors: %w", err) + } + + provingSectors, err := bitfield.SubtractBitField(allSectors, faultySectors) // TODO: This is wrong, as it can contain faaults, change to just ActiveSectors in an upgrade + if err != nil { + return nil, xerrors.Errorf("calc proving sectors: %w", err) + } + + numProvSect, err := provingSectors.Count() + if err != nil { + return nil, xerrors.Errorf("failed to count bits: %w", err) + } + + // TODO(review): is this right? feels fishy to me + if numProvSect == 0 { + return nil, nil + } + + info, err := mas.Info() + if err != nil { + return nil, xerrors.Errorf("getting miner info: %w", err) + } + + spt, err := ffiwrapper.SealProofTypeFromSectorSize(info.SectorSize) + if err != nil { + return nil, xerrors.Errorf("getting seal proof type: %w", err) + } + + wpt, err := spt.RegisteredWinningPoStProof() + if err != nil { + return nil, xerrors.Errorf("getting window proof type: %w", err) + } + + mid, err := address.IDFromAddress(maddr) + if err != nil { + return nil, xerrors.Errorf("getting miner ID: %w", err) + } + + ids, err := pv.GenerateWinningPoStSectorChallenge(ctx, wpt, abi.ActorID(mid), rand, numProvSect) + if err != nil { + return nil, xerrors.Errorf("generating winning post challenges: %w", err) + } + + iter, err := provingSectors.BitIterator() + if err != nil { + return nil, xerrors.Errorf("iterating over proving sectors: %w", err) + } + + // Select winning sectors by _index_ in the all-sectors bitfield. 
+ selectedSectors := bitfield.New() + prev := uint64(0) + for _, n := range ids { + sno, err := iter.Nth(n - prev) + if err != nil { + return nil, xerrors.Errorf("iterating over proving sectors: %w", err) + } + selectedSectors.Set(sno) + prev = n + } + + sectors, err := mas.LoadSectors(&selectedSectors) + if err != nil { + return nil, xerrors.Errorf("loading proving sectors: %w", err) + } + + out := make([]proof0.SectorInfo, len(sectors)) + for i, sinfo := range sectors { + out[i] = proof0.SectorInfo{ + SealProof: spt, + SectorNumber: sinfo.SectorNumber, + SealedCID: sinfo.SealedCID, + } + } + + return out, nil +} + +func StateMinerInfo(ctx context.Context, sm *StateManager, ts *types.TipSet, maddr address.Address) (*miner.MinerInfo, error) { + act, err := sm.LoadActor(ctx, maddr, ts) + if err != nil { + return nil, xerrors.Errorf("failed to load miner actor: %w", err) + } + + mas, err := miner.Load(sm.cs.Store(ctx), act) + if err != nil { + return nil, xerrors.Errorf("failed to load miner actor state: %w", err) + } + + mi, err := mas.Info() if err != nil { return nil, err } - var ocd actors.OnChainDeal - if err := da.Get(dealId, &ocd); err != nil { + return &mi, err +} + +func GetMinerSlashed(ctx context.Context, sm *StateManager, ts *types.TipSet, maddr address.Address) (bool, error) { + act, err := sm.LoadActor(ctx, power.Address, ts) + if err != nil { + return false, xerrors.Errorf("failed to load power actor: %w", err) + } + + spas, err := power.Load(sm.cs.Store(ctx), act) + if err != nil { + return false, xerrors.Errorf("failed to load power actor state: %w", err) + } + + _, ok, err := spas.MinerPower(maddr) + if err != nil { + return false, xerrors.Errorf("getting miner power: %w", err) + } + + if !ok { + return true, nil + } + + return false, nil +} + +func GetStorageDeal(ctx context.Context, sm *StateManager, dealID abi.DealID, ts *types.TipSet) (*api.MarketDeal, error) { + act, err := sm.LoadActor(ctx, market.Address, ts) + if err != nil { + return nil, 
xerrors.Errorf("failed to load market actor: %w", err) + } + + state, err := market.Load(sm.cs.Store(ctx), act) + if err != nil { + return nil, xerrors.Errorf("failed to load market actor state: %w", err) + } + + proposals, err := state.Proposals() + if err != nil { return nil, err } - return &ocd, nil + proposal, found, err := proposals.Get(dealID) + + if err != nil { + return nil, err + } else if !found { + return nil, xerrors.Errorf("deal %d not found", dealID) + } + + states, err := state.States() + if err != nil { + return nil, err + } + + st, found, err := states.Get(dealID) + if err != nil { + return nil, err + } + + if !found { + st = market.EmptyDealState() + } + + return &api.MarketDeal{ + Proposal: *proposal, + State: *st, + }, nil } func ListMinerActors(ctx context.Context, sm *StateManager, ts *types.TipSet) ([]address.Address, error) { - var state actors.StoragePowerState - if _, err := sm.LoadActorState(ctx, actors.StoragePowerAddress, &state, ts); err != nil { - return nil, err - } - - cst := hamt.CSTFromBstore(sm.ChainStore().Blockstore()) - miners, err := actors.MinerSetList(ctx, cst, state.Miners) + act, err := sm.LoadActor(ctx, power.Address, ts) if err != nil { - return nil, err + return nil, xerrors.Errorf("failed to load power actor: %w", err) } - return miners, nil + powState, err := power.Load(sm.cs.Store(ctx), act) + if err != nil { + return nil, xerrors.Errorf("failed to load power actor state: %w", err) + } + + return powState.ListAllMiners() } -func LoadSectorsFromSet(ctx context.Context, bs blockstore.Blockstore, ssc cid.Cid) ([]*api.ChainSectorInfo, error) { - blks := amt.WrapBlockstore(bs) - a, err := amt.LoadAMT(blks, ssc) - if err != nil { - return nil, err +func ComputeState(ctx context.Context, sm *StateManager, height abi.ChainEpoch, msgs []*types.Message, ts *types.TipSet) (cid.Cid, []*api.InvocResult, error) { + if ts == nil { + ts = sm.cs.GetHeaviestTipSet() } - var sset []*api.ChainSectorInfo - if err := a.ForEach(func(i 
uint64, v *cbg.Deferred) error { - var comms [][]byte - if err := cbor.DecodeInto(v.Raw, &comms); err != nil { - return err + base, trace, err := sm.ExecutionTrace(ctx, ts) + if err != nil { + return cid.Undef, nil, err + } + + for i := ts.Height(); i < height; i++ { + // handle state forks + base, err = sm.handleStateForks(ctx, base, i, traceFunc(&trace), ts) + if err != nil { + return cid.Undef, nil, xerrors.Errorf("error handling state forks: %w", err) } - sset = append(sset, &api.ChainSectorInfo{ - SectorID: i, - CommR: comms[0], - CommD: comms[1], - }) - return nil - }); err != nil { + + // TODO: should we also run cron here? + } + + r := store.NewChainRand(sm.cs, ts.Cids()) + vmopt := &vm.VMOpts{ + StateBase: base, + Epoch: height, + Rand: r, + Bstore: sm.cs.Blockstore(), + Syscalls: sm.cs.VMSys(), + CircSupplyCalc: sm.GetCirculatingSupply, + NtwkVersion: sm.GetNtwkVersion, + BaseFee: ts.Blocks()[0].ParentBaseFee, + } + vmi, err := vm.NewVM(ctx, vmopt) + if err != nil { + return cid.Undef, nil, err + } + + for i, msg := range msgs { + // TODO: Use the signed message length for secp messages + ret, err := vmi.ApplyMessage(ctx, msg) + if err != nil { + return cid.Undef, nil, xerrors.Errorf("applying message %s: %w", msg.Cid(), err) + } + if ret.ExitCode != 0 { + log.Infof("compute state apply message %d failed (exit: %d): %s", i, ret.ExitCode, ret.ActorErr) + } + } + + root, err := vmi.Flush(ctx) + if err != nil { + return cid.Undef, nil, err + } + + return root, trace, nil +} + +func GetLookbackTipSetForRound(ctx context.Context, sm *StateManager, ts *types.TipSet, round abi.ChainEpoch) (*types.TipSet, error) { + var lbr abi.ChainEpoch + if round > build.WinningPoStSectorSetLookback { + lbr = round - build.WinningPoStSectorSetLookback + } + + // more null blocks than our lookback + if lbr > ts.Height() { + return ts, nil + } + + lbts, err := sm.ChainStore().GetTipsetByHeight(ctx, lbr, ts, true) + if err != nil { + return nil, xerrors.Errorf("failed to get 
lookback tipset: %w", err) + } + + return lbts, nil +} + +func MinerGetBaseInfo(ctx context.Context, sm *StateManager, bcs beacon.Schedule, tsk types.TipSetKey, round abi.ChainEpoch, maddr address.Address, pv ffiwrapper.Verifier) (*api.MiningBaseInfo, error) { + ts, err := sm.ChainStore().LoadTipSet(tsk) + if err != nil { + return nil, xerrors.Errorf("failed to load tipset for mining base: %w", err) + } + + prev, err := sm.ChainStore().GetLatestBeaconEntry(ts) + if err != nil { + if os.Getenv("LOTUS_IGNORE_DRAND") != "_yes_" { + return nil, xerrors.Errorf("failed to get latest beacon entry: %w", err) + } + + prev = &types.BeaconEntry{} + } + + entries, err := beacon.BeaconEntriesForBlock(ctx, bcs, round, ts.Height(), *prev) + if err != nil { return nil, err } - return sset, nil + rbase := *prev + if len(entries) > 0 { + rbase = entries[len(entries)-1] + } + + lbts, err := GetLookbackTipSetForRound(ctx, sm, ts, round) + if err != nil { + return nil, xerrors.Errorf("getting lookback miner actor state: %w", err) + } + + lbst, _, err := sm.TipSetState(ctx, lbts) + if err != nil { + return nil, err + } + + act, err := sm.LoadActorRaw(ctx, maddr, lbst) + if err != nil { + return nil, xerrors.Errorf("failed to load miner actor: %w", err) + } + + mas, err := miner.Load(sm.cs.Store(ctx), act) + if err != nil { + return nil, xerrors.Errorf("failed to load miner actor state: %w", err) + } + + buf := new(bytes.Buffer) + if err := maddr.MarshalCBOR(buf); err != nil { + return nil, xerrors.Errorf("failed to marshal miner address: %w", err) + } + + prand, err := store.DrawRandomness(rbase.Data, crypto.DomainSeparationTag_WinningPoStChallengeSeed, round, buf.Bytes()) + if err != nil { + return nil, xerrors.Errorf("failed to get randomness for winning post: %w", err) + } + + sectors, err := GetSectorsForWinningPoSt(ctx, pv, sm, lbst, maddr, prand) + if err != nil { + return nil, xerrors.Errorf("getting winning post proving set: %w", err) + } + + if len(sectors) == 0 { + return nil, 
nil + } + + mpow, tpow, hmp, err := GetPowerRaw(ctx, sm, lbst, maddr) + if err != nil { + return nil, xerrors.Errorf("failed to get power: %w", err) + } + + info, err := mas.Info() + if err != nil { + return nil, err + } + + worker, err := sm.ResolveToKeyAddress(ctx, info.Worker, ts) + if err != nil { + return nil, xerrors.Errorf("resolving worker address: %w", err) + } + + return &api.MiningBaseInfo{ + MinerPower: mpow.QualityAdjPower, + NetworkPower: tpow.QualityAdjPower, + Sectors: sectors, + WorkerKey: worker, + SectorSize: info.SectorSize, + PrevBeaconEntry: *prev, + BeaconEntries: entries, + HasMinPower: hmp, + }, nil +} + +type MethodMeta struct { + Name string + + Params reflect.Type + Ret reflect.Type +} + +var MethodsMap = map[cid.Cid]map[abi.MethodNum]MethodMeta{} + +func init() { + cidToMethods := map[cid.Cid][2]interface{}{ + // builtin.SystemActorCodeID: {builtin.MethodsSystem, system.Actor{} }- apparently it doesn't have methods + builtin0.InitActorCodeID: {builtin0.MethodsInit, init0.Actor{}}, + builtin0.CronActorCodeID: {builtin0.MethodsCron, cron0.Actor{}}, + builtin0.AccountActorCodeID: {builtin0.MethodsAccount, account0.Actor{}}, + builtin0.StoragePowerActorCodeID: {builtin0.MethodsPower, power0.Actor{}}, + builtin0.StorageMinerActorCodeID: {builtin0.MethodsMiner, miner0.Actor{}}, + builtin0.StorageMarketActorCodeID: {builtin0.MethodsMarket, market0.Actor{}}, + builtin0.PaymentChannelActorCodeID: {builtin0.MethodsPaych, paych0.Actor{}}, + builtin0.MultisigActorCodeID: {builtin0.MethodsMultisig, msig0.Actor{}}, + builtin0.RewardActorCodeID: {builtin0.MethodsReward, reward0.Actor{}}, + builtin0.VerifiedRegistryActorCodeID: {builtin0.MethodsVerifiedRegistry, verifreg0.Actor{}}, + } + + for c, m := range cidToMethods { + exports := m[1].(vm.Invokee).Exports() + methods := make(map[abi.MethodNum]MethodMeta, len(exports)) + + // Explicitly add send, it's special. 
+ methods[builtin0.MethodSend] = MethodMeta{ + Name: "Send", + Params: reflect.TypeOf(new(abi.EmptyValue)), + Ret: reflect.TypeOf(new(abi.EmptyValue)), + } + + // Learn method names from the builtin.Methods* structs. + rv := reflect.ValueOf(m[0]) + rt := rv.Type() + nf := rt.NumField() + methodToName := make([]string, len(exports)) + for i := 0; i < nf; i++ { + name := rt.Field(i).Name + number := rv.Field(i).Interface().(abi.MethodNum) + methodToName[number] = name + } + + // Iterate over exported methods. Some of these _may_ be nil and + // must be skipped. + for number, export := range exports { + if export == nil { + continue + } + + ev := reflect.ValueOf(export) + et := ev.Type() + + // Make sure the method name is correct. + // This is just a nice sanity check. + fnName := runtime.FuncForPC(ev.Pointer()).Name() + fnName = strings.TrimSuffix(fnName[strings.LastIndexByte(fnName, '.')+1:], "-fm") + mName := methodToName[number] + if mName != fnName { + panic(fmt.Sprintf( + "actor method name is %s but exported method name is %s", + fnName, mName, + )) + } + + switch abi.MethodNum(number) { + case builtin0.MethodSend: + panic("method 0 is reserved for Send") + case builtin0.MethodConstructor: + if fnName != "Constructor" { + panic("method 1 is reserved for Constructor") + } + } + + methods[abi.MethodNum(number)] = MethodMeta{ + Name: fnName, + Params: et.In(1), + Ret: et.Out(0), + } + } + MethodsMap[c] = methods + } +} + +func GetReturnType(ctx context.Context, sm *StateManager, to address.Address, method abi.MethodNum, ts *types.TipSet) (cbg.CBORUnmarshaler, error) { + act, err := sm.LoadActor(ctx, to, ts) + if err != nil { + return nil, xerrors.Errorf("(get sset) failed to load miner actor: %w", err) + } + + m, found := MethodsMap[act.Code][method] + if !found { + return nil, fmt.Errorf("unknown method %d for actor %s", method, act.Code) + } + return reflect.New(m.Ret.Elem()).Interface().(cbg.CBORUnmarshaler), nil +} + +func MinerHasMinPower(ctx 
context.Context, sm *StateManager, addr address.Address, ts *types.TipSet) (bool, error) { + pact, err := sm.LoadActor(ctx, power.Address, ts) + if err != nil { + return false, xerrors.Errorf("loading power actor state: %w", err) + } + + ps, err := power.Load(sm.cs.Store(ctx), pact) + if err != nil { + return false, err + } + + return ps.MinerNominalPowerMeetsConsensusMinimum(addr) +} + +func CheckTotalFIL(ctx context.Context, sm *StateManager, ts *types.TipSet) (abi.TokenAmount, error) { + str, err := state.LoadStateTree(sm.ChainStore().Store(ctx), ts.ParentState()) + if err != nil { + return abi.TokenAmount{}, err + } + + sum := types.NewInt(0) + err = str.ForEach(func(a address.Address, act *types.Actor) error { + sum = types.BigAdd(sum, act.Balance) + return nil + }) + if err != nil { + return abi.TokenAmount{}, err + } + + return sum, nil } diff --git a/chain/store/basefee.go b/chain/store/basefee.go new file mode 100644 index 000000000..45785240e --- /dev/null +++ b/chain/store/basefee.go @@ -0,0 +1,83 @@ +package store + +import ( + "context" + + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/chain/types" + "github.com/ipfs/go-cid" + "golang.org/x/xerrors" +) + +func ComputeNextBaseFee(baseFee types.BigInt, gasLimitUsed int64, noOfBlocks int, epoch abi.ChainEpoch) types.BigInt { + // deta := gasLimitUsed/noOfBlocks - build.BlockGasTarget + // change := baseFee * deta / BlockGasTarget + // nextBaseFee = baseFee + change + // nextBaseFee = max(nextBaseFee, build.MinimumBaseFee) + + var delta int64 + if epoch > build.UpgradeSmokeHeight { + delta = gasLimitUsed / int64(noOfBlocks) + delta -= build.BlockGasTarget + } else { + delta = build.PackingEfficiencyDenom * gasLimitUsed / (int64(noOfBlocks) * build.PackingEfficiencyNum) + delta -= build.BlockGasTarget + } + + // cap change at 12.5% (BaseFeeMaxChangeDenom) by capping delta + 
if delta > build.BlockGasTarget { + delta = build.BlockGasTarget + } + if delta < -build.BlockGasTarget { + delta = -build.BlockGasTarget + } + + change := big.Mul(baseFee, big.NewInt(delta)) + change = big.Div(change, big.NewInt(build.BlockGasTarget)) + change = big.Div(change, big.NewInt(build.BaseFeeMaxChangeDenom)) + + nextBaseFee := big.Add(baseFee, change) + if big.Cmp(nextBaseFee, big.NewInt(build.MinimumBaseFee)) < 0 { + nextBaseFee = big.NewInt(build.MinimumBaseFee) + } + return nextBaseFee +} + +func (cs *ChainStore) ComputeBaseFee(ctx context.Context, ts *types.TipSet) (abi.TokenAmount, error) { + if ts.Height() > build.UpgradeBreezeHeight && ts.Height() < build.UpgradeBreezeHeight+build.BreezeGasTampingDuration { + return abi.NewTokenAmount(100), nil + } + + zero := abi.NewTokenAmount(0) + + // totalLimit is sum of GasLimits of unique messages in a tipset + totalLimit := int64(0) + + seen := make(map[cid.Cid]struct{}) + + for _, b := range ts.Blocks() { + msg1, msg2, err := cs.MessagesForBlock(b) + if err != nil { + return zero, xerrors.Errorf("error getting messages for: %s: %w", b.Cid(), err) + } + for _, m := range msg1 { + c := m.Cid() + if _, ok := seen[c]; !ok { + totalLimit += m.GasLimit + seen[c] = struct{}{} + } + } + for _, m := range msg2 { + c := m.Cid() + if _, ok := seen[c]; !ok { + totalLimit += m.Message.GasLimit + seen[c] = struct{}{} + } + } + } + parentBaseFee := ts.Blocks()[0].ParentBaseFee + + return ComputeNextBaseFee(parentBaseFee, totalLimit, len(ts.Blocks()), ts.Height()), nil +} diff --git a/chain/store/basefee_test.go b/chain/store/basefee_test.go new file mode 100644 index 000000000..b4757f70e --- /dev/null +++ b/chain/store/basefee_test.go @@ -0,0 +1,34 @@ +package store + +import ( + "fmt" + "testing" + + "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/chain/types" + "github.com/stretchr/testify/assert" +) + +func TestBaseFee(t *testing.T) { + tests := []struct { + basefee uint64 + limitUsed 
int64 + noOfBlocks int + output uint64 + }{ + {100e6, 0, 1, 87.5e6}, + {100e6, 0, 5, 87.5e6}, + {100e6, build.BlockGasTarget, 1, 103.125e6}, + {100e6, build.BlockGasTarget * 2, 2, 103.125e6}, + {100e6, build.BlockGasLimit * 2, 2, 112.5e6}, + {100e6, build.BlockGasLimit * 1.5, 2, 110937500}, + } + + for _, test := range tests { + test := test + t.Run(fmt.Sprintf("%v", test), func(t *testing.T) { + output := ComputeNextBaseFee(types.NewInt(test.basefee), test.limitUsed, test.noOfBlocks, 0) + assert.Equal(t, fmt.Sprintf("%d", test.output), output.String()) + }) + } +} diff --git a/chain/store/fts.go b/chain/store/fts.go index f9ec4459e..0324938d7 100644 --- a/chain/store/fts.go +++ b/chain/store/fts.go @@ -32,8 +32,11 @@ func (fts *FullTipSet) Cids() []cid.Cid { return cids } +// TipSet returns a narrower view of this FullTipSet elliding the block +// messages. func (fts *FullTipSet) TipSet() *types.TipSet { if fts.tipset != nil { + // FIXME: fts.tipset is actually never set. Should it memoize? 
return fts.tipset } diff --git a/chain/store/index.go b/chain/store/index.go new file mode 100644 index 000000000..a9da994af --- /dev/null +++ b/chain/store/index.go @@ -0,0 +1,176 @@ +package store + +import ( + "context" + "os" + "strconv" + + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/lotus/chain/types" + lru "github.com/hashicorp/golang-lru" + "golang.org/x/xerrors" +) + +var DefaultChainIndexCacheSize = 32 << 10 + +func init() { + if s := os.Getenv("LOTUS_CHAIN_INDEX_CACHE"); s != "" { + lcic, err := strconv.Atoi(s) + if err != nil { + log.Errorf("failed to parse 'LOTUS_CHAIN_INDEX_CACHE' env var: %s", err) + } + DefaultChainIndexCacheSize = lcic + } + +} + +type ChainIndex struct { + skipCache *lru.ARCCache + + loadTipSet loadTipSetFunc + + skipLength abi.ChainEpoch +} +type loadTipSetFunc func(types.TipSetKey) (*types.TipSet, error) + +func NewChainIndex(lts loadTipSetFunc) *ChainIndex { + sc, _ := lru.NewARC(DefaultChainIndexCacheSize) + return &ChainIndex{ + skipCache: sc, + loadTipSet: lts, + skipLength: 20, + } +} + +type lbEntry struct { + ts *types.TipSet + parentHeight abi.ChainEpoch + targetHeight abi.ChainEpoch + target types.TipSetKey +} + +func (ci *ChainIndex) GetTipsetByHeight(_ context.Context, from *types.TipSet, to abi.ChainEpoch) (*types.TipSet, error) { + if from.Height()-to <= ci.skipLength { + return ci.walkBack(from, to) + } + + rounded, err := ci.roundDown(from) + if err != nil { + return nil, err + } + + cur := rounded.Key() + for { + cval, ok := ci.skipCache.Get(cur) + if !ok { + fc, err := ci.fillCache(cur) + if err != nil { + return nil, err + } + cval = fc + } + + lbe := cval.(*lbEntry) + if lbe.ts.Height() == to || lbe.parentHeight < to { + return lbe.ts, nil + } else if to > lbe.targetHeight { + return ci.walkBack(lbe.ts, to) + } + + cur = lbe.target + } +} + +func (ci *ChainIndex) GetTipsetByHeightWithoutCache(from *types.TipSet, to abi.ChainEpoch) (*types.TipSet, error) { + return 
ci.walkBack(from, to) +} + +func (ci *ChainIndex) fillCache(tsk types.TipSetKey) (*lbEntry, error) { + ts, err := ci.loadTipSet(tsk) + if err != nil { + return nil, err + } + + if ts.Height() == 0 { + return &lbEntry{ + ts: ts, + parentHeight: 0, + }, nil + } + + // will either be equal to ts.Height, or at least > ts.Parent.Height() + rheight := ci.roundHeight(ts.Height()) + + parent, err := ci.loadTipSet(ts.Parents()) + if err != nil { + return nil, err + } + + rheight -= ci.skipLength + + var skipTarget *types.TipSet + if parent.Height() < rheight { + skipTarget = parent + } else { + skipTarget, err = ci.walkBack(parent, rheight) + if err != nil { + return nil, xerrors.Errorf("fillCache walkback: %w", err) + } + } + + lbe := &lbEntry{ + ts: ts, + parentHeight: parent.Height(), + targetHeight: skipTarget.Height(), + target: skipTarget.Key(), + } + ci.skipCache.Add(tsk, lbe) + + return lbe, nil +} + +// floors to nearest skipLength multiple +func (ci *ChainIndex) roundHeight(h abi.ChainEpoch) abi.ChainEpoch { + return (h / ci.skipLength) * ci.skipLength +} + +func (ci *ChainIndex) roundDown(ts *types.TipSet) (*types.TipSet, error) { + target := ci.roundHeight(ts.Height()) + + rounded, err := ci.walkBack(ts, target) + if err != nil { + return nil, err + } + + return rounded, nil +} + +func (ci *ChainIndex) walkBack(from *types.TipSet, to abi.ChainEpoch) (*types.TipSet, error) { + if to > from.Height() { + return nil, xerrors.Errorf("looking for tipset with height greater than start point") + } + + if to == from.Height() { + return from, nil + } + + ts := from + + for { + pts, err := ci.loadTipSet(ts.Parents()) + if err != nil { + return nil, err + } + + if to > pts.Height() { + // in case pts is lower than the epoch we're looking for (null blocks) + // return a tipset above that height + return ts, nil + } + if to == pts.Height() { + return pts, nil + } + + ts = pts + } +} diff --git a/chain/store/index_test.go b/chain/store/index_test.go new file mode 100644 index 
000000000..63e08070c --- /dev/null +++ b/chain/store/index_test.go @@ -0,0 +1,80 @@ +package store_test + +import ( + "bytes" + "context" + "testing" + + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/lotus/chain/gen" + "github.com/filecoin-project/lotus/chain/store" + "github.com/filecoin-project/lotus/chain/types/mock" + "github.com/filecoin-project/lotus/lib/blockstore" + datastore "github.com/ipfs/go-datastore" + syncds "github.com/ipfs/go-datastore/sync" + "github.com/stretchr/testify/assert" +) + +func TestIndexSeeks(t *testing.T) { + cg, err := gen.NewGenerator() + if err != nil { + t.Fatal(err) + } + + gencar, err := cg.GenesisCar() + if err != nil { + t.Fatal(err) + } + + gen := cg.Genesis() + + ctx := context.TODO() + + nbs := blockstore.NewTemporarySync() + cs := store.NewChainStore(nbs, syncds.MutexWrap(datastore.NewMapDatastore()), nil) + + _, err = cs.Import(bytes.NewReader(gencar)) + if err != nil { + t.Fatal(err) + } + + cur := mock.TipSet(gen) + if err := cs.PutTipSet(ctx, mock.TipSet(gen)); err != nil { + t.Fatal(err) + } + assert.NoError(t, cs.SetGenesis(gen)) + + // Put 113 blocks from genesis + for i := 0; i < 113; i++ { + nextts := mock.TipSet(mock.MkBlock(cur, 1, 1)) + + if err := cs.PutTipSet(ctx, nextts); err != nil { + t.Fatal(err) + } + cur = nextts + } + + // Put 50 null epochs + 1 block + skip := mock.MkBlock(cur, 1, 1) + skip.Height += 50 + + skipts := mock.TipSet(skip) + + if err := cs.PutTipSet(ctx, skipts); err != nil { + t.Fatal(err) + } + + ts, err := cs.GetTipsetByHeight(ctx, skip.Height-10, skipts, false) + if err != nil { + t.Fatal(err) + } + assert.Equal(t, abi.ChainEpoch(164), ts.Height()) + + for i := 0; i <= 113; i++ { + ts3, err := cs.GetTipsetByHeight(ctx, abi.ChainEpoch(i), skipts, false) + if err != nil { + t.Fatal(err) + } + assert.Equal(t, abi.ChainEpoch(i), ts3.Height()) + } +} diff --git a/chain/store/store.go b/chain/store/store.go index d59126518..1dbf69547 100644 --- 
a/chain/store/store.go +++ b/chain/store/store.go @@ -1,29 +1,44 @@ package store import ( + "bytes" "context" - "crypto/sha256" "encoding/binary" "encoding/json" + "io" + "os" + "strconv" "sync" + "github.com/filecoin-project/go-state-types/crypto" + "github.com/minio/blake2b-simd" + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/specs-actors/actors/builtin" + "github.com/filecoin-project/specs-actors/actors/util/adt" + + "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/build" - "github.com/filecoin-project/lotus/chain/state" "github.com/filecoin-project/lotus/chain/vm" + "github.com/filecoin-project/lotus/journal" + bstore "github.com/filecoin-project/lotus/lib/blockstore" + "github.com/filecoin-project/lotus/metrics" + + "go.opencensus.io/stats" "go.opencensus.io/trace" "go.uber.org/multierr" - amt "github.com/filecoin-project/go-amt-ipld" "github.com/filecoin-project/lotus/chain/types" lru "github.com/hashicorp/golang-lru" block "github.com/ipfs/go-block-format" "github.com/ipfs/go-cid" dstore "github.com/ipfs/go-datastore" - hamt "github.com/ipfs/go-hamt-ipld" - bstore "github.com/ipfs/go-ipfs-blockstore" - logging "github.com/ipfs/go-log" + cbor "github.com/ipfs/go-ipld-cbor" + logging "github.com/ipfs/go-log/v2" + car "github.com/ipld/go-car" + carutil "github.com/ipld/go-car/util" cbg "github.com/whyrusleeping/cbor-gen" pubsub "github.com/whyrusleeping/pubsub" "golang.org/x/xerrors" @@ -32,7 +47,55 @@ import ( var log = logging.Logger("chainstore") var chainHeadKey = dstore.NewKey("head") +var blockValidationCacheKeyPrefix = dstore.NewKey("blockValidation") +var DefaultTipSetCacheSize = 8192 +var DefaultMsgMetaCacheSize = 2048 + +func init() { + if s := os.Getenv("LOTUS_CHAIN_TIPSET_CACHE"); s != "" { + tscs, err := strconv.Atoi(s) + if err != nil { + log.Errorf("failed to parse 'LOTUS_CHAIN_TIPSET_CACHE' env var: %s", err) + } + DefaultTipSetCacheSize = 
tscs + } + + if s := os.Getenv("LOTUS_CHAIN_MSGMETA_CACHE"); s != "" { + mmcs, err := strconv.Atoi(s) + if err != nil { + log.Errorf("failed to parse 'LOTUS_CHAIN_MSGMETA_CACHE' env var: %s", err) + } + DefaultMsgMetaCacheSize = mmcs + } +} + +// ReorgNotifee represents a callback that gets called upon reorgs. +type ReorgNotifee func(rev, app []*types.TipSet) error + +// Journal event types. +const ( + evtTypeHeadChange = iota +) + +type HeadChangeEvt struct { + From types.TipSetKey + FromHeight abi.ChainEpoch + To types.TipSetKey + ToHeight abi.ChainEpoch + RevertCount int + ApplyCount int +} + +// ChainStore is the main point of access to chain data. +// +// Raw chain data is stored in the Blockstore, with relevant markers (genesis, +// latest head tipset references) being tracked in the Datastore (key-value +// store). +// +// To alleviate disk access, the ChainStore has two ARC caches: +// 1. a tipset cache +// 2. a block => messages references cache. type ChainStore struct { bs bstore.Blockstore ds dstore.Datastore @@ -44,43 +107,56 @@ type ChainStore struct { pubLk sync.Mutex tstLk sync.Mutex - tipsets map[uint64][]cid.Cid + tipsets map[abi.ChainEpoch][]cid.Cid - reorgCh chan<- reorg - headChangeNotifs []func(rev, app []*types.TipSet) error + cindex *ChainIndex + + reorgCh chan<- reorg + reorgNotifeeCh chan ReorgNotifee mmCache *lru.ARCCache tsCache *lru.ARCCache + + vmcalls vm.SyscallBuilder + + evtTypes [1]journal.EventType } -func NewChainStore(bs bstore.Blockstore, ds dstore.Batching) *ChainStore { - c, _ := lru.NewARC(2048) - tsc, _ := lru.NewARC(4096) +func NewChainStore(bs bstore.Blockstore, ds dstore.Batching, vmcalls vm.SyscallBuilder) *ChainStore { + c, _ := lru.NewARC(DefaultMsgMetaCacheSize) + tsc, _ := lru.NewARC(DefaultTipSetCacheSize) cs := &ChainStore{ bs: bs, ds: ds, bestTips: pubsub.New(64), - tipsets: make(map[uint64][]cid.Cid), + tipsets: make(map[abi.ChainEpoch][]cid.Cid), mmCache: c, tsCache: tsc, + vmcalls: vmcalls, } - cs.reorgCh = 
cs.reorgWorker(context.TODO()) + cs.evtTypes = [1]journal.EventType{ + evtTypeHeadChange: journal.J.RegisterEventType("sync", "head_change"), + } + + ci := NewChainIndex(cs.LoadTipSet) + + cs.cindex = ci hcnf := func(rev, app []*types.TipSet) error { cs.pubLk.Lock() defer cs.pubLk.Unlock() - notif := make([]*HeadChange, len(rev)+len(app)) + notif := make([]*api.HeadChange, len(rev)+len(app)) for i, r := range rev { - notif[i] = &HeadChange{ + notif[i] = &api.HeadChange{ Type: HCRevert, Val: r, } } for i, r := range app { - notif[i+len(rev)] = &HeadChange{ + notif[i+len(rev)] = &api.HeadChange{ Type: HCApply, Val: r, } @@ -90,7 +166,16 @@ func NewChainStore(bs bstore.Blockstore, ds dstore.Batching) *ChainStore { return nil } - cs.headChangeNotifs = append(cs.headChangeNotifs, hcnf) + hcmetric := func(rev, app []*types.TipSet) error { + ctx := context.Background() + for _, r := range app { + stats.Record(ctx, metrics.ChainNodeHeight.M(int64(r.Height()))) + } + return nil + } + + cs.reorgNotifeeCh = make(chan ReorgNotifee) + cs.reorgCh = cs.reorgWorker(context.TODO(), []ReorgNotifee{hcnf, hcmetric}) return cs } @@ -139,19 +224,14 @@ const ( HCCurrent = "current" ) -type HeadChange struct { - Type string - Val *types.TipSet -} - -func (cs *ChainStore) SubHeadChanges(ctx context.Context) chan []*HeadChange { +func (cs *ChainStore) SubHeadChanges(ctx context.Context) chan []*api.HeadChange { cs.pubLk.Lock() subch := cs.bestTips.Sub("headchange") head := cs.GetHeaviestTipSet() cs.pubLk.Unlock() - out := make(chan []*HeadChange, 16) - out <- []*HeadChange{{ + out := make(chan []*api.HeadChange, 16) + out <- []*api.HeadChange{{ Type: HCCurrent, Val: head, }} @@ -171,7 +251,7 @@ func (cs *ChainStore) SubHeadChanges(ctx context.Context) chan []*HeadChange { log.Warnf("head change sub is slow, has %d buffered entries", len(out)) } select { - case out <- val.([]*HeadChange): + case out <- val.([]*api.HeadChange): case <-ctx.Done(): } case <-ctx.Done(): @@ -184,8 +264,24 @@ func 
(cs *ChainStore) SubHeadChanges(ctx context.Context) chan []*HeadChange { return out } -func (cs *ChainStore) SubscribeHeadChanges(f func(rev, app []*types.TipSet) error) { - cs.headChangeNotifs = append(cs.headChangeNotifs, f) +func (cs *ChainStore) SubscribeHeadChanges(f ReorgNotifee) { + cs.reorgNotifeeCh <- f +} + +func (cs *ChainStore) IsBlockValidated(ctx context.Context, blkid cid.Cid) (bool, error) { + key := blockValidationCacheKeyPrefix.Instance(blkid.String()) + + return cs.ds.Has(key) +} + +func (cs *ChainStore) MarkBlockAsValidated(ctx context.Context, blkid cid.Cid) error { + key := blockValidationCacheKeyPrefix.Instance(blkid.String()) + + if err := cs.ds.Put(key, []byte{0}); err != nil { + return xerrors.Errorf("cache block validation: %w", err) + } + + return nil } func (cs *ChainStore) SetGenesis(b *types.BlockHeader) error { @@ -220,6 +316,9 @@ func (cs *ChainStore) PutTipSet(ctx context.Context, ts *types.TipSet) error { return nil } +// MaybeTakeHeavierTipSet evaluates the incoming tipset and locks it in our +// internal state as our new head, if and only if it is heavier than the current +// head. 
func (cs *ChainStore) MaybeTakeHeavierTipSet(ctx context.Context, ts *types.TipSet) error { cs.heaviestLk.Lock() defer cs.heaviestLk.Unlock() @@ -246,13 +345,19 @@ type reorg struct { new *types.TipSet } -func (cs *ChainStore) reorgWorker(ctx context.Context) chan<- reorg { +func (cs *ChainStore) reorgWorker(ctx context.Context, initialNotifees []ReorgNotifee) chan<- reorg { out := make(chan reorg, 32) + notifees := make([]ReorgNotifee, len(initialNotifees)) + copy(notifees, initialNotifees) + go func() { defer log.Warn("reorgWorker quit") for { select { + case n := <-cs.reorgNotifeeCh: + notifees = append(notifees, n) + case r := <-out: revert, apply, err := cs.ReorgOps(r.old, r.new) if err != nil { @@ -260,13 +365,24 @@ func (cs *ChainStore) reorgWorker(ctx context.Context) chan<- reorg { continue } + journal.J.RecordEvent(cs.evtTypes[evtTypeHeadChange], func() interface{} { + return HeadChangeEvt{ + From: r.old.Key(), + FromHeight: r.old.Height(), + To: r.new.Key(), + ToHeight: r.new.Height(), + RevertCount: len(revert), + ApplyCount: len(apply), + } + }) + // reverse the apply array for i := len(apply)/2 - 1; i >= 0; i-- { opp := len(apply) - 1 - i apply[i], apply[opp] = apply[opp], apply[i] } - for _, hcf := range cs.headChangeNotifs { + for _, hcf := range notifees { if err := hcf(revert, apply); err != nil { log.Error("head change func errored (BAD): ", err) } @@ -279,6 +395,9 @@ func (cs *ChainStore) reorgWorker(ctx context.Context) chan<- reorg { return out } +// takeHeaviestTipSet actually sets the incoming tipset as our head both in +// memory and in the ChainStore. It also sends a notification to deliver to +// ReorgNotifees. 
func (cs *ChainStore) takeHeaviestTipSet(ctx context.Context, ts *types.TipSet) error { _, span := trace.StartSpan(ctx, "takeHeaviestTipSet") defer span.End() @@ -316,6 +435,7 @@ func (cs *ChainStore) SetHead(ts *types.TipSet) error { return cs.takeHeaviestTipSet(context.TODO(), ts) } +// Contains returns whether our BlockStore has all blocks in the supplied TipSet. func (cs *ChainStore) Contains(ts *types.TipSet) (bool, error) { for _, c := range ts.Cids() { has, err := cs.bs.Has(c) @@ -330,6 +450,8 @@ func (cs *ChainStore) Contains(ts *types.TipSet) (bool, error) { return true, nil } +// GetBlock fetches a BlockHeader with the supplied CID. It returns +// blockstore.ErrNotFound if the block was not found in the BlockStore. func (cs *ChainStore) GetBlock(c cid.Cid) (*types.BlockHeader, error) { sb, err := cs.bs.Get(c) if err != nil { @@ -365,7 +487,7 @@ func (cs *ChainStore) LoadTipSet(tsk types.TipSetKey) (*types.TipSet, error) { return ts, nil } -// returns true if 'a' is an ancestor of 'b' +// IsAncestorOf returns true if 'a' is an ancestor of 'b' func (cs *ChainStore) IsAncestorOf(a, b *types.TipSet) (bool, error) { if b.Height() <= a.Height() { return false, nil @@ -373,7 +495,7 @@ func (cs *ChainStore) IsAncestorOf(a, b *types.TipSet) (bool, error) { cur := b for !a.Equals(cur) && cur.Height() > a.Height() { - next, err := cs.LoadTipSet(b.Parents()) + next, err := cs.LoadTipSet(cur.Parents()) if err != nil { return false, err } @@ -394,6 +516,10 @@ func (cs *ChainStore) NearestCommonAncestor(a, b *types.TipSet) (*types.TipSet, } func (cs *ChainStore) ReorgOps(a, b *types.TipSet) ([]*types.TipSet, []*types.TipSet, error) { + return ReorgOps(cs.LoadTipSet, a, b) +} + +func ReorgOps(lts func(types.TipSetKey) (*types.TipSet, error), a, b *types.TipSet) ([]*types.TipSet, []*types.TipSet, error) { left := a right := b @@ -401,7 +527,7 @@ func (cs *ChainStore) ReorgOps(a, b *types.TipSet) ([]*types.TipSet, []*types.Ti for !left.Equals(right) { if left.Height() > 
right.Height() { leftChain = append(leftChain, left) - par, err := cs.LoadTipSet(left.Parents()) + par, err := lts(left.Parents()) if err != nil { return nil, nil, err } @@ -409,7 +535,7 @@ func (cs *ChainStore) ReorgOps(a, b *types.TipSet) ([]*types.TipSet, []*types.Ti left = par } else { rightChain = append(rightChain, right) - par, err := cs.LoadTipSet(right.Parents()) + par, err := lts(right.Parents()) if err != nil { log.Infof("failed to fetch right.Parents: %s", err) return nil, nil, err @@ -420,8 +546,10 @@ func (cs *ChainStore) ReorgOps(a, b *types.TipSet) ([]*types.TipSet, []*types.Ti } return leftChain, rightChain, nil + } +// GetHeaviestTipSet returns the current heaviest tipset known (i.e. our head). func (cs *ChainStore) GetHeaviestTipSet() *types.TipSet { cs.heaviestLk.Lock() defer cs.heaviestLk.Unlock() @@ -572,13 +700,13 @@ func (cs *ChainStore) GetGenesis() (*types.BlockHeader, error) { return types.DecodeBlock(genb.RawData()) } -func (cs *ChainStore) GetCMessage(c cid.Cid) (ChainMsg, error) { +func (cs *ChainStore) GetCMessage(c cid.Cid) (types.ChainMsg, error) { m, err := cs.GetMessage(c) if err == nil { return m, nil } if err != bstore.ErrNotFound { - log.Warn("GetCMessage: unexpected error getting unsigned message: %s", err) + log.Warnf("GetCMessage: unexpected error getting unsigned message: %s", err) } return cs.GetSignedMessage(c) @@ -605,86 +733,113 @@ func (cs *ChainStore) GetSignedMessage(c cid.Cid) (*types.SignedMessage, error) } func (cs *ChainStore) readAMTCids(root cid.Cid) ([]cid.Cid, error) { - bs := amt.WrapBlockstore(cs.bs) - a, err := amt.LoadAMT(bs, root) + ctx := context.TODO() + a, err := adt.AsArray(cs.Store(ctx), root) if err != nil { return nil, xerrors.Errorf("amt load: %w", err) } - var cids []cid.Cid - for i := uint64(0); i < a.Count; i++ { - var c cbg.CborCid - if err := a.Get(i, &c); err != nil { - return nil, xerrors.Errorf("failed to load cid from amt: %w", err) - } + var ( + cids []cid.Cid + cborCid cbg.CborCid + ) 
+ if err := a.ForEach(&cborCid, func(i int64) error { + c := cid.Cid(cborCid) + cids = append(cids, c) + return nil + }); err != nil { + return nil, xerrors.Errorf("failed to traverse amt: %w", err) + } - cids = append(cids, cid.Cid(c)) + if uint64(len(cids)) != a.Length() { + return nil, xerrors.Errorf("found %d cids, expected %d", len(cids), a.Length()) } return cids, nil } -type ChainMsg interface { - Cid() cid.Cid - VMMessage() *types.Message - ToStorageBlock() (block.Block, error) +type BlockMessages struct { + Miner address.Address + BlsMessages []types.ChainMsg + SecpkMessages []types.ChainMsg + WinCount int64 } -func (cs *ChainStore) MessagesForTipset(ts *types.TipSet) ([]ChainMsg, error) { +func (cs *ChainStore) BlockMsgsForTipset(ts *types.TipSet) ([]BlockMessages, error) { applied := make(map[address.Address]uint64) - balances := make(map[address.Address]types.BigInt) - cst := hamt.CSTFromBstore(cs.bs) - st, err := state.LoadStateTree(cst, ts.Blocks()[0].ParentStateRoot) - if err != nil { - return nil, xerrors.Errorf("failed to load state tree") - } - - preloadAddr := func(a address.Address) error { - if _, ok := applied[a]; !ok { - act, err := st.GetActor(a) - if err != nil { - return err - } - - applied[a] = act.Nonce - balances[a] = act.Balance + selectMsg := func(m *types.Message) (bool, error) { + // The first match for a sender is guaranteed to have correct nonce -- the block isn't valid otherwise + if _, ok := applied[m.From]; !ok { + applied[m.From] = m.Nonce } - return nil + + if applied[m.From] != m.Nonce { + return false, nil + } + + applied[m.From]++ + + return true, nil } - var out []ChainMsg + var out []BlockMessages for _, b := range ts.Blocks() { + bms, sms, err := cs.MessagesForBlock(b) if err != nil { return nil, xerrors.Errorf("failed to get messages for block: %w", err) } - cmsgs := make([]ChainMsg, 0, len(bms)+len(sms)) - for _, m := range bms { - cmsgs = append(cmsgs, m) - } - for _, sm := range sms { - cmsgs = append(cmsgs, sm) + 
bm := BlockMessages{ + Miner: b.Miner, + BlsMessages: make([]types.ChainMsg, 0, len(bms)), + SecpkMessages: make([]types.ChainMsg, 0, len(sms)), + WinCount: b.ElectionProof.WinCount, } - for _, cm := range cmsgs { - m := cm.VMMessage() - if err := preloadAddr(m.From); err != nil { - return nil, err + for _, bmsg := range bms { + b, err := selectMsg(bmsg.VMMessage()) + if err != nil { + return nil, xerrors.Errorf("failed to decide whether to select message for block: %w", err) } - if applied[m.From] != m.Nonce { - continue + if b { + bm.BlsMessages = append(bm.BlsMessages, bmsg) } - applied[m.From]++ + } - if balances[m.From].LessThan(m.RequiredFunds()) { - continue + for _, smsg := range sms { + b, err := selectMsg(smsg.VMMessage()) + if err != nil { + return nil, xerrors.Errorf("failed to decide whether to select message for block: %w", err) } - balances[m.From] = types.BigSub(balances[m.From], m.RequiredFunds()) - out = append(out, cm) + if b { + bm.SecpkMessages = append(bm.SecpkMessages, smsg) + } + } + + out = append(out, bm) + } + + return out, nil +} + +func (cs *ChainStore) MessagesForTipset(ts *types.TipSet) ([]types.ChainMsg, error) { + bmsgs, err := cs.BlockMsgsForTipset(ts) + if err != nil { + return nil, err + } + + var out []types.ChainMsg + for _, bm := range bmsgs { + for _, blsm := range bm.BlsMessages { + out = append(out, blsm) + } + + for _, secm := range bm.SecpkMessages { + out = append(out, secm) } } @@ -696,17 +851,17 @@ type mmCids struct { secpk []cid.Cid } -func (cs *ChainStore) readMsgMetaCids(mmc cid.Cid) ([]cid.Cid, []cid.Cid, error) { +func (cs *ChainStore) ReadMsgMetaCids(mmc cid.Cid) ([]cid.Cid, []cid.Cid, error) { o, ok := cs.mmCache.Get(mmc) if ok { mmcids := o.(*mmCids) return mmcids.bls, mmcids.secpk, nil } - cst := hamt.CSTFromBstore(cs.bs) + cst := cbor.NewCborStore(cs.bs) var msgmeta types.MsgMeta if err := cst.Get(context.TODO(), mmc, &msgmeta); err != nil { - return nil, nil, xerrors.Errorf("failed to load msgmeta: %w", 
err) + return nil, nil, xerrors.Errorf("failed to load msgmeta (%s): %w", mmc, err) } blscids, err := cs.readAMTCids(msgmeta.BlsMessages) @@ -727,8 +882,32 @@ func (cs *ChainStore) readMsgMetaCids(mmc cid.Cid) ([]cid.Cid, []cid.Cid, error) return blscids, secpkcids, nil } +func (cs *ChainStore) GetPath(ctx context.Context, from types.TipSetKey, to types.TipSetKey) ([]*api.HeadChange, error) { + fts, err := cs.LoadTipSet(from) + if err != nil { + return nil, xerrors.Errorf("loading from tipset %s: %w", from, err) + } + tts, err := cs.LoadTipSet(to) + if err != nil { + return nil, xerrors.Errorf("loading to tipset %s: %w", to, err) + } + revert, apply, err := cs.ReorgOps(fts, tts) + if err != nil { + return nil, xerrors.Errorf("error getting tipset branches: %w", err) + } + + path := make([]*api.HeadChange, len(revert)+len(apply)) + for i, r := range revert { + path[i] = &api.HeadChange{Type: HCRevert, Val: r} + } + for j, i := 0, len(apply)-1; i >= 0; j, i = j+1, i-1 { + path[j+len(revert)] = &api.HeadChange{Type: HCApply, Val: apply[i]} + } + return path, nil +} + func (cs *ChainStore) MessagesForBlock(b *types.BlockHeader) ([]*types.Message, []*types.SignedMessage, error) { - blscids, secpkcids, err := cs.readMsgMetaCids(b.Messages) + blscids, secpkcids, err := cs.ReadMsgMetaCids(b.Messages) if err != nil { return nil, nil, err } @@ -747,15 +926,17 @@ func (cs *ChainStore) MessagesForBlock(b *types.BlockHeader) ([]*types.Message, } func (cs *ChainStore) GetParentReceipt(b *types.BlockHeader, i int) (*types.MessageReceipt, error) { - bs := amt.WrapBlockstore(cs.bs) - a, err := amt.LoadAMT(bs, b.ParentMessageReceipts) + ctx := context.TODO() + a, err := adt.AsArray(cs.Store(ctx), b.ParentMessageReceipts) if err != nil { return nil, xerrors.Errorf("amt load: %w", err) } var r types.MessageReceipt - if err := a.Get(uint64(i), &r); err != nil { + if found, err := a.Get(uint64(i), &r); err != nil { return nil, err + } else if !found { + return nil, 
xerrors.Errorf("failed to find receipt %d", i) } return &r, nil @@ -766,7 +947,7 @@ func (cs *ChainStore) LoadMessagesFromCids(cids []cid.Cid) ([]*types.Message, er for i, c := range cids { m, err := cs.GetMessage(c) if err != nil { - return nil, xerrors.Errorf("failed to get message: (%s):%d: %w", err, c, i) + return nil, xerrors.Errorf("failed to get message: (%s):%d: %w", c, i, err) } msgs = append(msgs, m) @@ -780,7 +961,7 @@ func (cs *ChainStore) LoadSignedMessagesFromCids(cids []cid.Cid) ([]*types.Signe for i, c := range cids { m, err := cs.GetSignedMessage(c) if err != nil { - return nil, xerrors.Errorf("failed to get message: (%s):%d: %w", err, c, i) + return nil, xerrors.Errorf("failed to get message: (%s):%d: %w", c, i, err) } msgs = append(msgs, m) @@ -793,6 +974,18 @@ func (cs *ChainStore) Blockstore() bstore.Blockstore { return cs.bs } +func ActorStore(ctx context.Context, bs bstore.Blockstore) adt.Store { + return adt.WrapStore(ctx, cbor.NewCborStore(bs)) +} + +func (cs *ChainStore) Store(ctx context.Context) adt.Store { + return ActorStore(ctx, cs.bs) +} + +func (cs *ChainStore) VMSys() vm.SyscallBuilder { + return cs.vmcalls +} + func (cs *ChainStore) TryFillTipSet(ts *types.TipSet) (*FullTipSet, error) { var out []*types.FullBlock @@ -815,91 +1008,340 @@ func (cs *ChainStore) TryFillTipSet(ts *types.TipSet) (*FullTipSet, error) { return NewFullTipSet(out), nil } -func drawRandomness(t *types.Ticket, round int64) []byte { - h := sha256.New() - var buf [8]byte - binary.LittleEndian.PutUint64(buf[:], uint64(round)) - - h.Write(t.VRFProof) - h.Write(buf[:]) - - return h.Sum(nil) -} - -func (cs *ChainStore) GetRandomness(ctx context.Context, blks []cid.Cid, round int64) ([]byte, error) { - _, span := trace.StartSpan(ctx, "store.GetRandomness") - defer span.End() - span.AddAttributes(trace.Int64Attribute("round", round)) - - for { - nts, err := cs.LoadTipSet(types.NewTipSetKey(blks...)) - if err != nil { - return nil, err - } - - mtb := 
nts.MinTicketBlock() - - if int64(nts.Height()) <= round { - return drawRandomness(nts.MinTicketBlock().Ticket, round), nil - } - - // special case for lookback behind genesis block - // TODO(spec): this is not in the spec, need to sync that - if mtb.Height == 0 { - - // round is negative - thash := drawRandomness(mtb.Ticket, round*-1) - - // for negative lookbacks, just use the hash of the positive tickethash value - h := sha256.Sum256(thash) - return h[:], nil - } - - blks = mtb.Parents +func DrawRandomness(rbase []byte, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) { + h := blake2b.New256() + if err := binary.Write(h, binary.BigEndian, int64(pers)); err != nil { + return nil, xerrors.Errorf("deriving randomness: %w", err) } + VRFDigest := blake2b.Sum256(rbase) + _, err := h.Write(VRFDigest[:]) + if err != nil { + return nil, xerrors.Errorf("hashing VRFDigest: %w", err) + } + if err := binary.Write(h, binary.BigEndian, round); err != nil { + return nil, xerrors.Errorf("deriving randomness: %w", err) + } + _, err = h.Write(entropy) + if err != nil { + return nil, xerrors.Errorf("hashing entropy: %w", err) + } + + return h.Sum(nil), nil } -func (cs *ChainStore) GetTipsetByHeight(ctx context.Context, h uint64, ts *types.TipSet) (*types.TipSet, error) { +func (cs *ChainStore) GetBeaconRandomness(ctx context.Context, blks []cid.Cid, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) { + _, span := trace.StartSpan(ctx, "store.GetBeaconRandomness") + defer span.End() + span.AddAttributes(trace.Int64Attribute("round", int64(round))) + + ts, err := cs.LoadTipSet(types.NewTipSetKey(blks...)) + if err != nil { + return nil, err + } + + if round > ts.Height() { + return nil, xerrors.Errorf("cannot draw randomness from the future") + } + + searchHeight := round + if searchHeight < 0 { + searchHeight = 0 + } + + randTs, err := cs.GetTipsetByHeight(ctx, searchHeight, ts, true) + if err != nil { + 
return nil, err + } + + be, err := cs.GetLatestBeaconEntry(randTs) + if err != nil { + return nil, err + } + + // if at (or just past -- for null epochs) appropriate epoch + // or at genesis (works for negative epochs) + return DrawRandomness(be.Data, pers, round, entropy) +} + +func (cs *ChainStore) GetChainRandomness(ctx context.Context, blks []cid.Cid, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) { + _, span := trace.StartSpan(ctx, "store.GetChainRandomness") + defer span.End() + span.AddAttributes(trace.Int64Attribute("round", int64(round))) + + ts, err := cs.LoadTipSet(types.NewTipSetKey(blks...)) + if err != nil { + return nil, err + } + + if round > ts.Height() { + return nil, xerrors.Errorf("cannot draw randomness from the future") + } + + searchHeight := round + if searchHeight < 0 { + searchHeight = 0 + } + + randTs, err := cs.GetTipsetByHeight(ctx, searchHeight, ts, true) + if err != nil { + return nil, err + } + + mtb := randTs.MinTicketBlock() + + // if at (or just past -- for null epochs) appropriate epoch + // or at genesis (works for negative epochs) + return DrawRandomness(mtb.Ticket.VRFProof, pers, round, entropy) +} + +// GetTipsetByHeight returns the tipset on the chain behind 'ts' at the given +// height. In the case that the given height is a null round, the 'prev' flag +// selects the tipset before the null round if true, and the tipset following +// the null round if false. 
+func (cs *ChainStore) GetTipsetByHeight(ctx context.Context, h abi.ChainEpoch, ts *types.TipSet, prev bool) (*types.TipSet, error) { if ts == nil { ts = cs.GetHeaviestTipSet() } if h > ts.Height() { - return nil, xerrors.Errorf("looking for tipset with height less than start point") + return nil, xerrors.Errorf("looking for tipset with height greater than start point") } - if ts.Height()-h > build.ForkLengthThreshold { - log.Warnf("expensive call to GetTipsetByHeight, seeking %d levels", ts.Height()-h) + if h == ts.Height() { + return ts, nil } - for { - pts, err := cs.LoadTipSet(ts.Parents()) + lbts, err := cs.cindex.GetTipsetByHeight(ctx, ts, h) + if err != nil { + return nil, err + } + + if lbts.Height() < h { + log.Warnf("chain index returned the wrong tipset at height %d, using slow retrieval", h) + lbts, err = cs.cindex.GetTipsetByHeightWithoutCache(ts, h) if err != nil { return nil, err } + } - if h > pts.Height() { - return ts, nil + if lbts.Height() == h || !prev { + return lbts, nil + } + + return cs.LoadTipSet(lbts.Parents()) +} + +func recurseLinks(bs bstore.Blockstore, walked *cid.Set, root cid.Cid, in []cid.Cid) ([]cid.Cid, error) { + if root.Prefix().Codec != cid.DagCBOR { + return in, nil + } + + data, err := bs.Get(root) + if err != nil { + return nil, xerrors.Errorf("recurse links get (%s) failed: %w", root, err) + } + + var rerr error + err = cbg.ScanForLinks(bytes.NewReader(data.RawData()), func(c cid.Cid) { + if rerr != nil { + // No error return on ScanForLinks :( + return } - ts = pts + // traversed this already... 
+ if !walked.Visit(c) { + return + } + + in = append(in, c) + var err error + in, err = recurseLinks(bs, walked, c, in) + if err != nil { + rerr = err + } + }) + if err != nil { + return nil, xerrors.Errorf("scanning for links failed: %w", err) } + + return in, rerr +} + +func (cs *ChainStore) Export(ctx context.Context, ts *types.TipSet, inclRecentRoots abi.ChainEpoch, skipOldMsgs bool, w io.Writer) error { + if ts == nil { + ts = cs.GetHeaviestTipSet() + } + + seen := cid.NewSet() + walked := cid.NewSet() + + h := &car.CarHeader{ + Roots: ts.Cids(), + Version: 1, + } + + if err := car.WriteHeader(h, w); err != nil { + return xerrors.Errorf("failed to write car header: %s", err) + } + + blocksToWalk := ts.Cids() + currentMinHeight := ts.Height() + + walkChain := func(blk cid.Cid) error { + if !seen.Visit(blk) { + return nil + } + + data, err := cs.bs.Get(blk) + if err != nil { + return xerrors.Errorf("getting block: %w", err) + } + + if err := carutil.LdWrite(w, blk.Bytes(), data.RawData()); err != nil { + return xerrors.Errorf("failed to write block to car output: %w", err) + } + + var b types.BlockHeader + if err := b.UnmarshalCBOR(bytes.NewBuffer(data.RawData())); err != nil { + return xerrors.Errorf("unmarshaling block header (cid=%s): %w", blk, err) + } + + if currentMinHeight > b.Height { + currentMinHeight = b.Height + if currentMinHeight%builtin.EpochsInDay == 0 { + log.Infow("export", "height", currentMinHeight) + } + } + + var cids []cid.Cid + if !skipOldMsgs || b.Height > ts.Height()-inclRecentRoots { + mcids, err := recurseLinks(cs.bs, walked, b.Messages, []cid.Cid{b.Messages}) + if err != nil { + return xerrors.Errorf("recursing messages failed: %w", err) + } + cids = mcids + } + + if b.Height > 0 { + for _, p := range b.Parents { + blocksToWalk = append(blocksToWalk, p) + } + } else { + // include the genesis block + cids = append(cids, b.Parents...) 
+ } + + out := cids + + if b.Height == 0 || b.Height > ts.Height()-inclRecentRoots { + cids, err := recurseLinks(cs.bs, walked, b.ParentStateRoot, []cid.Cid{b.ParentStateRoot}) + if err != nil { + return xerrors.Errorf("recursing genesis state failed: %w", err) + } + + out = append(out, cids...) + } + + for _, c := range out { + if seen.Visit(c) { + if c.Prefix().Codec != cid.DagCBOR { + continue + } + data, err := cs.bs.Get(c) + if err != nil { + return xerrors.Errorf("writing object to car (get %s): %w", c, err) + } + + if err := carutil.LdWrite(w, c.Bytes(), data.RawData()); err != nil { + return xerrors.Errorf("failed to write out car object: %w", err) + } + } + } + + return nil + } + + log.Infow("export started") + exportStart := build.Clock.Now() + + for len(blocksToWalk) > 0 { + next := blocksToWalk[0] + blocksToWalk = blocksToWalk[1:] + if err := walkChain(next); err != nil { + return xerrors.Errorf("walk chain failed: %w", err) + } + } + + log.Infow("export finished", "duration", build.Clock.Now().Sub(exportStart).Seconds()) + + return nil +} + +func (cs *ChainStore) Import(r io.Reader) (*types.TipSet, error) { + header, err := car.LoadCar(cs.Blockstore(), r) + if err != nil { + return nil, xerrors.Errorf("loadcar failed: %w", err) + } + + root, err := cs.LoadTipSet(types.NewTipSetKey(header.Roots...)) + if err != nil { + return nil, xerrors.Errorf("failed to load root tipset from chainfile: %w", err) + } + + return root, nil +} + +func (cs *ChainStore) GetLatestBeaconEntry(ts *types.TipSet) (*types.BeaconEntry, error) { + cur := ts + for i := 0; i < 20; i++ { + cbe := cur.Blocks()[0].BeaconEntries + if len(cbe) > 0 { + return &cbe[len(cbe)-1], nil + } + + if cur.Height() == 0 { + return nil, xerrors.Errorf("made it back to genesis block without finding beacon entry") + } + + next, err := cs.LoadTipSet(cur.Parents()) + if err != nil { + return nil, xerrors.Errorf("failed to load parents when searching back for latest beacon entry: %w", err) + } + cur = 
next + } + + if os.Getenv("LOTUS_IGNORE_DRAND") == "_yes_" { + return &types.BeaconEntry{ + Data: []byte{9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9}, + }, nil + } + + return nil, xerrors.Errorf("found NO beacon entries in the 20 latest tipsets") } type chainRand struct { cs *ChainStore blks []cid.Cid - bh uint64 } -func NewChainRand(cs *ChainStore, blks []cid.Cid, bheight uint64) vm.Rand { +func NewChainRand(cs *ChainStore, blks []cid.Cid) vm.Rand { return &chainRand{ cs: cs, blks: blks, - bh: bheight, } } -func (cr *chainRand) GetRandomness(ctx context.Context, round int64) ([]byte, error) { - return cr.cs.GetRandomness(ctx, cr.blks, round) +func (cr *chainRand) GetChainRandomness(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) { + return cr.cs.GetChainRandomness(ctx, cr.blks, pers, round, entropy) +} + +func (cr *chainRand) GetBeaconRandomness(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) { + return cr.cs.GetBeaconRandomness(ctx, cr.blks, pers, round, entropy) +} + +func (cs *ChainStore) GetTipSetFromKey(tsk types.TipSetKey) (*types.TipSet, error) { + if tsk.IsEmpty() { + return cs.GetHeaviestTipSet(), nil + } + return cs.LoadTipSet(tsk) } diff --git a/chain/store/store_test.go b/chain/store/store_test.go index 4d6d11c1b..b7adfb595 100644 --- a/chain/store/store_test.go +++ b/chain/store/store_test.go @@ -1,20 +1,27 @@ package store_test import ( + "bytes" "context" "testing" - "github.com/filecoin-project/lotus/build" + datastore "github.com/ipfs/go-datastore" + + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/crypto" + + "github.com/filecoin-project/lotus/chain/actors/policy" "github.com/filecoin-project/lotus/chain/gen" "github.com/filecoin-project/lotus/chain/store" "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/lib/blockstore" 
"github.com/filecoin-project/lotus/node/repo" - blockstore "github.com/ipfs/go-ipfs-blockstore" ) func init() { - build.SectorSizes = []uint64{1024} - build.MinimumMinerPower = 1024 + policy.SetSupportedProofTypes(abi.RegisteredSealProof_StackedDrg2KiBV1) + policy.SetConsensusMinerMinPower(abi.NewStoragePower(2048)) + policy.SetMinVerifiedDealSize(abi.NewStoragePower(256)) } func BenchmarkGetRandomness(b *testing.B) { @@ -43,7 +50,7 @@ func BenchmarkGetRandomness(b *testing.B) { b.Fatal(err) } - bds, err := lr.Datastore("/blocks") + bds, err := lr.Datastore("/chain") if err != nil { b.Fatal(err) } @@ -55,14 +62,48 @@ func BenchmarkGetRandomness(b *testing.B) { bs := blockstore.NewBlockstore(bds) - cs := store.NewChainStore(bs, mds) + cs := store.NewChainStore(bs, mds, nil) b.ResetTimer() for i := 0; i < b.N; i++ { - _, err := cs.GetRandomness(context.TODO(), last.Cids(), 500) + _, err := cs.GetChainRandomness(context.TODO(), last.Cids(), crypto.DomainSeparationTag_SealRandomness, 500, nil) if err != nil { b.Fatal(err) } } } + +func TestChainExportImport(t *testing.T) { + cg, err := gen.NewGenerator() + if err != nil { + t.Fatal(err) + } + + var last *types.TipSet + for i := 0; i < 100; i++ { + ts, err := cg.NextTipSet() + if err != nil { + t.Fatal(err) + } + + last = ts.TipSet.TipSet() + } + + buf := new(bytes.Buffer) + if err := cg.ChainStore().Export(context.TODO(), last, 0, false, buf); err != nil { + t.Fatal(err) + } + + nbs := blockstore.NewTemporary() + cs := store.NewChainStore(nbs, datastore.NewMapDatastore(), nil) + + root, err := cs.Import(buf) + if err != nil { + t.Fatal(err) + } + + if !root.Equals(last) { + t.Fatal("imported chain differed from exported chain") + } +} diff --git a/chain/store/weight.go b/chain/store/weight.go index 901356728..9100df315 100644 --- a/chain/store/weight.go +++ b/chain/store/weight.go @@ -4,10 +4,13 @@ import ( "context" "math/big" + "github.com/filecoin-project/lotus/chain/actors/builtin/power" + + big2 
"github.com/filecoin-project/go-state-types/big" "github.com/filecoin-project/lotus/build" - "github.com/filecoin-project/lotus/chain/actors" + "github.com/filecoin-project/lotus/chain/state" "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/chain/vm" + cbor "github.com/ipfs/go-ipld-cbor" "golang.org/x/xerrors" ) @@ -17,25 +20,39 @@ func (cs *ChainStore) Weight(ctx context.Context, ts *types.TipSet) (types.BigIn if ts == nil { return types.NewInt(0), nil } - // >>> w[r] <<< + wFunction(totalPowerAtTipset(ts)) * 2^8 + (wFunction(totalPowerAtTipset(ts)) * len(ts.blocks) * wRatio_num * 2^8) / (e * wRatio_den) + // >>> w[r] <<< + wFunction(totalPowerAtTipset(ts)) * 2^8 + (wFunction(totalPowerAtTipset(ts)) * sum(ts.blocks[].ElectionProof.WinCount) * wRatio_num * 2^8) / (e * wRatio_den) - var out = new(big.Int).Set(ts.Blocks()[0].ParentWeight.Int) + var out = new(big.Int).Set(ts.ParentWeight().Int) - // >>> wFunction(totalPowerAtTipset(ts)) * 2^8 <<< + (wFunction(totalPowerAtTipset(ts)) * len(ts.blocks) * wRatio_num * 2^8) / (e * wRatio_den) + // >>> wFunction(totalPowerAtTipset(ts)) * 2^8 <<< + (wFunction(totalPowerAtTipset(ts)) * sum(ts.blocks[].ElectionProof.WinCount) * wRatio_num * 2^8) / (e * wRatio_den) - ret, err := cs.call(ctx, &types.Message{ - From: actors.StoragePowerAddress, - To: actors.StoragePowerAddress, - Method: actors.SPAMethods.GetTotalStorage, - }, ts) - if err != nil { - return types.EmptyInt, xerrors.Errorf("failed to get total power from chain: %w", err) - } - if ret.ExitCode != 0 { - return types.EmptyInt, xerrors.Errorf("failed to get total power from chain (exit code %d)", ret.ExitCode) + tpow := big2.Zero() + { + cst := cbor.NewCborStore(cs.Blockstore()) + state, err := state.LoadStateTree(cst, ts.ParentState()) + if err != nil { + return types.NewInt(0), xerrors.Errorf("load state tree: %w", err) + } + + act, err := state.GetActor(power.Address) + if err != nil { + return types.NewInt(0), xerrors.Errorf("get 
power actor: %w", err) + } + + powState, err := power.Load(cs.Store(ctx), act) + if err != nil { + return types.NewInt(0), xerrors.Errorf("failed to load power actor state: %w", err) + } + + claim, err := powState.TotalPower() + if err != nil { + return types.NewInt(0), xerrors.Errorf("failed to get total power: %w", err) + } + + tpow = claim.QualityAdjPower // TODO: REVIEW: Is this correct? } + log2P := int64(0) - tpow := types.BigFromBytes(ret.Return) if tpow.GreaterThan(zero) { log2P = int64(tpow.BitLen() - 1) } else { @@ -45,51 +62,19 @@ func (cs *ChainStore) Weight(ctx context.Context, ts *types.TipSet) (types.BigIn out.Add(out, big.NewInt(log2P<<8)) - // (wFunction(totalPowerAtTipset(ts)) * len(ts.blocks) * wRatio_num * 2^8) / (e * wRatio_den) + // (wFunction(totalPowerAtTipset(ts)) * sum(ts.blocks[].ElectionProof.WinCount) * wRatio_num * 2^8) / (e * wRatio_den) - eWeight := big.NewInt((log2P * int64(len(ts.Blocks())) * build.WRatioNum) << 8) - eWeight.Div(eWeight, big.NewInt(int64(build.BlocksPerEpoch*build.WRatioDen))) - out.Add(out, eWeight) + totalJ := int64(0) + for _, b := range ts.Blocks() { + totalJ += b.ElectionProof.WinCount + } + + eWeight := big.NewInt((log2P * build.WRatioNum)) + eWeight = eWeight.Lsh(eWeight, 8) + eWeight = eWeight.Mul(eWeight, new(big.Int).SetInt64(totalJ)) + eWeight = eWeight.Div(eWeight, big.NewInt(int64(build.BlocksPerEpoch*build.WRatioDen))) + + out = out.Add(out, eWeight) return types.BigInt{Int: out}, nil } - -// todo: dedupe with state manager -func (cs *ChainStore) call(ctx context.Context, msg *types.Message, ts *types.TipSet) (*types.MessageReceipt, error) { - bstate := ts.ParentState() - - r := NewChainRand(cs, ts.Cids(), ts.Height()) - - vmi, err := vm.NewVM(bstate, ts.Height(), r, actors.NetworkAddress, cs.bs) - if err != nil { - return nil, xerrors.Errorf("failed to set up vm: %w", err) - } - - if msg.GasLimit == types.EmptyInt { - msg.GasLimit = types.NewInt(10000000000) - } - if msg.GasPrice == types.EmptyInt { 
- msg.GasPrice = types.NewInt(0) - } - if msg.Value == types.EmptyInt { - msg.Value = types.NewInt(0) - } - - fromActor, err := vmi.StateTree().GetActor(msg.From) - if err != nil { - return nil, xerrors.Errorf("call raw get actor: %s", err) - } - - msg.Nonce = fromActor.Nonce - - // TODO: maybe just use the invoker directly? - ret, err := vmi.ApplyMessage(ctx, msg) - if err != nil { - return nil, xerrors.Errorf("apply message failed: %w", err) - } - - if ret.ActorErr != nil { - log.Warnf("chain call failed: %s", ret.ActorErr) - } - return &ret.MessageReceipt, nil -} diff --git a/chain/sub/incoming.go b/chain/sub/incoming.go index 79dc29b80..c6e0c8b80 100644 --- a/chain/sub/incoming.go +++ b/chain/sub/incoming.go @@ -2,20 +2,48 @@ package sub import ( "context" + "errors" + "fmt" + "sync" "time" - logging "github.com/ipfs/go-log" - connmgr "github.com/libp2p/go-libp2p-core/connmgr" - pubsub "github.com/libp2p/go-libp2p-pubsub" + "golang.org/x/xerrors" + address "github.com/filecoin-project/go-address" + "github.com/filecoin-project/specs-actors/actors/util/adt" + lru "github.com/hashicorp/golang-lru" + blocks "github.com/ipfs/go-block-format" + bserv "github.com/ipfs/go-blockservice" + "github.com/ipfs/go-cid" + cbor "github.com/ipfs/go-ipld-cbor" + logging "github.com/ipfs/go-log/v2" + connmgr "github.com/libp2p/go-libp2p-core/connmgr" + "github.com/libp2p/go-libp2p-core/peer" + pubsub "github.com/libp2p/go-libp2p-pubsub" + cbg "github.com/whyrusleeping/cbor-gen" + "go.opencensus.io/stats" + "go.opencensus.io/tag" + + "github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/chain" + "github.com/filecoin-project/lotus/chain/actors/builtin/miner" "github.com/filecoin-project/lotus/chain/messagepool" + "github.com/filecoin-project/lotus/chain/state" + "github.com/filecoin-project/lotus/chain/stmgr" + "github.com/filecoin-project/lotus/chain/store" "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/lib/blockstore" + 
"github.com/filecoin-project/lotus/lib/bufbstore" + "github.com/filecoin-project/lotus/lib/sigs" + "github.com/filecoin-project/lotus/metrics" ) var log = logging.Logger("sub") -func HandleIncomingBlocks(ctx context.Context, bsub *pubsub.Subscription, s *chain.Syncer, cmgr connmgr.ConnManager) { +var ErrSoftFailure = errors.New("soft validation failure") +var ErrInsufficientPower = errors.New("incoming block's miner does not have minimum power") + +func HandleIncomingBlocks(ctx context.Context, bsub *pubsub.Subscription, s *chain.Syncer, bserv bserv.BlockService, cmgr connmgr.ConnManager) { for { msg, err := bsub.Next(ctx) if err != nil { @@ -27,32 +55,32 @@ func HandleIncomingBlocks(ctx context.Context, bsub *pubsub.Subscription, s *cha continue } - blk, err := types.DecodeBlockMsg(msg.GetData()) - if err != nil { - log.Error("got invalid block over pubsub: ", err) - continue + blk, ok := msg.ValidatorData.(*types.BlockMsg) + if !ok { + log.Warnf("pubsub block validator passed on wrong type: %#v", msg.ValidatorData) + return } + src := msg.GetFrom() + go func() { - log.Infof("New block over pubsub: %s", blk.Cid()) - - start := time.Now() + start := build.Clock.Now() log.Debug("about to fetch messages for block from pubsub") - bmsgs, err := s.Bsync.FetchMessagesByCids(context.TODO(), blk.BlsMessages) + bmsgs, err := FetchMessagesByCids(context.TODO(), bserv, blk.BlsMessages) if err != nil { - log.Errorf("failed to fetch all bls messages for block received over pubusb: %s", err) + log.Errorf("failed to fetch all bls messages for block received over pubusb: %s; source: %s", err, src) return } - smsgs, err := s.Bsync.FetchSignedMessagesByCids(context.TODO(), blk.SecpkMessages) + smsgs, err := FetchSignedMessagesByCids(context.TODO(), bserv, blk.SecpkMessages) if err != nil { - log.Errorf("failed to fetch all secpk messages for block received over pubusb: %s", err) + log.Errorf("failed to fetch all secpk messages for block received over pubusb: %s; source: %s", err, 
src) return } - took := time.Since(start) + took := build.Clock.Since(start) log.Infow("new block over pubsub", "cid", blk.Header.Cid(), "source", msg.GetFrom(), "msgfetch", took) - if delay := time.Now().Unix() - int64(blk.Header.Timestamp); delay > 5 { + if delay := build.Clock.Now().Unix() - int64(blk.Header.Timestamp); delay > 5 { log.Warnf("Received block with large delay %d from miner %s", delay, blk.Header.Miner) } @@ -67,9 +95,516 @@ func HandleIncomingBlocks(ctx context.Context, bsub *pubsub.Subscription, s *cha } } +func FetchMessagesByCids( + ctx context.Context, + bserv bserv.BlockService, + cids []cid.Cid, +) ([]*types.Message, error) { + out := make([]*types.Message, len(cids)) + + err := fetchCids(ctx, bserv, cids, func(i int, b blocks.Block) error { + msg, err := types.DecodeMessage(b.RawData()) + if err != nil { + return err + } + + // FIXME: We already sort in `fetchCids`, we are duplicating too much work, + // we don't need to pass the index. + if out[i] != nil { + return fmt.Errorf("received duplicate message") + } + + out[i] = msg + return nil + }) + if err != nil { + return nil, err + } + return out, nil +} + +// FIXME: Duplicate of above. +func FetchSignedMessagesByCids( + ctx context.Context, + bserv bserv.BlockService, + cids []cid.Cid, +) ([]*types.SignedMessage, error) { + out := make([]*types.SignedMessage, len(cids)) + + err := fetchCids(ctx, bserv, cids, func(i int, b blocks.Block) error { + smsg, err := types.DecodeSignedMessage(b.RawData()) + if err != nil { + return err + } + + if out[i] != nil { + return fmt.Errorf("received duplicate message") + } + + out[i] = smsg + return nil + }) + if err != nil { + return nil, err + } + return out, nil +} + +// Fetch `cids` from the block service, apply `cb` on each of them. Used +// by the fetch message functions above. +// We check that each block is received only once and we do not receive +// blocks we did not request. 
+func fetchCids( + ctx context.Context, + bserv bserv.BlockService, + cids []cid.Cid, + cb func(int, blocks.Block) error, +) error { + // FIXME: Why don't we use the context here? + fetchedBlocks := bserv.GetBlocks(context.TODO(), cids) + + cidIndex := make(map[cid.Cid]int) + for i, c := range cids { + cidIndex[c] = i + } + + for i := 0; i < len(cids); i++ { + select { + case block, ok := <-fetchedBlocks: + if !ok { + // Closed channel, no more blocks fetched, check if we have all + // of the CIDs requested. + // FIXME: Review this check. We don't call the callback on the + // last index? + if i == len(cids)-1 { + break + } + + return fmt.Errorf("failed to fetch all messages") + } + + ix, ok := cidIndex[block.Cid()] + if !ok { + return fmt.Errorf("received message we didnt ask for") + } + + if err := cb(ix, block); err != nil { + return err + } + } + } + + return nil +} + +type BlockValidator struct { + self peer.ID + + peers *lru.TwoQueueCache + + killThresh int + + recvBlocks *blockReceiptCache + + blacklist func(peer.ID) + + // necessary for block validation + chain *store.ChainStore + stmgr *stmgr.StateManager + + mx sync.Mutex + keycache map[string]address.Address +} + +func NewBlockValidator(self peer.ID, chain *store.ChainStore, stmgr *stmgr.StateManager, blacklist func(peer.ID)) *BlockValidator { + p, _ := lru.New2Q(4096) + return &BlockValidator{ + self: self, + peers: p, + killThresh: 10, + blacklist: blacklist, + recvBlocks: newBlockReceiptCache(), + chain: chain, + stmgr: stmgr, + keycache: make(map[string]address.Address), + } +} + +func (bv *BlockValidator) flagPeer(p peer.ID) { + v, ok := bv.peers.Get(p) + if !ok { + bv.peers.Add(p, int(1)) + return + } + + val := v.(int) + + if val >= bv.killThresh { + log.Warnf("blacklisting peer %s", p) + bv.blacklist(p) + return + } + + bv.peers.Add(p, v.(int)+1) +} + +func (bv *BlockValidator) Validate(ctx context.Context, pid peer.ID, msg *pubsub.Message) pubsub.ValidationResult { + if pid == bv.self { + return 
bv.validateLocalBlock(ctx, msg) + } + + // track validation time + begin := build.Clock.Now() + defer func() { + log.Debugf("block validation time: %s", build.Clock.Since(begin)) + }() + + stats.Record(ctx, metrics.BlockReceived.M(1)) + + recordFailureFlagPeer := func(what string) { + recordFailure(ctx, metrics.BlockValidationFailure, what) + bv.flagPeer(pid) + } + + blk, what, err := bv.decodeAndCheckBlock(msg) + if err != nil { + log.Error("got invalid block over pubsub: ", err) + recordFailureFlagPeer(what) + return pubsub.ValidationReject + } + + // validate the block meta: the Message CID in the header must match the included messages + err = bv.validateMsgMeta(ctx, blk) + if err != nil { + log.Warnf("error validating message metadata: %s", err) + recordFailureFlagPeer("invalid_block_meta") + return pubsub.ValidationReject + } + + // we want to ensure that it is a block from a known miner; we reject blocks from unknown miners + // to prevent spam attacks. + // the logic works as follows: we lookup the miner in the chain for its key. + // if we can find it then it's a known miner and we can validate the signature. + // if we can't find it, we check whether we are (near) synced in the chain. + // if we are not synced we cannot validate the block and we must ignore it. + // if we are synced and the miner is unknown, then the block is rejected. 
+ key, err := bv.checkPowerAndGetWorkerKey(ctx, blk.Header) + if err != nil { + if err != ErrSoftFailure && bv.isChainNearSynced() { + log.Warnf("received block from unknown miner or miner that doesn't meet min power over pubsub; rejecting message") + recordFailureFlagPeer("unknown_miner") + return pubsub.ValidationReject + } + + log.Warnf("cannot validate block message; unknown miner or miner that doesn't meet min power in unsynced chain") + return pubsub.ValidationIgnore + } + + err = sigs.CheckBlockSignature(ctx, blk.Header, key) + if err != nil { + log.Errorf("block signature verification failed: %s", err) + recordFailureFlagPeer("signature_verification_failed") + return pubsub.ValidationReject + } + + if blk.Header.ElectionProof.WinCount < 1 { + log.Errorf("block is not claiming to be winning") + recordFailureFlagPeer("not_winning") + return pubsub.ValidationReject + } + + // it's a good block! make sure we've only seen it once + if bv.recvBlocks.add(blk.Header.Cid()) > 0 { + // TODO: once these changes propagate to the network, we can consider + // dropping peers who send us the same block multiple times + return pubsub.ValidationIgnore + } + + // all good, accept the block + msg.ValidatorData = blk + stats.Record(ctx, metrics.BlockValidationSuccess.M(1)) + return pubsub.ValidationAccept +} + +func (bv *BlockValidator) validateLocalBlock(ctx context.Context, msg *pubsub.Message) pubsub.ValidationResult { + stats.Record(ctx, metrics.BlockPublished.M(1)) + + blk, what, err := bv.decodeAndCheckBlock(msg) + if err != nil { + log.Errorf("got invalid local block: %s", err) + ctx, _ = tag.New(ctx, tag.Insert(metrics.FailureType, what)) + stats.Record(ctx, metrics.BlockValidationFailure.M(1)) + return pubsub.ValidationIgnore + } + + if count := bv.recvBlocks.add(blk.Header.Cid()); count > 0 { + log.Warnf("local block has been seen %d times; ignoring", count) + return pubsub.ValidationIgnore + } + + msg.ValidatorData = blk + stats.Record(ctx, 
metrics.BlockValidationSuccess.M(1)) + return pubsub.ValidationAccept +} + +func (bv *BlockValidator) decodeAndCheckBlock(msg *pubsub.Message) (*types.BlockMsg, string, error) { + blk, err := types.DecodeBlockMsg(msg.GetData()) + if err != nil { + return nil, "invalid", xerrors.Errorf("error decoding block: %w", err) + } + + if count := len(blk.BlsMessages) + len(blk.SecpkMessages); count > build.BlockMessageLimit { + return nil, "too_many_messages", fmt.Errorf("block contains too many messages (%d)", count) + } + + // make sure we have a signature + if blk.Header.BlockSig == nil { + return nil, "missing_signature", fmt.Errorf("block without a signature") + } + + return blk, "", nil +} + +func (bv *BlockValidator) isChainNearSynced() bool { + ts := bv.chain.GetHeaviestTipSet() + timestamp := ts.MinTimestamp() + timestampTime := time.Unix(int64(timestamp), 0) + return build.Clock.Since(timestampTime) < 6*time.Hour +} + +func (bv *BlockValidator) validateMsgMeta(ctx context.Context, msg *types.BlockMsg) error { + // TODO there has to be a simpler way to do this without the blockstore dance + store := adt.WrapStore(ctx, cbor.NewCborStore(blockstore.NewTemporary())) + bmArr := adt.MakeEmptyArray(store) + smArr := adt.MakeEmptyArray(store) + + for i, m := range msg.BlsMessages { + c := cbg.CborCid(m) + if err := bmArr.Set(uint64(i), &c); err != nil { + return err + } + } + + for i, m := range msg.SecpkMessages { + c := cbg.CborCid(m) + if err := smArr.Set(uint64(i), &c); err != nil { + return err + } + } + + bmroot, err := bmArr.Root() + if err != nil { + return err + } + + smroot, err := smArr.Root() + if err != nil { + return err + } + + mrcid, err := store.Put(store.Context(), &types.MsgMeta{ + BlsMessages: bmroot, + SecpkMessages: smroot, + }) + + if err != nil { + return err + } + + if msg.Header.Messages != mrcid { + return fmt.Errorf("messages didn't match root cid in header") + } + + return nil +} + +func (bv *BlockValidator) checkPowerAndGetWorkerKey(ctx 
context.Context, bh *types.BlockHeader) (address.Address, error) { + addr := bh.Miner + + bv.mx.Lock() + key, ok := bv.keycache[addr.String()] + bv.mx.Unlock() + if !ok { + // TODO I have a feeling all this can be simplified by cleverer DI to use the API + ts := bv.chain.GetHeaviestTipSet() + st, _, err := bv.stmgr.TipSetState(ctx, ts) + if err != nil { + return address.Undef, err + } + + buf := bufbstore.NewBufferedBstore(bv.chain.Blockstore()) + cst := cbor.NewCborStore(buf) + state, err := state.LoadStateTree(cst, st) + if err != nil { + return address.Undef, err + } + act, err := state.GetActor(addr) + if err != nil { + return address.Undef, err + } + + mst, err := miner.Load(bv.chain.Store(ctx), act) + if err != nil { + return address.Undef, err + } + + info, err := mst.Info() + if err != nil { + return address.Undef, err + } + + worker := info.Worker + key, err = bv.stmgr.ResolveToKeyAddress(ctx, worker, ts) + if err != nil { + return address.Undef, err + } + + bv.mx.Lock() + bv.keycache[addr.String()] = key + bv.mx.Unlock() + } + + // we check that the miner met the minimum power at the lookback tipset + + baseTs := bv.chain.GetHeaviestTipSet() + lbts, err := stmgr.GetLookbackTipSetForRound(ctx, bv.stmgr, baseTs, bh.Height) + if err != nil { + log.Warnf("failed to load lookback tipset for incoming block: %s", err) + return address.Undef, ErrSoftFailure + } + + hmp, err := stmgr.MinerHasMinPower(ctx, bv.stmgr, bh.Miner, lbts) + if err != nil { + log.Warnf("failed to determine if incoming block's miner has minimum power: %s", err) + return address.Undef, ErrSoftFailure + } + + if !hmp { + log.Warnf("incoming block's miner does not have minimum power") + return address.Undef, ErrInsufficientPower + } + + return key, nil +} + +type blockReceiptCache struct { + blocks *lru.TwoQueueCache +} + +func newBlockReceiptCache() *blockReceiptCache { + c, _ := lru.New2Q(8192) + + return &blockReceiptCache{ + blocks: c, + } +} + +func (brc *blockReceiptCache) add(bcid 
cid.Cid) int { + val, ok := brc.blocks.Get(bcid) + if !ok { + brc.blocks.Add(bcid, int(1)) + return 0 + } + + brc.blocks.Add(bcid, val.(int)+1) + return val.(int) +} + +type MessageValidator struct { + self peer.ID + mpool *messagepool.MessagePool +} + +func NewMessageValidator(self peer.ID, mp *messagepool.MessagePool) *MessageValidator { + return &MessageValidator{self: self, mpool: mp} +} + +func (mv *MessageValidator) Validate(ctx context.Context, pid peer.ID, msg *pubsub.Message) pubsub.ValidationResult { + if pid == mv.self { + return mv.validateLocalMessage(ctx, msg) + } + + stats.Record(ctx, metrics.MessageReceived.M(1)) + m, err := types.DecodeSignedMessage(msg.Message.GetData()) + if err != nil { + log.Warnf("failed to decode incoming message: %s", err) + ctx, _ = tag.New(ctx, tag.Insert(metrics.FailureType, "decode")) + stats.Record(ctx, metrics.MessageValidationFailure.M(1)) + return pubsub.ValidationReject + } + + if err := mv.mpool.Add(m); err != nil { + log.Debugf("failed to add message from network to message pool (From: %s, To: %s, Nonce: %d, Value: %s): %s", m.Message.From, m.Message.To, m.Message.Nonce, types.FIL(m.Message.Value), err) + ctx, _ = tag.New( + ctx, + tag.Upsert(metrics.Local, "false"), + ) + recordFailure(ctx, metrics.MessageValidationFailure, "add") + switch { + case xerrors.Is(err, messagepool.ErrSoftValidationFailure): + fallthrough + case xerrors.Is(err, messagepool.ErrRBFTooLowPremium): + fallthrough + case xerrors.Is(err, messagepool.ErrTooManyPendingMessages): + fallthrough + case xerrors.Is(err, messagepool.ErrNonceGap): + fallthrough + case xerrors.Is(err, messagepool.ErrNonceTooLow): + return pubsub.ValidationIgnore + default: + return pubsub.ValidationReject + } + } + stats.Record(ctx, metrics.MessageValidationSuccess.M(1)) + return pubsub.ValidationAccept +} + +func (mv *MessageValidator) validateLocalMessage(ctx context.Context, msg *pubsub.Message) pubsub.ValidationResult { + ctx, _ = tag.New( + ctx, + 
tag.Upsert(metrics.Local, "true"), + ) + // do some lightweight validation + stats.Record(ctx, metrics.MessagePublished.M(1)) + + m, err := types.DecodeSignedMessage(msg.Message.GetData()) + if err != nil { + log.Warnf("failed to decode local message: %s", err) + recordFailure(ctx, metrics.MessageValidationFailure, "decode") + return pubsub.ValidationIgnore + } + + if m.Size() > 32*1024 { + log.Warnf("local message is too large! (%dB)", m.Size()) + recordFailure(ctx, metrics.MessageValidationFailure, "oversize") + return pubsub.ValidationIgnore + } + + if m.Message.To == address.Undef { + log.Warn("local message has invalid destination address") + recordFailure(ctx, metrics.MessageValidationFailure, "undef-addr") + return pubsub.ValidationIgnore + } + + if !m.Message.Value.LessThan(types.TotalFilecoinInt) { + log.Warnf("local messages has too high value: %s", m.Message.Value) + recordFailure(ctx, metrics.MessageValidationFailure, "value-too-high") + return pubsub.ValidationIgnore + } + + if err := mv.mpool.VerifyMsgSig(m); err != nil { + log.Warnf("signature verification failed for local message: %s", err) + recordFailure(ctx, metrics.MessageValidationFailure, "verify-sig") + return pubsub.ValidationIgnore + } + + stats.Record(ctx, metrics.MessageValidationSuccess.M(1)) + return pubsub.ValidationAccept +} + func HandleIncomingMessages(ctx context.Context, mpool *messagepool.MessagePool, msub *pubsub.Subscription) { for { - msg, err := msub.Next(ctx) + _, err := msub.Next(ctx) if err != nil { log.Warn("error from message subscription: ", err) if ctx.Err() != nil { @@ -79,15 +614,14 @@ func HandleIncomingMessages(ctx context.Context, mpool *messagepool.MessagePool, continue } - m, err := types.DecodeSignedMessage(msg.GetData()) - if err != nil { - log.Errorf("got incorrectly formatted Message: %s", err) - continue - } - - if err := mpool.Add(m); err != nil { - log.Warnf("failed to add message from network to message pool (From: %s, To: %s, Nonce: %d, Value: %s): %s", 
m.Message.From, m.Message.To, m.Message.Nonce, types.FIL(m.Message.Value), err) - continue - } + // Do nothing... everything happens in validate } } + +func recordFailure(ctx context.Context, metric *stats.Int64Measure, failureType string) { + ctx, _ = tag.New( + ctx, + tag.Upsert(metrics.FailureType, failureType), + ) + stats.Record(ctx, metric.M(1)) +} diff --git a/chain/sync.go b/chain/sync.go index 9065bc099..9e098a57e 100644 --- a/chain/sync.go +++ b/chain/sync.go @@ -3,50 +3,102 @@ package chain import ( "bytes" "context" - "crypto/sha256" "errors" "fmt" + "os" + "sort" + "strings" "sync" "time" + "github.com/filecoin-project/lotus/node/modules/dtypes" + + "github.com/filecoin-project/specs-actors/actors/runtime/proof" + "github.com/Gurpartap/async" - bls "github.com/filecoin-project/filecoin-ffi" - amt "github.com/filecoin-project/go-amt-ipld" - sectorbuilder "github.com/filecoin-project/go-sectorbuilder" "github.com/hashicorp/go-multierror" + blocks "github.com/ipfs/go-block-format" "github.com/ipfs/go-cid" - dstore "github.com/ipfs/go-datastore" - hamt "github.com/ipfs/go-hamt-ipld" - bstore "github.com/ipfs/go-ipfs-blockstore" - logging "github.com/ipfs/go-log" + cbor "github.com/ipfs/go-ipld-cbor" + logging "github.com/ipfs/go-log/v2" "github.com/libp2p/go-libp2p-core/connmgr" "github.com/libp2p/go-libp2p-core/peer" cbg "github.com/whyrusleeping/cbor-gen" "github.com/whyrusleeping/pubsub" + "go.opencensus.io/stats" "go.opencensus.io/trace" "golang.org/x/xerrors" "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/crypto" + "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper" + "github.com/filecoin-project/specs-actors/actors/util/adt" + blst "github.com/supranational/blst/bindings/go" "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/build" - "github.com/filecoin-project/lotus/chain/actors" - 
"github.com/filecoin-project/lotus/chain/blocksync" + "github.com/filecoin-project/lotus/chain/actors/builtin/power" + "github.com/filecoin-project/lotus/chain/beacon" + "github.com/filecoin-project/lotus/chain/exchange" "github.com/filecoin-project/lotus/chain/gen" "github.com/filecoin-project/lotus/chain/state" "github.com/filecoin-project/lotus/chain/stmgr" "github.com/filecoin-project/lotus/chain/store" "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/chain/vm" + bstore "github.com/filecoin-project/lotus/lib/blockstore" + "github.com/filecoin-project/lotus/lib/sigs" + "github.com/filecoin-project/lotus/lib/sigs/bls" + "github.com/filecoin-project/lotus/metrics" ) -var log = logging.Logger("chain") +// Blocks that are more than MaxHeightDrift epochs above +// the theoretical max height based on systime are quickly rejected +const MaxHeightDrift = 5 -var LocalIncoming = "incoming" +var ( + // LocalIncoming is the _local_ pubsub (unrelated to libp2p pubsub) topic + // where the Syncer publishes candidate chain heads to be synced. + LocalIncoming = "incoming" + log = logging.Logger("chain") + + concurrentSyncRequests = exchange.ShufflePeersPrefix + syncRequestBatchSize = 8 + syncRequestRetries = 5 +) + +// Syncer is in charge of running the chain synchronization logic. As such, it +// is tasked with these functions, amongst others: +// +// * Fast-forwards the chain as it learns of new TipSets from the network via +// the SyncManager. +// * Applies the fork choice rule to select the correct side when confronted +// with a fork in the network. +// * Requests block headers and messages from other peers when not available +// in our BlockStore. +// * Tracks blocks marked as bad in a cache. +// * Keeps the BlockStore and ChainStore consistent with our view of the world, +// the latter of which in turn informs other components when a reorg has been +// committed. +// +// The Syncer does not run workers itself. 
It's mainly concerned with +// ensuring a consistent state of chain consensus. The reactive and network- +// interfacing processes are part of other components, such as the SyncManager +// (which owns the sync scheduler and sync workers), ChainExchange, the HELLO +// protocol, and the gossipsub block propagation layer. +// +// {hint/concept} The fork-choice rule as it currently stands is: "pick the +// chain with the heaviest weight, so long as it hasn’t deviated one finality +// threshold from our head (900 epochs, parameter determined by spec-actors)". type Syncer struct { // The interface for accessing and putting tipsets into local storage store *store.ChainStore + // handle to the random beacon for verification + beacon beacon.Schedule + // the state manager handles making state queries sm *stmgr.StateManager @@ -57,23 +109,36 @@ type Syncer struct { bad *BadBlockCache // handle to the block sync service - Bsync *blocksync.BlockSync + Exchange exchange.Client self peer.ID - syncmgr *SyncManager + syncmgr SyncManager connmgr connmgr.ConnManager incoming *pubsub.PubSub receiptTracker *blockReceiptTracker + + verifier ffiwrapper.Verifier + + tickerCtxCancel context.CancelFunc + + checkptLk sync.Mutex + + checkpt types.TipSetKey + + ds dtypes.MetadataDS } -func NewSyncer(sm *stmgr.StateManager, bsync *blocksync.BlockSync, connmgr connmgr.ConnManager, self peer.ID) (*Syncer, error) { +type SyncManagerCtor func(syncFn SyncFunc) SyncManager + +// NewSyncer creates a new Syncer object. 
+func NewSyncer(ds dtypes.MetadataDS, sm *stmgr.StateManager, exchange exchange.Client, syncMgrCtor SyncManagerCtor, connmgr connmgr.ConnManager, self peer.ID, beacon beacon.Schedule, verifier ffiwrapper.Verifier) (*Syncer, error) { gen, err := sm.ChainStore().GetGenesis() if err != nil { - return nil, err + return nil, xerrors.Errorf("getting genesis block: %w", err) } gent, err := types.NewTipSet([]*types.BlockHeader{gen}) @@ -81,29 +146,68 @@ func NewSyncer(sm *stmgr.StateManager, bsync *blocksync.BlockSync, connmgr connm return nil, err } + cp, err := loadCheckpoint(ds) + if err != nil { + return nil, xerrors.Errorf("error loading mpool config: %w", err) + } + s := &Syncer{ + ds: ds, + checkpt: cp, + beacon: beacon, bad: NewBadBlockCache(), Genesis: gent, - Bsync: bsync, + Exchange: exchange, store: sm.ChainStore(), sm: sm, self: self, receiptTracker: newBlockReceiptTracker(), connmgr: connmgr, + verifier: verifier, incoming: pubsub.New(50), } - s.syncmgr = NewSyncManager(s.Sync) + if build.InsecurePoStValidation { + log.Warn("*********************************************************************************************") + log.Warn(" [INSECURE-POST-VALIDATION] Insecure test validation is enabled. If you see this outside of a test, it is a severe bug! 
") + log.Warn("*********************************************************************************************") + } + + s.syncmgr = syncMgrCtor(s.Sync) return s, nil } func (syncer *Syncer) Start() { + tickerCtx, tickerCtxCancel := context.WithCancel(context.Background()) syncer.syncmgr.Start() + + syncer.tickerCtxCancel = tickerCtxCancel + + go syncer.runMetricsTricker(tickerCtx) +} + +func (syncer *Syncer) runMetricsTricker(tickerCtx context.Context) { + genesisTime := time.Unix(int64(syncer.Genesis.MinTimestamp()), 0) + ticker := build.Clock.Ticker(time.Duration(build.BlockDelaySecs) * time.Second) + defer ticker.Stop() + + for { + select { + case <-ticker.C: + sinceGenesis := build.Clock.Now().Sub(genesisTime) + expectedHeight := int64(sinceGenesis.Seconds()) / int64(build.BlockDelaySecs) + + stats.Record(tickerCtx, metrics.ChainNodeHeightExpected.M(expectedHeight)) + case <-tickerCtx.Done(): + return + } + } } func (syncer *Syncer) Stop() { syncer.syncmgr.Stop() + syncer.tickerCtxCancel() } // InformNewHead informs the syncer about a new potential tipset @@ -116,7 +220,16 @@ func (syncer *Syncer) InformNewHead(from peer.ID, fts *store.FullTipSet) bool { return false } + if syncer.IsEpochBeyondCurrMax(fts.TipSet().Height()) { + log.Errorf("Received block with impossibly large height %d", fts.TipSet().Height()) + return false + } + for _, b := range fts.Blocks { + if reason, ok := syncer.bad.Has(b.Cid()); ok { + log.Warnf("InformNewHead called on block marked as bad: %s (reason: %s)", b.Cid(), reason) + return false + } if err := syncer.ValidateMsgMeta(b); err != nil { log.Warnf("invalid block received: %s", err) return false @@ -144,10 +257,10 @@ func (syncer *Syncer) InformNewHead(from peer.ID, fts *store.FullTipSet) bool { return false } - syncer.Bsync.AddPeer(from) + syncer.Exchange.AddPeer(from) - bestPweight := syncer.store.GetHeaviestTipSet().Blocks()[0].ParentWeight - targetWeight := fts.TipSet().Blocks()[0].ParentWeight + bestPweight := 
syncer.store.GetHeaviestTipSet().ParentWeight() + targetWeight := fts.TipSet().ParentWeight() if targetWeight.LessThan(bestPweight) { var miners []string for _, blk := range fts.TipSet().Blocks() { @@ -161,6 +274,11 @@ func (syncer *Syncer) InformNewHead(from peer.ID, fts *store.FullTipSet) bool { return true } +// IncomingBlocks spawns a goroutine that subscribes to the local eventbus to +// receive new block headers as they arrive from the network, and sends them to +// the returned channel. +// +// These blocks have not necessarily been incorporated to our view of the chain. func (syncer *Syncer) IncomingBlocks(ctx context.Context) (<-chan *types.BlockHeader, error) { sub := syncer.incoming.Sub(LocalIncoming) out := make(chan *types.BlockHeader, 10) @@ -188,16 +306,22 @@ func (syncer *Syncer) IncomingBlocks(ctx context.Context) (<-chan *types.BlockHe return out, nil } +// ValidateMsgMeta performs structural and content hash validation of the +// messages within this block. If validation passes, it stores the messages in +// the underlying IPLD block store. func (syncer *Syncer) ValidateMsgMeta(fblk *types.FullBlock) error { - var bcids, scids []cbg.CBORMarshaler + if msgc := len(fblk.BlsMessages) + len(fblk.SecpkMessages); msgc > build.BlockMessageLimit { + return xerrors.Errorf("block %s has too many messages (%d)", fblk.Header.Cid(), msgc) + } + + // Collect the CIDs of both types of messages separately: BLS and Secpk. + var bcids, scids []cid.Cid for _, m := range fblk.BlsMessages { - c := cbg.CborCid(m.Cid()) - bcids = append(bcids, &c) + bcids = append(bcids, m.Cid()) } for _, m := range fblk.SecpkMessages { - c := cbg.CborCid(m.Cid()) - scids = append(scids, &c) + scids = append(scids, m.Cid()) } // TODO: IMPORTANT(GARBAGE). 
These message puts and the msgmeta @@ -205,12 +329,15 @@ func (syncer *Syncer) ValidateMsgMeta(fblk *types.FullBlock) error { // we implement that blockstore := syncer.store.Blockstore() - bs := amt.WrapBlockstore(blockstore) + bs := cbor.NewCborStore(blockstore) + + // Compute the root CID of the combined message trie. smroot, err := computeMsgMeta(bs, bcids, scids) if err != nil { return xerrors.Errorf("validating msgmeta, compute failed: %w", err) } + // Check that the message trie root matches with what's in the block. if fblk.Header.Messages != smroot { return xerrors.Errorf("messages in full block did not match msgmeta root in header (%s != %s)", fblk.Header.Messages, smroot) } @@ -248,21 +375,28 @@ func (syncer *Syncer) InformNewBlock(from peer.ID, blk *types.FullBlock) bool { return syncer.InformNewHead(from, fts) } -func copyBlockstore(from, to bstore.Blockstore) error { - cids, err := from.AllKeysChan(context.TODO()) +func copyBlockstore(ctx context.Context, from, to bstore.Blockstore) error { + ctx, span := trace.StartSpan(ctx, "copyBlockstore") + defer span.End() + + cids, err := from.AllKeysChan(ctx) if err != nil { return err } + // TODO: should probably expose better methods on the blockstore for this operation + var blks []blocks.Block for c := range cids { b, err := from.Get(c) if err != nil { return err } - if err := to.Put(b); err != nil { - return err - } + blks = append(blks, b) + } + + if err := to.PutMany(blks); err != nil { + return err } return nil @@ -272,27 +406,29 @@ func copyBlockstore(from, to bstore.Blockstore) error { // either validate it here, or ensure that its validated elsewhere (maybe make // sure the blocksync code checks it?) // maybe this code should actually live in blocksync?? 
-func zipTipSetAndMessages(bs amt.Blocks, ts *types.TipSet, allbmsgs []*types.Message, allsmsgs []*types.SignedMessage, bmi, smi [][]uint64) (*store.FullTipSet, error) { +func zipTipSetAndMessages(bs cbor.IpldStore, ts *types.TipSet, allbmsgs []*types.Message, allsmsgs []*types.SignedMessage, bmi, smi [][]uint64) (*store.FullTipSet, error) { if len(ts.Blocks()) != len(smi) || len(ts.Blocks()) != len(bmi) { return nil, fmt.Errorf("msgincl length didnt match tipset size") } fts := &store.FullTipSet{} for bi, b := range ts.Blocks() { + if msgc := len(bmi[bi]) + len(smi[bi]); msgc > build.BlockMessageLimit { + return nil, fmt.Errorf("block %q has too many messages (%d)", b.Cid(), msgc) + } + var smsgs []*types.SignedMessage - var smsgCids []cbg.CBORMarshaler + var smsgCids []cid.Cid for _, m := range smi[bi] { smsgs = append(smsgs, allsmsgs[m]) - c := cbg.CborCid(allsmsgs[m].Cid()) - smsgCids = append(smsgCids, &c) + smsgCids = append(smsgCids, allsmsgs[m].Cid()) } var bmsgs []*types.Message - var bmsgCids []cbg.CBORMarshaler + var bmsgCids []cid.Cid for _, m := range bmi[bi] { bmsgs = append(bmsgs, allbmsgs[m]) - c := cbg.CborCid(allbmsgs[m].Cid()) - bmsgCids = append(bmsgCids, &c) + bmsgCids = append(bmsgCids, allbmsgs[m].Cid()) } mrcid, err := computeMsgMeta(bs, bmsgCids, smsgCids) @@ -301,7 +437,7 @@ func zipTipSetAndMessages(bs amt.Blocks, ts *types.TipSet, allbmsgs []*types.Mes } if b.Messages != mrcid { - return nil, fmt.Errorf("messages didnt match message root in header") + return nil, fmt.Errorf("messages didnt match message root in header for ts %s", ts.Key()) } fb := &types.FullBlock{ @@ -316,18 +452,38 @@ func zipTipSetAndMessages(bs amt.Blocks, ts *types.TipSet, allbmsgs []*types.Mes return fts, nil } -func computeMsgMeta(bs amt.Blocks, bmsgCids, smsgCids []cbg.CBORMarshaler) (cid.Cid, error) { - bmroot, err := amt.FromArray(bs, bmsgCids) +// computeMsgMeta computes the root CID of the combined arrays of message CIDs +// of both types (BLS and Secpk). 
+func computeMsgMeta(bs cbor.IpldStore, bmsgCids, smsgCids []cid.Cid) (cid.Cid, error) { + store := adt.WrapStore(context.TODO(), bs) + bmArr := adt.MakeEmptyArray(store) + smArr := adt.MakeEmptyArray(store) + + for i, m := range bmsgCids { + c := cbg.CborCid(m) + if err := bmArr.Set(uint64(i), &c); err != nil { + return cid.Undef, err + } + } + + for i, m := range smsgCids { + c := cbg.CborCid(m) + if err := smArr.Set(uint64(i), &c); err != nil { + return cid.Undef, err + } + } + + bmroot, err := bmArr.Root() if err != nil { return cid.Undef, err } - smroot, err := amt.FromArray(bs, smsgCids) + smroot, err := smArr.Root() if err != nil { return cid.Undef, err } - mrcid, err := bs.Put(&types.MsgMeta{ + mrcid, err := store.Put(store.Context(), &types.MsgMeta{ BlsMessages: bmroot, SecpkMessages: smroot, }) @@ -338,14 +494,24 @@ func computeMsgMeta(bs amt.Blocks, bmsgCids, smsgCids []cbg.CBORMarshaler) (cid. return mrcid, nil } +// FetchTipSet tries to load the provided tipset from the store, and falls back +// to the network (client) by querying the supplied peer if not found +// locally. +// +// {hint/usage} This is used from the HELLO protocol, to fetch the greeting +// peer's heaviest tipset if we don't have it. func (syncer *Syncer) FetchTipSet(ctx context.Context, p peer.ID, tsk types.TipSetKey) (*store.FullTipSet, error) { if fts, err := syncer.tryLoadFullTipSet(tsk); err == nil { return fts, nil } - return syncer.Bsync.GetFullTipSet(ctx, p, tsk) + // fall back to the network. + return syncer.Exchange.GetFullTipSet(ctx, p, tsk) } +// tryLoadFullTipSet queries the tipset in the ChainStore, and returns a full +// representation of it containing FullBlocks. If ALL blocks are not found +// locally, it errors entirely with blockstore.ErrNotFound. 
func (syncer *Syncer) tryLoadFullTipSet(tsk types.TipSetKey) (*store.FullTipSet, error) { ts, err := syncer.store.LoadTipSet(tsk) if err != nil { @@ -370,6 +536,12 @@ func (syncer *Syncer) tryLoadFullTipSet(tsk types.TipSetKey) (*store.FullTipSet, return fts, nil } +// Sync tries to advance our view of the chain to `maybeHead`. It does nothing +// if our current head is heavier than the requested tipset, or if we're already +// at the requested head, or if the head is the genesis. +// +// Most of the heavy-lifting logic happens in syncer#collectChain. Refer to the +// godocs on that method for a more detailed view. func (syncer *Syncer) Sync(ctx context.Context, maybeHead *types.TipSet) error { ctx, span := trace.StartSpan(ctx, "chain.Sync") defer span.End() @@ -398,6 +570,8 @@ func (syncer *Syncer) Sync(ctx context.Context, maybeHead *types.TipSet) error { return xerrors.Errorf("collectChain failed: %w", err) } + // At this point we have accepted and synced to the new `maybeHead` + // (`StageSyncComplete`). 
if err := syncer.store.PutTipSet(ctx, maybeHead); err != nil { span.AddAttributes(trace.StringAttribute("put_error", err.Error())) span.SetStatus(trace.Status{ @@ -434,43 +608,49 @@ func (syncer *Syncer) ValidateTipSet(ctx context.Context, fts *store.FullTipSet) return nil } + var futures []async.ErrorFuture for _, b := range fts.Blocks { - if err := syncer.ValidateBlock(ctx, b); err != nil { - if isPermanent(err) { - syncer.bad.Add(b.Cid()) - } - return xerrors.Errorf("validating block %s: %w", b.Cid(), err) - } + b := b // rebind to a scoped variable - if err := syncer.sm.ChainStore().AddToTipSetTracker(b.Header); err != nil { - return xerrors.Errorf("failed to add validated header to tipset tracker: %w", err) + futures = append(futures, async.Err(func() error { + if err := syncer.ValidateBlock(ctx, b); err != nil { + if isPermanent(err) { + syncer.bad.Add(b.Cid(), NewBadBlockReason([]cid.Cid{b.Cid()}, err.Error())) + } + return xerrors.Errorf("validating block %s: %w", b.Cid(), err) + } + + if err := syncer.sm.ChainStore().AddToTipSetTracker(b.Header); err != nil { + return xerrors.Errorf("failed to add validated header to tipset tracker: %w", err) + } + return nil + })) + } + for _, f := range futures { + if err := f.AwaitContext(ctx); err != nil { + return err } } return nil } func (syncer *Syncer) minerIsValid(ctx context.Context, maddr address.Address, baseTs *types.TipSet) error { - var err error - enc, err := actors.SerializeParams(&actors.IsValidMinerParam{Addr: maddr}) + act, err := syncer.sm.LoadActor(ctx, power.Address, baseTs) if err != nil { - return err + return xerrors.Errorf("failed to load power actor: %w", err) } - ret, err := syncer.sm.Call(ctx, &types.Message{ - To: actors.StoragePowerAddress, - From: maddr, - Method: actors.SPAMethods.IsValidMiner, - Params: enc, - }, baseTs) + powState, err := power.Load(syncer.store.Store(ctx), act) if err != nil { - return xerrors.Errorf("checking if block miner is valid failed: %w", err) + return 
xerrors.Errorf("failed to load power actor state: %w", err) } - if ret.ExitCode != 0 { - return xerrors.Errorf("StorageMarket.IsValidMiner check failed (exit code %d)", ret.ExitCode) + _, exist, err := powState.MinerPower(maddr) + if err != nil { + return xerrors.Errorf("failed to look up miner's claim: %w", err) } - if !bytes.Equal(ret.Return, cbg.CborBoolTrue) { + if !exist { return xerrors.New("miner isn't valid") } @@ -479,12 +659,56 @@ func (syncer *Syncer) minerIsValid(ctx context.Context, maddr address.Address, b var ErrTemporal = errors.New("temporal error") -// Should match up with 'Semantical Validation' in validation.md in the spec -func (syncer *Syncer) ValidateBlock(ctx context.Context, b *types.FullBlock) error { +func blockSanityChecks(h *types.BlockHeader) error { + if h.ElectionProof == nil { + return xerrors.Errorf("block cannot have nil election proof") + } + + if h.Ticket == nil { + return xerrors.Errorf("block cannot have nil ticket") + } + + if h.BlockSig == nil { + return xerrors.Errorf("block had nil signature") + } + + if h.BLSAggregate == nil { + return xerrors.Errorf("block had nil bls aggregate signature") + } + + return nil +} + +// ValidateBlock should match up with 'Semantical Validation' in validation.md in the spec +func (syncer *Syncer) ValidateBlock(ctx context.Context, b *types.FullBlock) (err error) { + defer func() { + // b.Cid() could panic for empty blocks that are used in tests. 
+ if rerr := recover(); rerr != nil { + err = xerrors.Errorf("validate block panic: %w", rerr) + return + } + }() + + isValidated, err := syncer.store.IsBlockValidated(ctx, b.Cid()) + if err != nil { + return xerrors.Errorf("check block validation cache %s: %w", b.Cid(), err) + } + + if isValidated { + return nil + } + + validationStart := build.Clock.Now() + defer func() { + stats.Record(ctx, metrics.BlockValidationDurationMilliseconds.M(metrics.SinceInMilliseconds(validationStart))) + log.Infow("block validation", "took", time.Since(validationStart), "height", b.Header.Height, "age", time.Since(time.Unix(int64(b.Header.Timestamp), 0))) + }() + ctx, span := trace.StartSpan(ctx, "validateBlock") defer span.End() - if build.InsecurePoStValidation { - log.Warn("insecure test validation is enabled, if you see this outside of a test, it is a severe bug!") + + if err := blockSanityChecks(b.Header); err != nil { + return xerrors.Errorf("incoming header failed basic sanity checks: %w", err) } h := b.Header @@ -494,61 +718,35 @@ func (syncer *Syncer) ValidateBlock(ctx context.Context, b *types.FullBlock) err return xerrors.Errorf("load parent tipset failed (%s): %w", h.Parents, err) } + lbts, err := stmgr.GetLookbackTipSetForRound(ctx, syncer.sm, baseTs, h.Height) + if err != nil { + return xerrors.Errorf("failed to get lookback tipset for block: %w", err) + } + + lbst, _, err := syncer.sm.TipSetState(ctx, lbts) + if err != nil { + return xerrors.Errorf("failed to compute lookback tipset state: %w", err) + } + + prevBeacon, err := syncer.store.GetLatestBeaconEntry(baseTs) + if err != nil { + return xerrors.Errorf("failed to get latest beacon entry: %w", err) + } + // fast checks first - if h.BlockSig == nil { - return xerrors.Errorf("block had nil signature") + nulls := h.Height - (baseTs.Height() + 1) + if tgtTs := baseTs.MinTimestamp() + build.BlockDelaySecs*uint64(nulls+1); h.Timestamp != tgtTs { + return xerrors.Errorf("block has wrong timestamp: %d != %d", 
h.Timestamp, tgtTs) } - if h.Timestamp > uint64(time.Now().Unix()+build.AllowableClockDrift) { - return xerrors.Errorf("block was from the future: %w", ErrTemporal) + now := uint64(build.Clock.Now().Unix()) + if h.Timestamp > now+build.AllowableClockDriftSecs { + return xerrors.Errorf("block was from the future (now=%d, blk=%d): %w", now, h.Timestamp, ErrTemporal) } - if h.Timestamp > uint64(time.Now().Unix()) { - log.Warn("Got block from the future, but within threshold", h.Timestamp, time.Now().Unix()) + if h.Timestamp > now { + log.Warn("Got block from the future, but within threshold", h.Timestamp, build.Clock.Now().Unix()) } - if h.Timestamp < baseTs.MinTimestamp()+(build.BlockDelay*(h.Height-baseTs.Height())) { - log.Warn("timestamp funtimes: ", h.Timestamp, baseTs.MinTimestamp(), h.Height, baseTs.Height()) - return xerrors.Errorf("block was generated too soon (h.ts:%d < base.mints:%d + BLOCK_DELAY:%d * deltaH:%d)", h.Timestamp, baseTs.MinTimestamp(), build.BlockDelay, h.Height-baseTs.Height()) - } - - winnerCheck := async.Err(func() error { - slashedAt, err := stmgr.GetMinerSlashed(ctx, syncer.sm, baseTs, h.Miner) - if err != nil { - return xerrors.Errorf("failed to check if block miner was slashed: %w", err) - } - - if slashedAt != 0 { - return xerrors.Errorf("received block was from miner slashed at height %d", slashedAt) - } - - mpow, tpow, err := stmgr.GetPower(ctx, syncer.sm, baseTs, h.Miner) - if err != nil { - return xerrors.Errorf("failed getting power: %w", err) - } - - ssize, err := stmgr.GetMinerSectorSize(ctx, syncer.sm, baseTs, h.Miner) - if err != nil { - return xerrors.Errorf("failed to get sector size for block miner: %w", err) - } - - snum := types.BigDiv(mpow, types.NewInt(ssize)) - - // FORK START - if h.Height > build.ForkCCM { - if len(h.EPostProof.Candidates) == 0 { - return xerrors.Errorf("no candidates") - } - } - // FORK END - - for _, t := range h.EPostProof.Candidates { - if !types.IsTicketWinner(t.Partial, ssize, snum.Uint64(), 
tpow) { - return xerrors.Errorf("miner created a block but was not a winner") - } - } - return nil - }) - msgsCheck := async.Err(func() error { if err := syncer.checkBlockMessages(ctx, b, baseTs); err != nil { return xerrors.Errorf("block had invalid messages: %w", err) @@ -563,6 +761,27 @@ func (syncer *Syncer) ValidateBlock(ctx context.Context, b *types.FullBlock) err return nil }) + baseFeeCheck := async.Err(func() error { + baseFee, err := syncer.store.ComputeBaseFee(ctx, baseTs) + if err != nil { + return xerrors.Errorf("computing base fee: %w", err) + } + if types.BigCmp(baseFee, b.Header.ParentBaseFee) != 0 { + return xerrors.Errorf("base fee doesn't match: %s (header) != %s (computed)", + b.Header.ParentBaseFee, baseFee) + } + return nil + }) + pweight, err := syncer.store.Weight(ctx, baseTs) + if err != nil { + return xerrors.Errorf("getting parent weight: %w", err) + } + + if types.BigCmp(pweight, b.Header.ParentWeight) != 0 { + return xerrors.Errorf("parrent weight different: %s (header) != %s (computed)", + b.Header.ParentWeight, pweight) + } + // Stuff that needs stateroot / worker address stateroot, precp, err := syncer.sm.TipSetState(ctx, baseTs) if err != nil { @@ -588,31 +807,112 @@ func (syncer *Syncer) ValidateBlock(ctx context.Context, b *types.FullBlock) err return xerrors.Errorf("parent receipts root did not match computed value (%s != %s)", precp, h.ParentMessageReceipts) } - waddr, err := stmgr.GetMinerWorkerRaw(ctx, syncer.sm, stateroot, h.Miner) + waddr, err := stmgr.GetMinerWorkerRaw(ctx, syncer.sm, lbst, h.Miner) if err != nil { return xerrors.Errorf("GetMinerWorkerRaw failed: %w", err) } + winnerCheck := async.Err(func() error { + if h.ElectionProof.WinCount < 1 { + return xerrors.Errorf("block is not claiming to be a winner") + } + + hp, err := stmgr.MinerHasMinPower(ctx, syncer.sm, h.Miner, lbts) + if err != nil { + return xerrors.Errorf("determining if miner has min power failed: %w", err) + } + + if !hp { + return 
xerrors.New("block's miner does not meet minimum power threshold") + } + + rBeacon := *prevBeacon + if len(h.BeaconEntries) != 0 { + rBeacon = h.BeaconEntries[len(h.BeaconEntries)-1] + } + buf := new(bytes.Buffer) + if err := h.Miner.MarshalCBOR(buf); err != nil { + return xerrors.Errorf("failed to marshal miner address to cbor: %w", err) + } + + vrfBase, err := store.DrawRandomness(rBeacon.Data, crypto.DomainSeparationTag_ElectionProofProduction, h.Height, buf.Bytes()) + if err != nil { + return xerrors.Errorf("could not draw randomness: %w", err) + } + + if err := VerifyElectionPoStVRF(ctx, waddr, vrfBase, h.ElectionProof.VRFProof); err != nil { + return xerrors.Errorf("validating block election proof failed: %w", err) + } + + slashed, err := stmgr.GetMinerSlashed(ctx, syncer.sm, baseTs, h.Miner) + if err != nil { + return xerrors.Errorf("failed to check if block miner was slashed: %w", err) + } + + if slashed { + return xerrors.Errorf("received block was from slashed or invalid miner") + } + + mpow, tpow, _, err := stmgr.GetPowerRaw(ctx, syncer.sm, lbst, h.Miner) + if err != nil { + return xerrors.Errorf("failed getting power: %w", err) + } + + j := h.ElectionProof.ComputeWinCount(mpow.QualityAdjPower, tpow.QualityAdjPower) + if h.ElectionProof.WinCount != j { + return xerrors.Errorf("miner claims wrong number of wins: miner: %d, computed: %d", h.ElectionProof.WinCount, j) + } + + return nil + }) + blockSigCheck := async.Err(func() error { - if err := h.CheckBlockSignature(ctx, waddr); err != nil { + if err := sigs.CheckBlockSignature(ctx, h, waddr); err != nil { return xerrors.Errorf("check block signature failed: %w", err) } return nil }) + beaconValuesCheck := async.Err(func() error { + if os.Getenv("LOTUS_IGNORE_DRAND") == "_yes_" { + return nil + } + + if err := beacon.ValidateBlockValues(syncer.beacon, h, baseTs.Height(), *prevBeacon); err != nil { + return xerrors.Errorf("failed to validate blocks random beacon values: %w", err) + } + return nil + }) + 
tktsCheck := async.Err(func() error { - vrfBase := baseTs.MinTicket().VRFProof + buf := new(bytes.Buffer) + if err := h.Miner.MarshalCBOR(buf); err != nil { + return xerrors.Errorf("failed to marshal miner address to cbor: %w", err) + } - err := gen.VerifyVRF(ctx, waddr, h.Miner, gen.DSepTicket, vrfBase, h.Ticket.VRFProof) + if h.Height > build.UpgradeSmokeHeight { + buf.Write(baseTs.MinTicket().VRFProof) + } + beaconBase := *prevBeacon + if len(h.BeaconEntries) != 0 { + beaconBase = h.BeaconEntries[len(h.BeaconEntries)-1] + } + + vrfBase, err := store.DrawRandomness(beaconBase.Data, crypto.DomainSeparationTag_TicketProduction, h.Height-build.TicketRandomnessLookback, buf.Bytes()) + if err != nil { + return xerrors.Errorf("failed to compute vrf base for ticket: %w", err) + } + + err = VerifyElectionPoStVRF(ctx, waddr, vrfBase, h.Ticket.VRFProof) if err != nil { return xerrors.Errorf("validating block tickets failed: %w", err) } return nil }) - eproofCheck := async.Err(func() error { - if err := syncer.VerifyElectionPoStProof(ctx, h, baseTs, waddr); err != nil { + wproofCheck := async.Err(func() error { + if err := syncer.VerifyWinningPoStProof(ctx, h, *prevBeacon, lbst, waddr); err != nil { return xerrors.Errorf("invalid election post: %w", err) } return nil @@ -622,9 +922,11 @@ func (syncer *Syncer) ValidateBlock(ctx context.Context, b *types.FullBlock) err minerCheck, tktsCheck, blockSigCheck, - eproofCheck, + beaconValuesCheck, + wproofCheck, winnerCheck, msgsCheck, + baseFeeCheck, } var merr error @@ -633,72 +935,92 @@ func (syncer *Syncer) ValidateBlock(ctx context.Context, b *types.FullBlock) err merr = multierror.Append(merr, err) } } + if merr != nil { + mulErr := merr.(*multierror.Error) + mulErr.ErrorFormat = func(es []error) string { + if len(es) == 1 { + return fmt.Sprintf("1 error occurred:\n\t* %+v\n\n", es[0]) + } - return merr -} + points := make([]string, len(es)) + for i, err := range es { + points[i] = fmt.Sprintf("* %+v", err) + } -func (syncer 
*Syncer) VerifyElectionPoStProof(ctx context.Context, h *types.BlockHeader, baseTs *types.TipSet, waddr address.Address) error { - rand, err := syncer.sm.ChainStore().GetRandomness(ctx, baseTs.Cids(), int64(h.Height-build.EcRandomnessLookback)) - if err != nil { - return xerrors.Errorf("failed to get randomness for verifying election proof: %w", err) - } - - if err := VerifyElectionPoStVRF(ctx, h.EPostProof.PostRand, rand, waddr, h.Miner); err != nil { - return xerrors.Errorf("checking eproof failed: %w", err) - } - - ssize, err := stmgr.GetMinerSectorSize(ctx, syncer.sm, baseTs, h.Miner) - if err != nil { - return xerrors.Errorf("failed to get sector size for miner: %w", err) - } - - var winners []sectorbuilder.EPostCandidate - for _, t := range h.EPostProof.Candidates { - var partial [32]byte - copy(partial[:], t.Partial) - winners = append(winners, sectorbuilder.EPostCandidate{ - PartialTicket: partial, - SectorID: t.SectorID, - SectorChallengeIndex: t.ChallengeIndex, - }) - } - // FORK START - if h.Height > build.ForkCCM { - if len(winners) == 0 { - return xerrors.Errorf("no candidates") + return fmt.Sprintf( + "%d errors occurred:\n\t%s\n\n", + len(es), strings.Join(points, "\n\t")) } - } - // FORK END - - sectorInfo, err := stmgr.GetSectorsForElectionPost(ctx, syncer.sm, baseTs, h.Miner) - if err != nil { - return xerrors.Errorf("getting election post sector set: %w", err) + return mulErr } - if build.InsecurePoStValidation { - if string(h.EPostProof.Proof) == "valid proof" { - return nil - } - return xerrors.Errorf("[TESTING] election post was invalid") - } - hvrf := sha256.Sum256(h.EPostProof.PostRand) - - ok, err := sectorbuilder.VerifyElectionPost(ctx, ssize, *sectorInfo, hvrf[:], h.EPostProof.Proof, winners, h.Miner) - if err != nil { - return xerrors.Errorf("failed to verify election post: %w", err) - } - - if !ok { - return xerrors.Errorf("election post was invalid") + if err := syncer.store.MarkBlockAsValidated(ctx, b.Cid()); err != nil { + return 
xerrors.Errorf("caching block validation %s: %w", b.Cid(), err) } return nil } +func (syncer *Syncer) VerifyWinningPoStProof(ctx context.Context, h *types.BlockHeader, prevBeacon types.BeaconEntry, lbst cid.Cid, waddr address.Address) error { + if build.InsecurePoStValidation { + if len(h.WinPoStProof) == 0 { + return xerrors.Errorf("[INSECURE-POST-VALIDATION] No winning post proof given") + } + + if string(h.WinPoStProof[0].ProofBytes) == "valid proof" { + return nil + } + return xerrors.Errorf("[INSECURE-POST-VALIDATION] winning post was invalid") + } + + buf := new(bytes.Buffer) + if err := h.Miner.MarshalCBOR(buf); err != nil { + return xerrors.Errorf("failed to marshal miner address: %w", err) + } + + rbase := prevBeacon + if len(h.BeaconEntries) > 0 { + rbase = h.BeaconEntries[len(h.BeaconEntries)-1] + } + + rand, err := store.DrawRandomness(rbase.Data, crypto.DomainSeparationTag_WinningPoStChallengeSeed, h.Height, buf.Bytes()) + if err != nil { + return xerrors.Errorf("failed to get randomness for verifying winning post proof: %w", err) + } + + mid, err := address.IDFromAddress(h.Miner) + if err != nil { + return xerrors.Errorf("failed to get ID from miner address %s: %w", h.Miner, err) + } + + sectors, err := stmgr.GetSectorsForWinningPoSt(ctx, syncer.verifier, syncer.sm, lbst, h.Miner, rand) + if err != nil { + return xerrors.Errorf("getting winning post sector set: %w", err) + } + + ok, err := ffiwrapper.ProofVerifier.VerifyWinningPoSt(ctx, proof.WinningPoStVerifyInfo{ + Randomness: rand, + Proofs: h.WinPoStProof, + ChallengedSectors: sectors, + Prover: abi.ActorID(mid), + }) + if err != nil { + return xerrors.Errorf("failed to verify election post: %w", err) + } + + if !ok { + log.Errorf("invalid winning post (block: %s, %x; %v)", h.Cid(), rand, sectors) + return xerrors.Errorf("winning post was invalid") + } + + return nil +} + +// TODO: We should extract this somewhere else and make the message pool and miner use the same logic func (syncer *Syncer) 
checkBlockMessages(ctx context.Context, b *types.FullBlock, baseTs *types.TipSet) error { { var sigCids []cid.Cid // this is what we get for people not wanting the marshalcbor method on the cid type - var pubks []bls.PublicKey + var pubks [][]byte for _, m := range b.BlsMessages { sigCids = append(sigCids, m.Cid()) @@ -717,31 +1039,49 @@ func (syncer *Syncer) checkBlockMessages(ctx context.Context, b *types.FullBlock } nonces := make(map[address.Address]uint64) - balances := make(map[address.Address]types.BigInt) stateroot, _, err := syncer.sm.TipSetState(ctx, baseTs) if err != nil { return err } - cst := hamt.CSTFromBstore(syncer.store.Blockstore()) + cst := cbor.NewCborStore(syncer.store.Blockstore()) st, err := state.LoadStateTree(cst, stateroot) if err != nil { return xerrors.Errorf("failed to load base state tree: %w", err) } - checkMsg := func(m *types.Message) error { - if m.To == address.Undef { - return xerrors.New("'To' address cannot be empty") + pl := vm.PricelistByEpoch(baseTs.Height()) + var sumGasLimit int64 + checkMsg := func(msg types.ChainMsg) error { + m := msg.VMMessage() + + // Phase 1: syntactic validation, as defined in the spec + minGas := pl.OnChainMessage(msg.ChainLength()) + if err := m.ValidForBlockInclusion(minGas.Total()); err != nil { + return err } + // ValidForBlockInclusion checks if any single message does not exceed BlockGasLimit + // So below is overflow safe + sumGasLimit += m.GasLimit + if sumGasLimit > build.BlockGasLimit { + return xerrors.Errorf("block gas limit exceeded") + } + + // Phase 2: (Partial) semantic validation: + // the sender exists and is an account actor, and the nonces make sense if _, ok := nonces[m.From]; !ok { + // `GetActor` does not validate that this is an account actor. 
act, err := st.GetActor(m.From) if err != nil { return xerrors.Errorf("failed to get actor: %w", err) } + + if !act.IsAccountActor() { + return xerrors.New("Sender must be an account actor") + } nonces[m.From] = act.Nonce - balances[m.From] = act.Balance } if nonces[m.From] != m.Nonce { @@ -749,56 +1089,57 @@ func (syncer *Syncer) checkBlockMessages(ctx context.Context, b *types.FullBlock } nonces[m.From]++ - if balances[m.From].LessThan(m.RequiredFunds()) { - return xerrors.Errorf("not enough funds for message execution") - } - - balances[m.From] = types.BigSub(balances[m.From], m.RequiredFunds()) return nil } - bs := amt.WrapBlockstore(syncer.store.Blockstore()) - var blsCids []cbg.CBORMarshaler + store := adt.WrapStore(ctx, cst) + bmArr := adt.MakeEmptyArray(store) for i, m := range b.BlsMessages { if err := checkMsg(m); err != nil { return xerrors.Errorf("block had invalid bls message at index %d: %w", i, err) } c := cbg.CborCid(m.Cid()) - blsCids = append(blsCids, &c) + if err := bmArr.Set(uint64(i), &c); err != nil { + return xerrors.Errorf("failed to put bls message at index %d: %w", i, err) + } } - var secpkCids []cbg.CBORMarshaler + smArr := adt.MakeEmptyArray(store) for i, m := range b.SecpkMessages { - if err := checkMsg(&m.Message); err != nil { + if err := checkMsg(m); err != nil { return xerrors.Errorf("block had invalid secpk message at index %d: %w", i, err) } + // `From` being an account actor is only validated inside the `vm.ResolveToKeyAddr` call + // in `StateManager.ResolveToKeyAddress` here (and not in `checkMsg`). 
kaddr, err := syncer.sm.ResolveToKeyAddress(ctx, m.Message.From, baseTs) if err != nil { return xerrors.Errorf("failed to resolve key addr: %w", err) } - if err := m.Signature.Verify(kaddr, m.Message.Cid().Bytes()); err != nil { + if err := sigs.Verify(&m.Signature, kaddr, m.Message.Cid().Bytes()); err != nil { return xerrors.Errorf("secpk message %s has invalid signature: %w", m.Cid(), err) } c := cbg.CborCid(m.Cid()) - secpkCids = append(secpkCids, &c) + if err := smArr.Set(uint64(i), &c); err != nil { + return xerrors.Errorf("failed to put secpk message at index %d: %w", i, err) + } } - bmroot, err := amt.FromArray(bs, blsCids) + bmroot, err := bmArr.Root() if err != nil { - return xerrors.Errorf("failed to build amt from bls msg cids: %w", err) + return err } - smroot, err := amt.FromArray(bs, secpkCids) + smroot, err := smArr.Root() if err != nil { - return xerrors.Errorf("failed to build amt from bls msg cids: %w", err) + return err } - mrcid, err := bs.Put(&types.MsgMeta{ + mrcid, err := cst.Put(ctx, &types.MsgMeta{ BlsMessages: bmroot, SecpkMessages: smroot, }) @@ -813,33 +1154,27 @@ func (syncer *Syncer) checkBlockMessages(ctx context.Context, b *types.FullBlock return nil } -func (syncer *Syncer) verifyBlsAggregate(ctx context.Context, sig types.Signature, msgs []cid.Cid, pubks []bls.PublicKey) error { +func (syncer *Syncer) verifyBlsAggregate(ctx context.Context, sig *crypto.Signature, msgs []cid.Cid, pubks [][]byte) error { _, span := trace.StartSpan(ctx, "syncer.verifyBlsAggregate") defer span.End() span.AddAttributes( trace.Int64Attribute("msgCount", int64(len(msgs))), ) - var wg sync.WaitGroup - - digests := make([]bls.Digest, len(msgs)) - for i := 0; i < 10; i++ { - wg.Add(1) - go func(w int) { - defer wg.Done() - for j := 0; (j*10)+w < len(msgs); j++ { - digests[j*10+w] = bls.Hash(bls.Message(msgs[j*10+w].Bytes())) - } - }(i) + msgsS := make([]blst.Message, len(msgs)) + for i := 0; i < len(msgs); i++ { + msgsS[i] = msgs[i].Bytes() } - wg.Wait() - 
var bsig bls.Signature - copy(bsig[:], sig.Data) - if !bls.Verify(&bsig, digests, pubks) { + if len(msgs) == 0 { + return nil + } + + valid := new(bls.Signature).AggregateVerifyCompressed(sig.Data, pubks, + msgsS, []byte(bls.DST)) + if !valid { return xerrors.New("bls aggregate signature failed to verify") } - return nil } @@ -853,31 +1188,90 @@ func extractSyncState(ctx context.Context) *SyncerState { return nil } -func (syncer *Syncer) collectHeaders(ctx context.Context, from *types.TipSet, to *types.TipSet) ([]*types.TipSet, error) { +// collectHeaders collects the headers from the blocks between any two tipsets. +// +// `incoming` is the heaviest/projected/target tipset we have learned about, and +// `known` is usually an anchor tipset we already have in our view of the chain +// (which could be the genesis). +// +// collectHeaders checks if portions of the chain are in our ChainStore; falling +// down to the network to retrieve the missing parts. If during the process, any +// portion we receive is in our denylist (bad list), we short-circuit. +// +// {hint/usage}: This is used by collectChain, which is in turn called from the +// main Sync method (Syncer#Sync), so it's a pretty central method. +// +// {hint/logic}: The logic of this method is as follows: +// +// 1. Check that the from tipset is not linked to a parent block known to be +// bad. +// 2. Check the consistency of beacon entries in the from tipset. We check +// total equality of the BeaconEntries in each block. +// 3. Traverse the chain backwards, for each tipset: +// 3a. Load it from the chainstore; if found, it move on to its parent. +// 3b. Query our peers via client in batches, requesting up to a +// maximum of 500 tipsets every time. +// +// Once we've concluded, if we find a mismatching tipset at the height where the +// anchor tipset should be, we are facing a fork, and we invoke Syncer#syncFork +// to resolve it. Refer to the godocs there. 
+// +// All throughout the process, we keep checking if the received blocks are in +// the deny list, and short-circuit the process if so. +func (syncer *Syncer) collectHeaders(ctx context.Context, incoming *types.TipSet, known *types.TipSet) ([]*types.TipSet, error) { ctx, span := trace.StartSpan(ctx, "collectHeaders") defer span.End() ss := extractSyncState(ctx) span.AddAttributes( - trace.Int64Attribute("fromHeight", int64(from.Height())), - trace.Int64Attribute("toHeight", int64(to.Height())), + trace.Int64Attribute("incomingHeight", int64(incoming.Height())), + trace.Int64Attribute("knownHeight", int64(known.Height())), ) - for _, pcid := range from.Parents().Cids() { - if syncer.bad.Has(pcid) { - for _, b := range from.Cids() { - syncer.bad.Add(b) + // Check if the parents of the from block are in the denylist. + // i.e. if a fork of the chain has been requested that we know to be bad. + for _, pcid := range incoming.Parents().Cids() { + if reason, ok := syncer.bad.Has(pcid); ok { + newReason := reason.Linked("linked to %s", pcid) + for _, b := range incoming.Cids() { + syncer.bad.Add(b, newReason) } - return nil, xerrors.Errorf("chain linked to block marked previously as bad (%s, %s)", from.Cids(), pcid) + return nil, xerrors.Errorf("chain linked to block marked previously as bad (%s, %s) (reason: %s)", incoming.Cids(), pcid, reason) } } - blockSet := []*types.TipSet{from} + { + // ensure consistency of beacon entires + targetBE := incoming.Blocks()[0].BeaconEntries + sorted := sort.SliceIsSorted(targetBE, func(i, j int) bool { + return targetBE[i].Round < targetBE[j].Round + }) + if !sorted { + syncer.bad.Add(incoming.Cids()[0], NewBadBlockReason(incoming.Cids(), "wrong order of beacon entires")) + return nil, xerrors.Errorf("wrong order of beacon entires") + } - at := from.Parents() + for _, bh := range incoming.Blocks()[1:] { + if len(targetBE) != len(bh.BeaconEntries) { + // cannot mark bad, I think @Kubuxu + return nil, xerrors.Errorf("tipset contained 
different number for beacon entires") + } + for i, be := range bh.BeaconEntries { + if targetBE[i].Round != be.Round || !bytes.Equal(targetBE[i].Data, be.Data) { + // cannot mark bad, I think @Kubuxu + return nil, xerrors.Errorf("tipset contained different beacon entires") + } + } + + } + } + + blockSet := []*types.TipSet{incoming} + + at := incoming.Parents() // we want to sync all the blocks until the height above the block we have - untilHeight := to.Height() + 1 + untilHeight := known.Height() + 1 ss.SetHeight(blockSet[len(blockSet)-1].Height()) @@ -886,12 +1280,13 @@ func (syncer *Syncer) collectHeaders(ctx context.Context, from *types.TipSet, to loop: for blockSet[len(blockSet)-1].Height() > untilHeight { for _, bc := range at.Cids() { - if syncer.bad.Has(bc) { + if reason, ok := syncer.bad.Has(bc); ok { + newReason := reason.Linked("change contained %s", bc) for _, b := range acceptedBlocks { - syncer.bad.Add(b) + syncer.bad.Add(b, newReason) } - return nil, xerrors.Errorf("chain contained block marked previously as bad (%s, %s)", from.Cids(), bc) + return nil, xerrors.Errorf("chain contained block marked previously as bad (%s, %s) (reason: %s)", incoming.Cids(), bc, reason) } } @@ -909,13 +1304,13 @@ loop: } // NB: GetBlocks validates that the blocks are in-fact the ones we - // requested, and that they are correctly linked to eachother. It does - // not validate any state transitions + // requested, and that they are correctly linked to one another. It does + // not validate any state transitions. 
window := 500 if gap := int(blockSet[len(blockSet)-1].Height() - untilHeight); gap < window { window = gap } - blks, err := syncer.Bsync.GetBlocks(ctx, at, window) + blks, err := syncer.Exchange.GetBlocks(ctx, at, window) if err != nil { // Most likely our peers aren't fully synced yet, but forwarded // new block message (ideally we'd find better peers) @@ -929,17 +1324,34 @@ loop: } log.Info("Got blocks: ", blks[0].Height(), len(blks)) + // Check that the fetched segment of the chain matches what we already + // have. Since we fetch from the head backwards our reassembled chain + // is sorted in reverse here: we have a child -> parent order, our last + // tipset then should be child of the first tipset retrieved. + // FIXME: The reassembly logic should be part of the `client` + // service, the consumer should not be concerned with the + // `MaxRequestLength` limitation, it should just be able to request + // an segment of arbitrary length. The same burden is put on + // `syncFork()` which needs to be aware this as well. + if blockSet[len(blockSet)-1].IsChildOf(blks[0]) == false { + return nil, xerrors.Errorf("retrieved segments of the chain are not connected at heights %d/%d", + blockSet[len(blockSet)-1].Height(), blks[0].Height()) + // A successful `GetBlocks()` call is guaranteed to fetch at least + // one tipset so the acess `blks[0]` is safe. 
+ } + for _, b := range blks { if b.Height() < untilHeight { break loop } for _, bc := range b.Cids() { - if syncer.bad.Has(bc) { + if reason, ok := syncer.bad.Has(bc); ok { + newReason := reason.Linked("change contained %s", bc) for _, b := range acceptedBlocks { - syncer.bad.Add(b) + syncer.bad.Add(b, newReason) } - return nil, xerrors.Errorf("chain contained block marked previously as bad (%s, %s)", from.Cids(), bc) + return nil, xerrors.Errorf("chain contained block marked previously as bad (%s, %s) (reason: %s)", incoming.Cids(), bc, reason) } } blockSet = append(blockSet, b) @@ -951,42 +1363,60 @@ loop: at = blks[len(blks)-1].Parents() } - // We have now ascertained that this is *not* a 'fast forward' - if !types.CidArrsEqual(blockSet[len(blockSet)-1].Parents().Cids(), to.Cids()) { - last := blockSet[len(blockSet)-1] - if last.Parents() == to.Parents() { - // common case: receiving a block thats potentially part of the same tipset as our best block - return blockSet, nil - } - - log.Warnf("(fork detected) synced header chain (%s - %d) does not link to our best block (%s - %d)", from.Cids(), from.Height(), to.Cids(), to.Height()) - fork, err := syncer.syncFork(ctx, last, to) - if err != nil { - if xerrors.Is(err, ErrForkTooLong) { - // TODO: we're marking this block bad in the same way that we mark invalid blocks bad. Maybe distinguish? - log.Warn("adding forked chain to our bad tipset cache") - for _, b := range from.Blocks() { - syncer.bad.Add(b.Cid()) - } - } - return nil, xerrors.Errorf("failed to sync fork: %w", err) - } - - blockSet = append(blockSet, fork...) 
+ base := blockSet[len(blockSet)-1] + if base.Parents() == known.Parents() { + // common case: receiving a block thats potentially part of the same tipset as our best block + return blockSet, nil } + if types.CidArrsEqual(base.Parents().Cids(), known.Cids()) { + // common case: receiving blocks that are building on top of our best tipset + return blockSet, nil + } + + // We have now ascertained that this is *not* a 'fast forward' + log.Warnf("(fork detected) synced header chain (%s - %d) does not link to our best block (%s - %d)", incoming.Cids(), incoming.Height(), known.Cids(), known.Height()) + fork, err := syncer.syncFork(ctx, base, known) + if err != nil { + if xerrors.Is(err, ErrForkTooLong) || xerrors.Is(err, ErrForkCheckpoint) { + // TODO: we're marking this block bad in the same way that we mark invalid blocks bad. Maybe distinguish? + log.Warn("adding forked chain to our bad tipset cache") + for _, b := range incoming.Blocks() { + syncer.bad.Add(b.Cid(), NewBadBlockReason(incoming.Cids(), "fork past finality")) + } + } + return nil, xerrors.Errorf("failed to sync fork: %w", err) + } + + blockSet = append(blockSet, fork...) + return blockSet, nil } var ErrForkTooLong = fmt.Errorf("fork longer than threshold") +var ErrForkCheckpoint = fmt.Errorf("fork would require us to diverge from checkpointed block") -func (syncer *Syncer) syncFork(ctx context.Context, from *types.TipSet, to *types.TipSet) ([]*types.TipSet, error) { - tips, err := syncer.Bsync.GetBlocks(ctx, from.Parents(), build.ForkLengthThreshold) +// syncFork tries to obtain the chain fragment that links a fork into a common +// ancestor in our view of the chain. +// +// If the fork is too long (build.ForkLengthThreshold), or would cause us to diverge from the checkpoint (ErrForkCheckpoint), +// we add the entire subchain to the denylist. Else, we find the common ancestor, and add the missing chain +// fragment until the fork point to the returned []TipSet. 
+func (syncer *Syncer) syncFork(ctx context.Context, incoming *types.TipSet, known *types.TipSet) ([]*types.TipSet, error) { + + chkpt := syncer.GetCheckpoint() + if known.Key() == chkpt { + return nil, ErrForkCheckpoint + } + + // TODO: Does this mean we always ask for ForkLengthThreshold blocks from the network, even if we just need, like, 2? + // Would it not be better to ask in smaller chunks, given that an ~ForkLengthThreshold is very rare? + tips, err := syncer.Exchange.GetBlocks(ctx, incoming.Parents(), int(build.ForkLengthThreshold)) if err != nil { return nil, err } - nts, err := syncer.store.LoadTipSet(to.Parents()) + nts, err := syncer.store.LoadTipSet(known.Parents()) if err != nil { return nil, xerrors.Errorf("failed to load next local tipset: %w", err) } @@ -996,7 +1426,7 @@ func (syncer *Syncer) syncFork(ctx context.Context, from *types.TipSet, to *type if !syncer.Genesis.Equals(nts) { return nil, xerrors.Errorf("somehow synced chain that linked back to a different genesis (bad genesis: %s)", nts.Key()) } - return nil, xerrors.Errorf("synced chain forked at genesis, refusing to sync") + return nil, xerrors.Errorf("synced chain forked at genesis, refusing to sync; incoming: %s", incoming.Cids()) } if nts.Equals(tips[cur]) { @@ -1006,12 +1436,18 @@ func (syncer *Syncer) syncFork(ctx context.Context, from *types.TipSet, to *type if nts.Height() < tips[cur].Height() { cur++ } else { + // We will be forking away from nts, check that it isn't checkpointed + if nts.Key() == chkpt { + return nil, ErrForkCheckpoint + } + nts, err = syncer.store.LoadTipSet(nts.Parents()) if err != nil { return nil, xerrors.Errorf("loading next local tipset: %w", err) } } } + return nil, ErrForkTooLong } @@ -1026,6 +1462,7 @@ func (syncer *Syncer) syncMessagesAndCheckState(ctx context.Context, headers []* return xerrors.Errorf("message processing failed: %w", err) } + stats.Record(ctx, metrics.ChainNodeWorkerHeight.M(int64(fts.TipSet().Height()))) 
ss.SetHeight(fts.TipSet().Height()) return nil @@ -1034,12 +1471,12 @@ func (syncer *Syncer) syncMessagesAndCheckState(ctx context.Context, headers []* // fills out each of the given tipsets with messages and calls the callback with it func (syncer *Syncer) iterFullTipsets(ctx context.Context, headers []*types.TipSet, cb func(context.Context, *store.FullTipSet) error) error { + ss := extractSyncState(ctx) ctx, span := trace.StartSpan(ctx, "iterFullTipsets") defer span.End() span.AddAttributes(trace.Int64Attribute("num_headers", int64(len(headers)))) - windowSize := 200 for i := len(headers) - 1; i >= 0; { fts, err := syncer.store.TryFillTipSet(headers[i]) if err != nil { @@ -1053,30 +1490,32 @@ func (syncer *Syncer) iterFullTipsets(ctx context.Context, headers []*types.TipS continue } - batchSize := windowSize + batchSize := concurrentSyncRequests * syncRequestBatchSize if i < batchSize { - batchSize = i + batchSize = i + 1 } - next := headers[i-batchSize] - bstips, err := syncer.Bsync.GetChainMessages(ctx, next, uint64(batchSize+1)) - if err != nil { - return xerrors.Errorf("message processing failed: %w", err) + ss.SetStage(api.StageFetchingMessages) + startOffset := i + 1 - batchSize + bstout, batchErr := syncer.fetchMessages(ctx, headers[startOffset:startOffset+batchSize], startOffset) + ss.SetStage(api.StageMessages) + + if batchErr != nil { + return xerrors.Errorf("failed to fetch messages: %w", batchErr) } - for bsi := 0; bsi < len(bstips); bsi++ { + for bsi := 0; bsi < len(bstout); bsi++ { // temp storage so we don't persist data we dont want to - ds := dstore.NewMapDatastore() - bs := bstore.NewBlockstore(ds) - blks := amt.WrapBlockstore(bs) + bs := bstore.NewTemporary() + blks := cbor.NewCborStore(bs) this := headers[i-bsi] - bstip := bstips[len(bstips)-(bsi+1)] - fts, err := zipTipSetAndMessages(blks, this, bstip.BlsMessages, bstip.SecpkMessages, bstip.BlsMsgIncludes, bstip.SecpkMsgIncludes) + bstip := bstout[len(bstout)-(bsi+1)] + fts, err := 
zipTipSetAndMessages(blks, this, bstip.Bls, bstip.Secpk, bstip.BlsIncludes, bstip.SecpkIncludes) if err != nil { log.Warnw("zipping failed", "error", err, "bsi", bsi, "i", i, - "height", this.Height(), "bstip-height", bstip.Blocks[0].Height, - "bstips", bstips, "next-height", i+batchSize) + "height", this.Height(), + "next-height", i+batchSize) return xerrors.Errorf("message processing failed: %w", err) } @@ -1084,31 +1523,101 @@ func (syncer *Syncer) iterFullTipsets(ctx context.Context, headers []*types.TipS return err } - if err := persistMessages(bs, bstip); err != nil { + if err := persistMessages(ctx, bs, bstip); err != nil { return err } - if err := copyBlockstore(bs, syncer.store.Blockstore()); err != nil { + if err := copyBlockstore(ctx, bs, syncer.store.Blockstore()); err != nil { return xerrors.Errorf("message processing failed: %w", err) } } - i -= windowSize + + i -= batchSize } return nil } -func persistMessages(bs bstore.Blockstore, bst *blocksync.BSTipSet) error { - for _, m := range bst.BlsMessages { +func (syncer *Syncer) fetchMessages(ctx context.Context, headers []*types.TipSet, startOffset int) ([]*exchange.CompactedMessages, error) { + batchSize := len(headers) + batch := make([]*exchange.CompactedMessages, batchSize) + + var wg sync.WaitGroup + var mx sync.Mutex + var batchErr error + + start := build.Clock.Now() + + for j := 0; j < batchSize; j += syncRequestBatchSize { + wg.Add(1) + go func(j int) { + defer wg.Done() + + nreq := syncRequestBatchSize + if j+nreq > batchSize { + nreq = batchSize - j + } + + failed := false + for offset := 0; !failed && offset < nreq; { + nextI := j + offset + lastI := j + nreq + + var requestErr error + var requestResult []*exchange.CompactedMessages + for retry := 0; requestResult == nil && retry < syncRequestRetries; retry++ { + if retry > 0 { + log.Infof("fetching messages at %d (retry %d)", startOffset+nextI, retry) + } else { + log.Infof("fetching messages at %d", startOffset+nextI) + } + + result, err := 
syncer.Exchange.GetChainMessages(ctx, headers[nextI:lastI]) + if err != nil { + requestErr = multierror.Append(requestErr, err) + } else { + requestResult = result + } + } + + mx.Lock() + if requestResult != nil { + copy(batch[j+offset:], requestResult) + offset += len(requestResult) + } else { + log.Errorf("error fetching messages at %d: %s", nextI, requestErr) + batchErr = multierror.Append(batchErr, requestErr) + failed = true + } + mx.Unlock() + } + }(j) + } + wg.Wait() + + if batchErr != nil { + return nil, batchErr + } + + log.Infof("fetching messages for %d tipsets at %d done; took %s", batchSize, startOffset, build.Clock.Since(start)) + + return batch, nil +} + +func persistMessages(ctx context.Context, bs bstore.Blockstore, bst *exchange.CompactedMessages) error { + _, span := trace.StartSpan(ctx, "persistMessages") + defer span.End() + + for _, m := range bst.Bls { //log.Infof("putting BLS message: %s", m.Cid()) if _, err := store.PutMessage(bs, m); err != nil { log.Errorf("failed to persist messages: %+v", err) return xerrors.Errorf("BLS message processing failed: %w", err) } } - for _, m := range bst.SecpkMessages { - if m.Signature.Type != types.KTSecp256k1 { - return xerrors.Errorf("unknown signature type on message %s: %q", m.Cid(), m.Signature.TypeCode) + for _, m := range bst.Secpk { + if m.Signature.Type != crypto.SigTypeSecp256k1 { + return xerrors.Errorf("unknown signature type on message %s: %q", m.Cid(), m.Signature.Type) } //log.Infof("putting secp256k1 message: %s", m.Cid()) if _, err := store.PutMessage(bs, m); err != nil { @@ -1120,6 +1629,25 @@ func persistMessages(bs bstore.Blockstore, bst *blocksync.BSTipSet) error { return nil } +// collectChain tries to advance our view of the chain to the purported head. +// +// It goes through various stages: +// +// 1. 
StageHeaders: we proceed in the sync process by requesting block headers +// from our peers, moving back from their heads, until we reach a tipset +// that we have in common (such a common tipset must exist, though it may +// simply be the genesis block). +// +// If the common tipset is our head, we treat the sync as a "fast-forward", +// else we must drop part of our chain to connect to the peer's head +// (referred to as "forking"). +// +// 2. StagePersistHeaders: now that we've collected the missing headers, +// augmented by those on the other side of a fork, we persist them to the +// BlockStore. +// +// 3. StageMessages: having acquired the headers and found a common tipset, +// we then move forward, requesting the full blocks, including the messages. func (syncer *Syncer) collectChain(ctx context.Context, ts *types.TipSet) error { ctx, span := trace.StartSpan(ctx, "collectChain") defer span.End() @@ -1141,7 +1669,7 @@ func (syncer *Syncer) collectChain(ctx context.Context, ts *types.TipSet) error ss.SetStage(api.StagePersistHeaders) - toPersist := make([]*types.BlockHeader, 0, len(headers)*build.BlocksPerEpoch) + toPersist := make([]*types.BlockHeader, 0, len(headers)*int(build.BlocksPerEpoch)) for _, ts := range headers { toPersist = append(toPersist, ts.Blocks()...) 
} @@ -1166,22 +1694,60 @@ func (syncer *Syncer) collectChain(ctx context.Context, ts *types.TipSet) error return nil } -func VerifyElectionPoStVRF(ctx context.Context, evrf []byte, rand []byte, worker, miner address.Address) error { - if err := gen.VerifyVRF(ctx, worker, miner, gen.DSepElectionPost, rand, evrf); err != nil { - return xerrors.Errorf("failed to verify post_randomness vrf: %w", err) +func VerifyElectionPoStVRF(ctx context.Context, worker address.Address, rand []byte, evrf []byte) error { + if build.InsecurePoStValidation { + return nil } - - return nil + return gen.VerifyVRF(ctx, worker, rand, evrf) } func (syncer *Syncer) State() []SyncerState { - var out []SyncerState - for _, ss := range syncer.syncmgr.syncStates { - out = append(out, ss.Snapshot()) - } - return out + return syncer.syncmgr.State() } +// MarkBad manually adds a block to the "bad blocks" cache. func (syncer *Syncer) MarkBad(blk cid.Cid) { - syncer.bad.Add(blk) + syncer.bad.Add(blk, NewBadBlockReason([]cid.Cid{blk}, "manually marked bad")) +} + +// UnmarkBad removes a block from the "bad blocks" cache. 
+func (syncer *Syncer) UnmarkBad(blk cid.Cid) { + syncer.bad.Remove(blk) +} + +func (syncer *Syncer) CheckBadBlockCache(blk cid.Cid) (string, bool) { + bbr, ok := syncer.bad.Has(blk) + return bbr.String(), ok +} + +func (syncer *Syncer) getLatestBeaconEntry(_ context.Context, ts *types.TipSet) (*types.BeaconEntry, error) { + cur := ts + for i := 0; i < 20; i++ { + cbe := cur.Blocks()[0].BeaconEntries + if len(cbe) > 0 { + return &cbe[len(cbe)-1], nil + } + + if cur.Height() == 0 { + return nil, xerrors.Errorf("made it back to genesis block without finding beacon entry") + } + + next, err := syncer.store.LoadTipSet(cur.Parents()) + if err != nil { + return nil, xerrors.Errorf("failed to load parents when searching back for latest beacon entry: %w", err) + } + cur = next + } + + return nil, xerrors.Errorf("found NO beacon entries in the 20 latest tipsets") +} + +func (syncer *Syncer) IsEpochBeyondCurrMax(epoch abi.ChainEpoch) bool { + g, err := syncer.store.GetGenesis() + if err != nil { + return false + } + + now := uint64(build.Clock.Now().Unix()) + return epoch > (abi.ChainEpoch((now-g.Timestamp)/build.BlockDelaySecs) + MaxHeightDrift) } diff --git a/chain/sync_manager.go b/chain/sync_manager.go index e00063961..811092bc7 100644 --- a/chain/sync_manager.go +++ b/chain/sync_manager.go @@ -20,7 +20,28 @@ const ( type SyncFunc func(context.Context, *types.TipSet) error -type SyncManager struct { +// SyncManager manages the chain synchronization process, both at bootstrap time +// and during ongoing operation. +// +// It receives candidate chain heads in the form of tipsets from peers, +// and schedules them onto sync workers, deduplicating processing for +// already-active syncs. +type SyncManager interface { + // Start starts the SyncManager. + Start() + + // Stop stops the SyncManager. + Stop() + + // SetPeerHead informs the SyncManager that the supplied peer reported the + // supplied tipset. 
+ SetPeerHead(ctx context.Context, p peer.ID, ts *types.TipSet) + + // State retrieves the state of the sync workers. + State() []SyncerState +} + +type syncManager struct { lk sync.Mutex peerHeads map[peer.ID]*types.TipSet @@ -35,6 +56,7 @@ type SyncManager struct { syncStates []*SyncerState + // Normally this handler is set to `(*Syncer).Sync()`. doSync func(context.Context, *types.TipSet) error stop chan struct{} @@ -47,6 +69,8 @@ type SyncManager struct { workerChan chan *types.TipSet } +var _ SyncManager = (*syncManager)(nil) + type syncResult struct { ts *types.TipSet success bool @@ -54,8 +78,8 @@ type syncResult struct { const syncWorkerCount = 3 -func NewSyncManager(sync SyncFunc) *SyncManager { - return &SyncManager{ +func NewSyncManager(sync SyncFunc) SyncManager { + return &syncManager{ bspThresh: 1, peerHeads: make(map[peer.ID]*types.TipSet), syncTargets: make(chan *types.TipSet), @@ -68,18 +92,18 @@ func NewSyncManager(sync SyncFunc) *SyncManager { } } -func (sm *SyncManager) Start() { +func (sm *syncManager) Start() { go sm.syncScheduler() for i := 0; i < syncWorkerCount; i++ { go sm.syncWorker(i) } } -func (sm *SyncManager) Stop() { +func (sm *syncManager) Stop() { close(sm.stop) } -func (sm *SyncManager) SetPeerHead(ctx context.Context, p peer.ID, ts *types.TipSet) { +func (sm *syncManager) SetPeerHead(ctx context.Context, p peer.ID, ts *types.TipSet) { sm.lk.Lock() defer sm.lk.Unlock() sm.peerHeads[p] = ts @@ -104,6 +128,14 @@ func (sm *SyncManager) SetPeerHead(ctx context.Context, p peer.ID, ts *types.Tip sm.incomingTipSets <- ts } +func (sm *syncManager) State() []SyncerState { + ret := make([]SyncerState, 0, len(sm.syncStates)) + for _, s := range sm.syncStates { + ret = append(ret, s.Snapshot()) + } + return ret +} + type syncBucketSet struct { buckets []*syncTargetBucket } @@ -233,7 +265,7 @@ func (stb *syncTargetBucket) heaviestTipSet() *types.TipSet { return best } -func (sm *SyncManager) selectSyncTarget() (*types.TipSet, error) { +func 
(sm *syncManager) selectSyncTarget() (*types.TipSet, error) { var buckets syncBucketSet var peerHeads []*types.TipSet @@ -257,7 +289,7 @@ func (sm *SyncManager) selectSyncTarget() (*types.TipSet, error) { return buckets.Heaviest(), nil } -func (sm *SyncManager) syncScheduler() { +func (sm *syncManager) syncScheduler() { for { select { @@ -279,8 +311,8 @@ func (sm *SyncManager) syncScheduler() { } } -func (sm *SyncManager) scheduleIncoming(ts *types.TipSet) { - log.Info("scheduling incoming tipset sync: ", ts.Cids()) +func (sm *syncManager) scheduleIncoming(ts *types.TipSet) { + log.Debug("scheduling incoming tipset sync: ", ts.Cids()) if sm.getBootstrapState() == BSStateSelected { sm.setBootstrapState(BSStateScheduled) sm.syncTargets <- ts @@ -327,10 +359,11 @@ func (sm *SyncManager) scheduleIncoming(ts *types.TipSet) { } } -func (sm *SyncManager) scheduleProcessResult(res *syncResult) { +func (sm *syncManager) scheduleProcessResult(res *syncResult) { if res.success && sm.getBootstrapState() != BSStateComplete { sm.setBootstrapState(BSStateComplete) } + delete(sm.activeSyncs, res.ts.Key()) relbucket := sm.activeSyncTips.PopRelated(res.ts) if relbucket != nil { @@ -342,12 +375,12 @@ func (sm *SyncManager) scheduleProcessResult(res *syncResult) { sm.syncQueue.buckets = append(sm.syncQueue.buckets, relbucket) } return - } else { - // TODO: this is the case where we try to sync a chain, and - // fail, and we have more blocks on top of that chain that - // have come in since. The question is, should we try to - // sync these? or just drop them? } + // TODO: this is the case where we try to sync a chain, and + // fail, and we have more blocks on top of that chain that + // have come in since. The question is, should we try to + // sync these? or just drop them? 
+ log.Error("failed to sync chain but have new unconnected blocks from chain") } if sm.nextSyncTarget == nil && !sm.syncQueue.Empty() { @@ -359,7 +392,7 @@ func (sm *SyncManager) scheduleProcessResult(res *syncResult) { } } -func (sm *SyncManager) scheduleWorkSent() { +func (sm *syncManager) scheduleWorkSent() { hts := sm.nextSyncTarget.heaviestTipSet() sm.activeSyncs[hts.Key()] = hts @@ -371,7 +404,7 @@ func (sm *SyncManager) scheduleWorkSent() { } } -func (sm *SyncManager) syncWorker(id int) { +func (sm *syncManager) syncWorker(id int) { ss := &SyncerState{} sm.syncStates[id] = ss for { @@ -396,7 +429,7 @@ func (sm *SyncManager) syncWorker(id int) { } } -func (sm *SyncManager) syncedPeerCount() int { +func (sm *syncManager) syncedPeerCount() int { var count int for _, ts := range sm.peerHeads { if ts.Height() > 0 { @@ -406,19 +439,19 @@ func (sm *SyncManager) syncedPeerCount() int { return count } -func (sm *SyncManager) getBootstrapState() int { +func (sm *syncManager) getBootstrapState() int { sm.bssLk.Lock() defer sm.bssLk.Unlock() return sm.bootstrapState } -func (sm *SyncManager) setBootstrapState(v int) { +func (sm *syncManager) setBootstrapState(v int) { sm.bssLk.Lock() defer sm.bssLk.Unlock() sm.bootstrapState = v } -func (sm *SyncManager) IsBootstrapped() bool { +func (sm *syncManager) IsBootstrapped() bool { sm.bssLk.Lock() defer sm.bssLk.Unlock() return sm.bootstrapState == BSStateComplete diff --git a/chain/sync_manager_test.go b/chain/sync_manager_test.go index ca2ced856..269b3a62e 100644 --- a/chain/sync_manager_test.go +++ b/chain/sync_manager_test.go @@ -17,7 +17,7 @@ type syncOp struct { done func() } -func runSyncMgrTest(t *testing.T, tname string, thresh int, tf func(*testing.T, *SyncManager, chan *syncOp)) { +func runSyncMgrTest(t *testing.T, tname string, thresh int, tf func(*testing.T, *syncManager, chan *syncOp)) { syncTargets := make(chan *syncOp) sm := NewSyncManager(func(ctx context.Context, ts *types.TipSet) error { ch := make(chan 
struct{}) @@ -27,7 +27,7 @@ func runSyncMgrTest(t *testing.T, tname string, thresh int, tf func(*testing.T, } <-ch return nil - }) + }).(*syncManager) sm.bspThresh = thresh sm.Start() @@ -77,12 +77,12 @@ func TestSyncManager(t *testing.T) { c3 := mock.TipSet(mock.MkBlock(b, 3, 5)) d := mock.TipSet(mock.MkBlock(c1, 4, 5)) - runSyncMgrTest(t, "testBootstrap", 1, func(t *testing.T, sm *SyncManager, stc chan *syncOp) { + runSyncMgrTest(t, "testBootstrap", 1, func(t *testing.T, sm *syncManager, stc chan *syncOp) { sm.SetPeerHead(ctx, "peer1", c1) assertGetSyncOp(t, stc, c1) }) - runSyncMgrTest(t, "testBootstrap", 2, func(t *testing.T, sm *SyncManager, stc chan *syncOp) { + runSyncMgrTest(t, "testBootstrap", 2, func(t *testing.T, sm *syncManager, stc chan *syncOp) { sm.SetPeerHead(ctx, "peer1", c1) assertNoOp(t, stc) @@ -90,7 +90,7 @@ func TestSyncManager(t *testing.T) { assertGetSyncOp(t, stc, c1) }) - runSyncMgrTest(t, "testSyncAfterBootstrap", 1, func(t *testing.T, sm *SyncManager, stc chan *syncOp) { + runSyncMgrTest(t, "testSyncAfterBootstrap", 1, func(t *testing.T, sm *syncManager, stc chan *syncOp) { sm.SetPeerHead(ctx, "peer1", b) assertGetSyncOp(t, stc, b) @@ -101,7 +101,7 @@ func TestSyncManager(t *testing.T) { assertGetSyncOp(t, stc, c2) }) - runSyncMgrTest(t, "testCoalescing", 1, func(t *testing.T, sm *SyncManager, stc chan *syncOp) { + runSyncMgrTest(t, "testCoalescing", 1, func(t *testing.T, sm *syncManager, stc chan *syncOp) { sm.SetPeerHead(ctx, "peer1", a) assertGetSyncOp(t, stc, a) @@ -122,7 +122,7 @@ func TestSyncManager(t *testing.T) { assertGetSyncOp(t, stc, d) }) - runSyncMgrTest(t, "testSyncIncomingTipset", 1, func(t *testing.T, sm *SyncManager, stc chan *syncOp) { + runSyncMgrTest(t, "testSyncIncomingTipset", 1, func(t *testing.T, sm *syncManager, stc chan *syncOp) { sm.SetPeerHead(ctx, "peer1", a) assertGetSyncOp(t, stc, a) diff --git a/chain/sync_test.go b/chain/sync_test.go index c9b8e7bbb..7a839be2b 100644 --- a/chain/sync_test.go +++ 
b/chain/sync_test.go @@ -7,17 +7,27 @@ import ( "testing" "time" - logging "github.com/ipfs/go-log" + "github.com/filecoin-project/specs-actors/actors/runtime/proof" + + "github.com/ipfs/go-cid" + + ds "github.com/ipfs/go-datastore" + logging "github.com/ipfs/go-log/v2" "github.com/libp2p/go-libp2p-core/peer" mocknet "github.com/libp2p/go-libp2p/p2p/net/mock" "github.com/stretchr/testify/require" "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/chain/actors/policy" "github.com/filecoin-project/lotus/chain/gen" + "github.com/filecoin-project/lotus/chain/gen/slashfilter" "github.com/filecoin-project/lotus/chain/store" "github.com/filecoin-project/lotus/chain/types" + mocktypes "github.com/filecoin-project/lotus/chain/types/mock" "github.com/filecoin-project/lotus/node" "github.com/filecoin-project/lotus/node/impl" "github.com/filecoin-project/lotus/node/modules" @@ -26,9 +36,13 @@ import ( func init() { build.InsecurePoStValidation = true - os.Setenv("TRUST_PARAMS", "1") - build.SectorSizes = []uint64{1024} - build.MinimumMinerPower = 1024 + err := os.Setenv("TRUST_PARAMS", "1") + if err != nil { + panic(err) + } + policy.SetSupportedProofTypes(abi.RegisteredSealProof_StackedDrg2KiBV1) + policy.SetConsensusMinerMinPower(abi.NewStoragePower(2048)) + policy.SetMinVerifiedDealSize(abi.NewStoragePower(256)) } const source = 0 @@ -73,7 +87,7 @@ func prepSyncTest(t testing.TB, h int) *syncTestUtil { g, err := gen.NewGenerator() if err != nil { - t.Fatal(err) + t.Fatalf("%+v", err) } ctx, cancel := context.WithCancel(context.Background()) @@ -158,10 +172,9 @@ func (tu *syncTestUtil) pushTsExpectErr(to int, fts *store.FullTipSet, experr bo require.NoError(tu.t, err) } } - } -func (tu *syncTestUtil) mineOnBlock(blk *store.FullTipSet, src int, miners []int, wait, fail bool) *store.FullTipSet { +func (tu 
*syncTestUtil) mineOnBlock(blk *store.FullTipSet, to int, miners []int, wait, fail bool, msgs [][]*types.SignedMessage) *store.FullTipSet { if miners == nil { for i := range tu.g.Miners { miners = append(miners, i) @@ -175,37 +188,31 @@ func (tu *syncTestUtil) mineOnBlock(blk *store.FullTipSet, src int, miners []int fmt.Println("Miner mining block: ", maddrs) - mts, err := tu.g.NextTipSetFromMiners(blk.TipSet(), maddrs) - require.NoError(tu.t, err) - - if fail { - tu.pushTsExpectErr(src, mts.TipSet, true) + var nts *store.FullTipSet + var err error + if msgs != nil { + nts, err = tu.g.NextTipSetFromMinersWithMessages(blk.TipSet(), maddrs, msgs) + require.NoError(tu.t, err) } else { - tu.pushFtsAndWait(src, mts.TipSet, wait) + mt, err := tu.g.NextTipSetFromMiners(blk.TipSet(), maddrs) + require.NoError(tu.t, err) + nts = mt.TipSet } - return mts.TipSet + if fail { + tu.pushTsExpectErr(to, nts, true) + } else { + tu.pushFtsAndWait(to, nts, wait) + } + + return nts } func (tu *syncTestUtil) mineNewBlock(src int, miners []int) { - mts := tu.mineOnBlock(tu.g.CurTipset, src, miners, true, false) + mts := tu.mineOnBlock(tu.g.CurTipset, src, miners, true, false, nil) tu.g.CurTipset = mts } -func fblkToBlkMsg(fb *types.FullBlock) *types.BlockMsg { - out := &types.BlockMsg{ - Header: fb.Header, - } - - for _, msg := range fb.BlsMessages { - out.BlsMessages = append(out.BlsMessages, msg.Cid()) - } - for _, msg := range fb.SecpkMessages { - out.SecpkMessages = append(out.SecpkMessages, msg.Cid()) - } - return out -} - func (tu *syncTestUtil) addSourceNode(gen int) { if tu.genesis != nil { tu.t.Fatal("source node already exists") @@ -321,6 +328,36 @@ func (tu *syncTestUtil) compareSourceState(with int) { } } +func (tu *syncTestUtil) assertBad(node int, ts *types.TipSet) { + for _, blk := range ts.Cids() { + rsn, err := tu.nds[node].SyncCheckBad(context.TODO(), blk) + require.NoError(tu.t, err) + require.True(tu.t, len(rsn) != 0) + } +} + +func (tu *syncTestUtil) getHead(node 
int) *types.TipSet { + ts, err := tu.nds[node].ChainHead(context.TODO()) + require.NoError(tu.t, err) + return ts +} + +func (tu *syncTestUtil) checkpointTs(node int, tsk types.TipSetKey) { + require.NoError(tu.t, tu.nds[node].SyncCheckpoint(context.TODO(), tsk)) +} + +func (tu *syncTestUtil) waitUntilNodeHasTs(node int, tsk types.TipSetKey) { + for { + _, err := tu.nds[node].ChainGetTipSet(context.TODO(), tsk) + if err != nil { + break + } + } + + // Time to allow for syncing and validation + time.Sleep(2 * time.Second) +} + func (tu *syncTestUtil) waitUntilSync(from, to int) { target, err := tu.nds[from].ChainHead(tu.ctx) if err != nil { @@ -398,21 +435,23 @@ func TestSyncBadTimestamp(t *testing.T) { tu.waitUntilSync(0, client) base := tu.g.CurTipset - tu.g.Timestamper = func(pts *types.TipSet, tl uint64) uint64 { - return pts.MinTimestamp() + (build.BlockDelay / 2) + tu.g.Timestamper = func(pts *types.TipSet, tl abi.ChainEpoch) uint64 { + return pts.MinTimestamp() + (build.BlockDelaySecs / 2) } fmt.Println("BASE: ", base.Cids()) tu.printHeads() - a1 := tu.mineOnBlock(base, 0, nil, false, true) + a1 := tu.mineOnBlock(base, 0, nil, false, true, nil) tu.g.Timestamper = nil - tu.g.ResyncBankerNonce(a1.TipSet()) + require.NoError(t, tu.g.ResyncBankerNonce(a1.TipSet())) + + tu.nds[0].(*impl.FullNodeAPI).SlashFilter = slashfilter.New(ds.NewMapDatastore()) fmt.Println("After mine bad block!") tu.printHeads() - a2 := tu.mineOnBlock(base, 0, nil, true, false) + a2 := tu.mineOnBlock(base, 0, nil, true, false, nil) tu.waitUntilSync(0, client) @@ -424,6 +463,41 @@ func TestSyncBadTimestamp(t *testing.T) { } } +type badWpp struct{} + +func (wpp badWpp) GenerateCandidates(context.Context, abi.PoStRandomness, uint64) ([]uint64, error) { + return []uint64{1}, nil +} + +func (wpp badWpp) ComputeProof(context.Context, []proof.SectorInfo, abi.PoStRandomness) ([]proof.PoStProof, error) { + return []proof.PoStProof{ + { + PoStProof: abi.RegisteredPoStProof_StackedDrgWinning2KiBV1, + 
ProofBytes: []byte("evil"), + }, + }, nil +} + +func TestSyncBadWinningPoSt(t *testing.T) { + H := 15 + tu := prepSyncTest(t, H) + + client := tu.addClientNode() + + require.NoError(t, tu.mn.LinkAll()) + tu.connect(client, 0) + tu.waitUntilSync(0, client) + + base := tu.g.CurTipset + + // both miners now produce invalid winning posts + tu.g.SetWinningPoStProver(tu.g.Miners[0], &badWpp{}) + tu.g.SetWinningPoStProver(tu.g.Miners[1], &badWpp{}) + + // now ensure that new blocks are not accepted + tu.mineOnBlock(base, client, nil, false, true, nil) +} + func (tu *syncTestUtil) loadChainToNode(to int) { // utility to simulate incoming blocks without miner process // TODO: should call syncer directly, this won't work correctly in all cases @@ -466,16 +540,16 @@ func TestSyncFork(t *testing.T) { fmt.Println("Mining base: ", base.TipSet().Cids(), base.TipSet().Height()) // The two nodes fork at this point into 'a' and 'b' - a1 := tu.mineOnBlock(base, p1, []int{0}, true, false) - a := tu.mineOnBlock(a1, p1, []int{0}, true, false) - a = tu.mineOnBlock(a, p1, []int{0}, true, false) + a1 := tu.mineOnBlock(base, p1, []int{0}, true, false, nil) + a := tu.mineOnBlock(a1, p1, []int{0}, true, false, nil) + a = tu.mineOnBlock(a, p1, []int{0}, true, false, nil) - tu.g.ResyncBankerNonce(a1.TipSet()) + require.NoError(t, tu.g.ResyncBankerNonce(a1.TipSet())) // chain B will now be heaviest - b := tu.mineOnBlock(base, p2, []int{1}, true, false) - b = tu.mineOnBlock(b, p2, []int{1}, true, false) - b = tu.mineOnBlock(b, p2, []int{1}, true, false) - b = tu.mineOnBlock(b, p2, []int{1}, true, false) + b := tu.mineOnBlock(base, p2, []int{1}, true, false, nil) + b = tu.mineOnBlock(b, p2, []int{1}, true, false, nil) + b = tu.mineOnBlock(b, p2, []int{1}, true, false, nil) + b = tu.mineOnBlock(b, p2, []int{1}, true, false, nil) fmt.Println("A: ", a.Cids(), a.TipSet().Height()) fmt.Println("B: ", b.Cids(), b.TipSet().Height()) @@ -490,6 +564,142 @@ func TestSyncFork(t *testing.T) { phead() } +// 
This test crafts a tipset with 2 blocks, A and B. +// A and B both include _different_ messages from sender X with nonce N (where N is the correct nonce for X). +// We can confirm that the state can be correctly computed, and that `MessagesForTipset` behaves as expected. +func TestDuplicateNonce(t *testing.T) { + H := 10 + tu := prepSyncTest(t, H) + + base := tu.g.CurTipset + + // Produce a message from the banker to the rcvr + makeMsg := func(rcvr address.Address) *types.SignedMessage { + + ba, err := tu.nds[0].StateGetActor(context.TODO(), tu.g.Banker(), base.TipSet().Key()) + require.NoError(t, err) + msg := types.Message{ + To: rcvr, + From: tu.g.Banker(), + + Nonce: ba.Nonce, + + Value: types.NewInt(1), + + Method: 0, + + GasLimit: 100_000_000, + GasFeeCap: types.NewInt(0), + GasPremium: types.NewInt(0), + } + + sig, err := tu.g.Wallet().Sign(context.TODO(), tu.g.Banker(), msg.Cid().Bytes()) + require.NoError(t, err) + + return &types.SignedMessage{ + Message: msg, + Signature: *sig, + } + } + + msgs := make([][]*types.SignedMessage, 2) + // Each miner includes a message from the banker with the same nonce, but to different addresses + for k := range msgs { + msgs[k] = []*types.SignedMessage{makeMsg(tu.g.Miners[k])} + } + + ts1 := tu.mineOnBlock(base, 0, []int{0, 1}, true, false, msgs) + + tu.waitUntilSyncTarget(0, ts1.TipSet()) + + // mine another tipset + + ts2 := tu.mineOnBlock(ts1, 0, []int{0, 1}, true, false, make([][]*types.SignedMessage, 2)) + tu.waitUntilSyncTarget(0, ts2.TipSet()) + + var includedMsg cid.Cid + var skippedMsg cid.Cid + r0, err0 := tu.nds[0].StateGetReceipt(context.TODO(), msgs[0][0].Cid(), ts2.TipSet().Key()) + r1, err1 := tu.nds[0].StateGetReceipt(context.TODO(), msgs[1][0].Cid(), ts2.TipSet().Key()) + + if err0 == nil { + require.Error(t, err1, "at least one of the StateGetReceipt calls should fail") + require.True(t, r0.ExitCode.IsSuccess()) + includedMsg = msgs[0][0].Message.Cid() + skippedMsg = msgs[1][0].Message.Cid() + } else { 
+ require.NoError(t, err1, "both the StateGetReceipt calls should not fail") + require.True(t, r1.ExitCode.IsSuccess()) + includedMsg = msgs[1][0].Message.Cid() + skippedMsg = msgs[0][0].Message.Cid() + } + + _, rslts, err := tu.g.StateManager().ExecutionTrace(context.TODO(), ts1.TipSet()) + require.NoError(t, err) + found := false + for _, v := range rslts { + if v.Msg.Cid() == skippedMsg { + t.Fatal("skipped message should not be in exec trace") + } + + if v.Msg.Cid() == includedMsg { + found = true + } + } + + if !found { + t.Fatal("included message should be in exec trace") + } + + mft, err := tu.g.ChainStore().MessagesForTipset(ts1.TipSet()) + require.NoError(t, err) + require.True(t, len(mft) == 1, "only expecting one message for this tipset") + require.Equal(t, includedMsg, mft[0].VMMessage().Cid(), "messages for tipset didn't contain expected message") +} + +// This test asserts that a block that includes a message with bad nonce can't be synced. A nonce is "bad" if it can't +// be applied on the parent state. 
+func TestBadNonce(t *testing.T) { + H := 10 + tu := prepSyncTest(t, H) + + base := tu.g.CurTipset + + // Produce a message from the banker with a bad nonce + makeBadMsg := func() *types.SignedMessage { + + ba, err := tu.nds[0].StateGetActor(context.TODO(), tu.g.Banker(), base.TipSet().Key()) + require.NoError(t, err) + msg := types.Message{ + To: tu.g.Banker(), + From: tu.g.Banker(), + + Nonce: ba.Nonce + 5, + + Value: types.NewInt(1), + + Method: 0, + + GasLimit: 100_000_000, + GasFeeCap: types.NewInt(0), + GasPremium: types.NewInt(0), + } + + sig, err := tu.g.Wallet().Sign(context.TODO(), tu.g.Banker(), msg.Cid().Bytes()) + require.NoError(t, err) + + return &types.SignedMessage{ + Message: msg, + Signature: *sig, + } + } + + msgs := make([][]*types.SignedMessage, 1) + msgs[0] = []*types.SignedMessage{makeBadMsg()} + + tu.mineOnBlock(base, 0, []int{0}, true, true, msgs) +} + func BenchmarkSyncBasic(b *testing.B) { for i := 0; i < b.N; i++ { runSyncBenchLength(b, 100) @@ -509,3 +719,114 @@ func runSyncBenchLength(b *testing.B, l int) { tu.waitUntilSync(0, client) } + +func TestSyncInputs(t *testing.T) { + H := 10 + tu := prepSyncTest(t, H) + + p1 := tu.addClientNode() + + fn := tu.nds[p1].(*impl.FullNodeAPI) + + s := fn.SyncAPI.Syncer + + err := s.ValidateBlock(context.TODO(), &types.FullBlock{ + Header: &types.BlockHeader{}, + }) + if err == nil { + t.Fatal("should error on empty block") + } + + h := mocktypes.MkBlock(nil, 123, 432) + + h.ElectionProof = nil + + err = s.ValidateBlock(context.TODO(), &types.FullBlock{Header: h}) + if err == nil { + t.Fatal("should error on block with nil election proof") + } +} + +func TestSyncCheckpointHead(t *testing.T) { + H := 10 + tu := prepSyncTest(t, H) + + p1 := tu.addClientNode() + p2 := tu.addClientNode() + + fmt.Println("GENESIS: ", tu.g.Genesis().Cid()) + tu.loadChainToNode(p1) + tu.loadChainToNode(p2) + + base := tu.g.CurTipset + fmt.Println("Mining base: ", base.TipSet().Cids(), base.TipSet().Height()) + + // The 
two nodes fork at this point into 'a' and 'b' + a1 := tu.mineOnBlock(base, p1, []int{0}, true, false, nil) + a := tu.mineOnBlock(a1, p1, []int{0}, true, false, nil) + a = tu.mineOnBlock(a, p1, []int{0}, true, false, nil) + + tu.waitUntilSyncTarget(p1, a.TipSet()) + tu.checkpointTs(p1, a.TipSet().Key()) + + require.NoError(t, tu.g.ResyncBankerNonce(a1.TipSet())) + // chain B will now be heaviest + b := tu.mineOnBlock(base, p2, []int{1}, true, false, nil) + b = tu.mineOnBlock(b, p2, []int{1}, true, false, nil) + b = tu.mineOnBlock(b, p2, []int{1}, true, false, nil) + b = tu.mineOnBlock(b, p2, []int{1}, true, false, nil) + + fmt.Println("A: ", a.Cids(), a.TipSet().Height()) + fmt.Println("B: ", b.Cids(), b.TipSet().Height()) + + // Now for the fun part!! p1 should mark p2's head as BAD. + + require.NoError(t, tu.mn.LinkAll()) + tu.connect(p1, p2) + tu.waitUntilNodeHasTs(p1, b.TipSet().Key()) + p1Head := tu.getHead(p1) + require.Equal(tu.t, p1Head, a.TipSet()) + tu.assertBad(p1, b.TipSet()) +} + +func TestSyncCheckpointEarlierThanHead(t *testing.T) { + H := 10 + tu := prepSyncTest(t, H) + + p1 := tu.addClientNode() + p2 := tu.addClientNode() + + fmt.Println("GENESIS: ", tu.g.Genesis().Cid()) + tu.loadChainToNode(p1) + tu.loadChainToNode(p2) + + base := tu.g.CurTipset + fmt.Println("Mining base: ", base.TipSet().Cids(), base.TipSet().Height()) + + // The two nodes fork at this point into 'a' and 'b' + a1 := tu.mineOnBlock(base, p1, []int{0}, true, false, nil) + a := tu.mineOnBlock(a1, p1, []int{0}, true, false, nil) + a = tu.mineOnBlock(a, p1, []int{0}, true, false, nil) + + tu.waitUntilSyncTarget(p1, a.TipSet()) + tu.checkpointTs(p1, a1.TipSet().Key()) + + require.NoError(t, tu.g.ResyncBankerNonce(a1.TipSet())) + // chain B will now be heaviest + b := tu.mineOnBlock(base, p2, []int{1}, true, false, nil) + b = tu.mineOnBlock(b, p2, []int{1}, true, false, nil) + b = tu.mineOnBlock(b, p2, []int{1}, true, false, nil) + b = tu.mineOnBlock(b, p2, []int{1}, true, false, nil) 
+ + fmt.Println("A: ", a.Cids(), a.TipSet().Height()) + fmt.Println("B: ", b.Cids(), b.TipSet().Height()) + + // Now for the fun part!! p1 should mark p2's head as BAD. + + require.NoError(t, tu.mn.LinkAll()) + tu.connect(p1, p2) + tu.waitUntilNodeHasTs(p1, b.TipSet().Key()) + p1Head := tu.getHead(p1) + require.Equal(tu.t, p1Head, a.TipSet()) + tu.assertBad(p1, b.TipSet()) +} diff --git a/chain/syncstate.go b/chain/syncstate.go index 622598193..06cd5d91e 100644 --- a/chain/syncstate.go +++ b/chain/syncstate.go @@ -1,37 +1,22 @@ package chain import ( - "fmt" "sync" "time" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/chain/types" ) -func SyncStageString(v api.SyncStateStage) string { - switch v { - case api.StageHeaders: - return "header sync" - case api.StagePersistHeaders: - return "persisting headers" - case api.StageMessages: - return "message sync" - case api.StageSyncComplete: - return "complete" - case api.StageSyncErrored: - return "error" - default: - return fmt.Sprintf("", v) - } -} - type SyncerState struct { lk sync.Mutex Target *types.TipSet Base *types.TipSet Stage api.SyncStateStage - Height uint64 + Height abi.ChainEpoch Message string Start time.Time End time.Time @@ -46,7 +31,7 @@ func (ss *SyncerState) SetStage(v api.SyncStateStage) { defer ss.lk.Unlock() ss.Stage = v if v == api.StageSyncComplete { - ss.End = time.Now() + ss.End = build.Clock.Now() } } @@ -62,11 +47,11 @@ func (ss *SyncerState) Init(base, target *types.TipSet) { ss.Stage = api.StageHeaders ss.Height = 0 ss.Message = "" - ss.Start = time.Now() + ss.Start = build.Clock.Now() ss.End = time.Time{} } -func (ss *SyncerState) SetHeight(h uint64) { +func (ss *SyncerState) SetHeight(h abi.ChainEpoch) { if ss == nil { return } @@ -85,7 +70,7 @@ func (ss *SyncerState) Error(err error) { defer ss.lk.Unlock() ss.Message = err.Error() ss.Stage = api.StageSyncErrored - 
ss.End = time.Now() + ss.End = build.Clock.Now() } func (ss *SyncerState) Snapshot() SyncerState { diff --git a/chain/types/actor.go b/chain/types/actor.go index a3a4418c1..eb8e05c49 100644 --- a/chain/types/actor.go +++ b/chain/types/actor.go @@ -1,16 +1,35 @@ package types import ( - "fmt" + "errors" "github.com/ipfs/go-cid" + + builtin0 "github.com/filecoin-project/specs-actors/actors/builtin" ) -var ErrActorNotFound = fmt.Errorf("actor not found") +var ErrActorNotFound = errors.New("actor not found") type Actor struct { + // Identifies the type of actor (string coded as a CID), see `chain/actors/actors.go`. Code cid.Cid Head cid.Cid Nonce uint64 Balance BigInt } + +func (a *Actor) IsAccountActor() bool { + return a.Code == builtin0.AccountActorCodeID +} + +func (a *Actor) IsStorageMinerActor() bool { + return a.Code == builtin0.StorageMinerActorCodeID +} + +func (a *Actor) IsMultisigActor() bool { + return a.Code == builtin0.MultisigActorCodeID +} + +func (a *Actor) IsPaymentChannelActor() bool { + return a.Code == builtin0.PaymentChannelActorCodeID +} diff --git a/chain/types/ask.go b/chain/types/ask.go deleted file mode 100644 index 42265c9dc..000000000 --- a/chain/types/ask.go +++ /dev/null @@ -1,27 +0,0 @@ -package types - -import ( - "github.com/filecoin-project/go-address" - cbor "github.com/ipfs/go-ipld-cbor" -) - -func init() { - cbor.RegisterCborType(SignedStorageAsk{}) - cbor.RegisterCborType(StorageAsk{}) -} - -type SignedStorageAsk struct { - Ask *StorageAsk - Signature *Signature -} - -type StorageAsk struct { - // Price per GiB / Epoch - Price BigInt - - MinPieceSize uint64 - Miner address.Address - Timestamp uint64 - Expiry uint64 - SeqNo uint64 -} diff --git a/chain/types/bigint.go b/chain/types/bigint.go index 99fb7c0fb..da4857d5b 100644 --- a/chain/types/bigint.go +++ b/chain/types/bigint.go @@ -1,44 +1,24 @@ package types import ( - "encoding/json" "fmt" - "io" "math/big" - "github.com/filecoin-project/lotus/build" - cbor 
"github.com/ipfs/go-ipld-cbor" - "github.com/polydawn/refmt/obj/atlas" + big2 "github.com/filecoin-project/go-state-types/big" - cbg "github.com/whyrusleeping/cbor-gen" - "golang.org/x/xerrors" + "github.com/filecoin-project/lotus/build" ) const BigIntMaxSerializedLen = 128 // is this big enough? or too big? -var TotalFilecoinInt = FromFil(build.TotalFilecoin) - -func init() { - cbor.RegisterCborType(atlas.BuildEntry(BigInt{}).Transform(). - TransformMarshal(atlas.MakeMarshalTransformFunc( - func(i BigInt) ([]byte, error) { - return i.cborBytes(), nil - })). - TransformUnmarshal(atlas.MakeUnmarshalTransformFunc( - func(x []byte) (BigInt, error) { - return fromCborBytes(x) - })). - Complete()) -} +var TotalFilecoinInt = FromFil(build.FilBase) var EmptyInt = BigInt{} -type BigInt struct { - *big.Int -} +type BigInt = big2.Int func NewInt(i uint64) BigInt { - return BigInt{big.NewInt(0).SetUint64(i)} + return BigInt{Int: big.NewInt(0).SetUint64(i)} } func FromFil(i uint64) BigInt { @@ -47,7 +27,7 @@ func FromFil(i uint64) BigInt { func BigFromBytes(b []byte) BigInt { i := big.NewInt(0).SetBytes(b) - return BigInt{i} + return BigInt{Int: i} } func BigFromString(s string) (BigInt, error) { @@ -56,204 +36,61 @@ func BigFromString(s string) (BigInt, error) { return BigInt{}, fmt.Errorf("failed to parse string as a big int") } - return BigInt{v}, nil + return BigInt{Int: v}, nil } func BigMul(a, b BigInt) BigInt { - return BigInt{big.NewInt(0).Mul(a.Int, b.Int)} + return BigInt{Int: big.NewInt(0).Mul(a.Int, b.Int)} } func BigDiv(a, b BigInt) BigInt { - return BigInt{big.NewInt(0).Div(a.Int, b.Int)} + return BigInt{Int: big.NewInt(0).Div(a.Int, b.Int)} } func BigMod(a, b BigInt) BigInt { - return BigInt{big.NewInt(0).Mod(a.Int, b.Int)} + return BigInt{Int: big.NewInt(0).Mod(a.Int, b.Int)} } func BigAdd(a, b BigInt) BigInt { - return BigInt{big.NewInt(0).Add(a.Int, b.Int)} + return BigInt{Int: big.NewInt(0).Add(a.Int, b.Int)} } func BigSub(a, b BigInt) BigInt { - return 
BigInt{big.NewInt(0).Sub(a.Int, b.Int)} + return BigInt{Int: big.NewInt(0).Sub(a.Int, b.Int)} } func BigCmp(a, b BigInt) int { return a.Int.Cmp(b.Int) } -func (bi BigInt) Nil() bool { - return bi.Int == nil -} +var byteSizeUnits = []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB"} -// LessThan returns true if bi < o -func (bi BigInt) LessThan(o BigInt) bool { - return BigCmp(bi, o) < 0 -} - -// GreaterThan returns true if bi > o -func (bi BigInt) GreaterThan(o BigInt) bool { - return BigCmp(bi, o) > 0 -} - -// Equals returns true if bi == o -func (bi BigInt) Equals(o BigInt) bool { - return BigCmp(bi, o) == 0 -} - -func (bi *BigInt) MarshalJSON() ([]byte, error) { - return json.Marshal(bi.String()) -} - -func (bi *BigInt) UnmarshalJSON(b []byte) error { - var s string - if err := json.Unmarshal(b, &s); err != nil { - return err - } - - i, ok := big.NewInt(0).SetString(s, 10) - if !ok { - if string(s) == "" { - return nil - } - return xerrors.Errorf("failed to parse bigint string: '%s'", string(b)) - } - - bi.Int = i - return nil -} - -var sizeUnits = []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB"} - -func (bi BigInt) SizeStr() string { +func SizeStr(bi BigInt) string { r := new(big.Rat).SetInt(bi.Int) den := big.NewRat(1, 1024) var i int - for f, _ := r.Float64(); f >= 1024 && i+1 < len(sizeUnits); f, _ = r.Float64() { + for f, _ := r.Float64(); f >= 1024 && i+1 < len(byteSizeUnits); f, _ = r.Float64() { i++ r = r.Mul(r, den) } f, _ := r.Float64() - return fmt.Sprintf("%.3g %s", f, sizeUnits[i]) + return fmt.Sprintf("%.4g %s", f, byteSizeUnits[i]) } -func (bi *BigInt) Scan(value interface{}) error { - switch value := value.(type) { - case string: - i, ok := big.NewInt(0).SetString(value, 10) - if !ok { - if value == "" { - return nil - } - return xerrors.Errorf("failed to parse bigint string: '%s'", value) - } +var deciUnits = []string{"", "Ki", "Mi", "Gi", "Ti", "Pi", "Ei", "Zi"} - bi.Int = i +func DeciStr(bi BigInt) string { + r := 
new(big.Rat).SetInt(bi.Int) + den := big.NewRat(1, 1024) - return nil - case int64: - bi.Int = big.NewInt(value) - return nil - default: - return xerrors.Errorf("non-string types unsupported: %T", value) + var i int + for f, _ := r.Float64(); f >= 1024 && i+1 < len(deciUnits); f, _ = r.Float64() { + i++ + r = r.Mul(r, den) } -} - -func (bi *BigInt) cborBytes() []byte { - if bi.Int == nil { - return []byte{} - } - - switch { - case bi.Sign() > 0: - return append([]byte{0}, bi.Bytes()...) - case bi.Sign() < 0: - return append([]byte{1}, bi.Bytes()...) - default: // bi.Sign() == 0: - return []byte{} - } -} - -func fromCborBytes(buf []byte) (BigInt, error) { - if len(buf) == 0 { - return NewInt(0), nil - } - - var negative bool - switch buf[0] { - case 0: - negative = false - case 1: - negative = true - default: - return EmptyInt, fmt.Errorf("big int prefix should be either 0 or 1, got %d", buf[0]) - } - - i := big.NewInt(0).SetBytes(buf[1:]) - if negative { - i.Neg(i) - } - - return BigInt{i}, nil -} - -func (bi *BigInt) MarshalCBOR(w io.Writer) error { - if bi.Int == nil { - zero := NewInt(0) - return zero.MarshalCBOR(w) - } - - enc := bi.cborBytes() - - header := cbg.CborEncodeMajorType(cbg.MajByteString, uint64(len(enc))) - if _, err := w.Write(header); err != nil { - return err - } - - if _, err := w.Write(enc); err != nil { - return err - } - - return nil -} - -func (bi *BigInt) UnmarshalCBOR(br io.Reader) error { - maj, extra, err := cbg.CborReadHeader(br) - if err != nil { - return err - } - - if maj != cbg.MajByteString { - return fmt.Errorf("cbor input for fil big int was not a byte string (%x)", maj) - } - - if extra == 0 { - bi.Int = big.NewInt(0) - return nil - } - - if extra > BigIntMaxSerializedLen { - return fmt.Errorf("big integer byte array too long") - } - - buf := make([]byte, extra) - if _, err := io.ReadFull(br, buf); err != nil { - return err - } - - i, err := fromCborBytes(buf) - if err != nil { - return err - } - - *bi = i - - return nil -} - 
-func (bi *BigInt) IsZero() bool { - return bi.Int.Sign() == 0 + + f, _ := r.Float64() + return fmt.Sprintf("%.3g %s", f, deciUnits[i]) } diff --git a/chain/types/bigint_test.go b/chain/types/bigint_test.go index be77acbde..b66528db3 100644 --- a/chain/types/bigint_test.go +++ b/chain/types/bigint_test.go @@ -3,7 +3,12 @@ package types import ( "bytes" "math/big" + "math/rand" + "strings" "testing" + "time" + + "github.com/docker/go-units" "github.com/stretchr/testify/assert" ) @@ -38,7 +43,7 @@ func TestBigIntSerializationRoundTrip(t *testing.T) { func TestFilRoundTrip(t *testing.T) { testValues := []string{ - "0", "1", "1.001", "100.10001", "101100", "5000.01", "5000", + "0 FIL", "1 FIL", "1.001 FIL", "100.10001 FIL", "101100 FIL", "5000.01 FIL", "5000 FIL", } for _, v := range testValues { @@ -60,14 +65,32 @@ func TestSizeStr(t *testing.T) { }{ {0, "0 B"}, {1, "1 B"}, + {1016, "1016 B"}, {1024, "1 KiB"}, - {2000, "1.95 KiB"}, + {1000 * 1024, "1000 KiB"}, + {2000, "1.953 KiB"}, {5 << 20, "5 MiB"}, {11 << 60, "11 EiB"}, } for _, c := range cases { - assert.Equal(t, c.out, NewInt(c.in).SizeStr(), "input %+v, produced wrong result", c) + assert.Equal(t, c.out, SizeStr(NewInt(c.in)), "input %+v, produced wrong result", c) + } +} + +func TestSizeStrUnitsSymmetry(t *testing.T) { + s := rand.NewSource(time.Now().UnixNano()) + r := rand.New(s) + + for i := 0; i < 10000; i++ { + n := r.Uint64() + l := strings.ReplaceAll(units.BytesSize(float64(n)), " ", "") + r := strings.ReplaceAll(SizeStr(NewInt(n)), " ", "") + + assert.NotContains(t, l, "e+") + assert.NotContains(t, r, "e+") + + assert.Equal(t, l, r, "wrong formatting for %d", n) } } @@ -75,6 +98,6 @@ func TestSizeStrBig(t *testing.T) { ZiB := big.NewInt(50000) ZiB = ZiB.Lsh(ZiB, 70) - assert.Equal(t, "5e+04 ZiB", BigInt{Int: ZiB}.SizeStr(), "inout %+v, produced wrong result", ZiB) + assert.Equal(t, "5e+04 ZiB", SizeStr(BigInt{Int: ZiB}), "inout %+v, produced wrong result", ZiB) } diff --git a/chain/types/bitfield.go 
b/chain/types/bitfield.go deleted file mode 100644 index d959e4f84..000000000 --- a/chain/types/bitfield.go +++ /dev/null @@ -1,200 +0,0 @@ -package types - -import ( - "fmt" - "io" - - rlepluslazy "github.com/filecoin-project/lotus/lib/rlepluslazy" - cbg "github.com/whyrusleeping/cbor-gen" - "golang.org/x/xerrors" -) - -type BitField struct { - rle rlepluslazy.RLE - - bits map[uint64]struct{} -} - -func NewBitField() BitField { - rle, err := rlepluslazy.FromBuf([]byte{}) - if err != nil { - panic(err) - } - return BitField{ - rle: rle, - bits: make(map[uint64]struct{}), - } -} - -func BitFieldFromSet(setBits []uint64) BitField { - res := BitField{bits: make(map[uint64]struct{})} - for _, b := range setBits { - res.bits[b] = struct{}{} - } - return res -} - -func MergeBitFields(a, b BitField) (BitField, error) { - ra, err := a.rle.RunIterator() - if err != nil { - return BitField{}, err - } - - rb, err := b.rle.RunIterator() - if err != nil { - return BitField{}, err - } - - merge, err := rlepluslazy.Sum(ra, rb) - if err != nil { - return BitField{}, err - } - - mergebytes, err := rlepluslazy.EncodeRuns(merge, nil) - if err != nil { - return BitField{}, err - } - - rle, err := rlepluslazy.FromBuf(mergebytes) - if err != nil { - return BitField{}, err - } - - return BitField{ - rle: rle, - bits: make(map[uint64]struct{}), - }, nil -} - -func (bf BitField) sum() (rlepluslazy.RunIterator, error) { - if len(bf.bits) == 0 { - return bf.rle.RunIterator() - } - - a, err := bf.rle.RunIterator() - if err != nil { - return nil, err - } - slc := make([]uint64, 0, len(bf.bits)) - for b := range bf.bits { - slc = append(slc, b) - } - - b, err := rlepluslazy.RunsFromSlice(slc) - if err != nil { - return nil, err - } - - res, err := rlepluslazy.Sum(a, b) - if err != nil { - return nil, err - } - return res, nil -} - -// Set ...s bit in the BitField -func (bf BitField) Set(bit uint64) { - bf.bits[bit] = struct{}{} -} - -func (bf BitField) Count() (uint64, error) { - s, err := 
bf.sum() - if err != nil { - return 0, err - } - return rlepluslazy.Count(s) -} - -// All returns all set bits -func (bf BitField) All() ([]uint64, error) { - - runs, err := bf.sum() - if err != nil { - return nil, err - } - - res, err := rlepluslazy.SliceFromRuns(runs) - if err != nil { - return nil, err - } - - return res, nil -} - -func (bf BitField) AllMap() (map[uint64]bool, error) { - - runs, err := bf.sum() - if err != nil { - return nil, err - } - - res, err := rlepluslazy.SliceFromRuns(runs) - if err != nil { - return nil, err - } - - out := make(map[uint64]bool) - for _, i := range res { - out[i] = true - } - return out, nil -} - -func (bf BitField) MarshalCBOR(w io.Writer) error { - ints := make([]uint64, 0, len(bf.bits)) - for i := range bf.bits { - ints = append(ints, i) - } - - s, err := bf.sum() - if err != nil { - return err - } - - rle, err := rlepluslazy.EncodeRuns(s, []byte{}) - if err != nil { - return err - } - - if len(rle) > 8192 { - return xerrors.Errorf("encoded bitfield was too large (%d)", len(rle)) - } - - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajByteString, uint64(len(rle)))); err != nil { - return err - } - if _, err = w.Write(rle); err != nil { - return xerrors.Errorf("writing rle: %w", err) - } - return nil -} - -func (bf *BitField) UnmarshalCBOR(r io.Reader) error { - br := cbg.GetPeeker(r) - - maj, extra, err := cbg.CborReadHeader(br) - if err != nil { - return err - } - if extra > 8192 { - return fmt.Errorf("array too large") - } - - if maj != cbg.MajByteString { - return fmt.Errorf("expected byte array") - } - - buf := make([]byte, extra) - if _, err := io.ReadFull(br, buf); err != nil { - return err - } - - rle, err := rlepluslazy.FromBuf(buf) - if err != nil { - return xerrors.Errorf("could not decode rle+: %w", err) - } - bf.rle = rle - bf.bits = make(map[uint64]struct{}) - - return nil -} diff --git a/chain/types/blockheader.go b/chain/types/blockheader.go index 23fdab0f7..0ec33fe42 100644 --- 
a/chain/types/blockheader.go +++ b/chain/types/blockheader.go @@ -2,16 +2,17 @@ package types import ( "bytes" - "context" "math/big" - "github.com/filecoin-project/go-sectorbuilder" + "github.com/filecoin-project/specs-actors/actors/runtime/proof" + + "github.com/minio/blake2b-simd" + + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/crypto" block "github.com/ipfs/go-block-format" "github.com/ipfs/go-cid" - "github.com/minio/sha256-simd" - "github.com/multiformats/go-multihash" - "go.opencensus.io/trace" xerrors "golang.org/x/xerrors" "github.com/filecoin-project/go-address" @@ -23,52 +24,73 @@ type Ticket struct { VRFProof []byte } -type EPostTicket struct { - Partial []byte - SectorID uint64 - ChallengeIndex uint64 +func (t *Ticket) Quality() float64 { + ticketHash := blake2b.Sum256(t.VRFProof) + ticketNum := BigFromBytes(ticketHash[:]).Int + ticketDenu := big.NewInt(1) + ticketDenu.Lsh(ticketDenu, 256) + tv, _ := new(big.Rat).SetFrac(ticketNum, ticketDenu).Float64() + tq := 1 - tv + return tq } -type EPostProof struct { - Proof []byte - PostRand []byte - Candidates []EPostTicket +type BeaconEntry struct { + Round uint64 + Data []byte +} + +func NewBeaconEntry(round uint64, data []byte) BeaconEntry { + return BeaconEntry{ + Round: round, + Data: data, + } } type BlockHeader struct { - Miner address.Address + Miner address.Address // 0 - Ticket *Ticket + Ticket *Ticket // 1 - EPostProof EPostProof + ElectionProof *ElectionProof // 2 - Parents []cid.Cid + BeaconEntries []BeaconEntry // 3 - ParentWeight BigInt + WinPoStProof []proof.PoStProof // 4 - Height uint64 + Parents []cid.Cid // 5 - ParentStateRoot cid.Cid + ParentWeight BigInt // 6 - ParentMessageReceipts cid.Cid + Height abi.ChainEpoch // 7 - Messages cid.Cid + ParentStateRoot cid.Cid // 8 - BLSAggregate Signature + ParentMessageReceipts cid.Cid // 8 - Timestamp uint64 + Messages cid.Cid // 10 - BlockSig *Signature + BLSAggregate *crypto.Signature // 11 + + 
Timestamp uint64 // 12 + + BlockSig *crypto.Signature // 13 + + ForkSignaling uint64 // 14 + + // ParentBaseFee is the base fee after executing parent tipset + ParentBaseFee abi.TokenAmount // 15 + + // internal + validated bool // true if the signature has been validated } -func (b *BlockHeader) ToStorageBlock() (block.Block, error) { - data, err := b.Serialize() +func (blk *BlockHeader) ToStorageBlock() (block.Block, error) { + data, err := blk.Serialize() if err != nil { return nil, err } - pref := cid.NewPrefixV1(cid.DagCBOR, multihash.BLAKE2B_MIN+31) - c, err := pref.Sum(data) + c, err := abi.CidBuilder.Sum(data) if err != nil { return nil, err } @@ -76,8 +98,8 @@ func (b *BlockHeader) ToStorageBlock() (block.Block, error) { return block.NewBlockWithCid(data, c) } -func (b *BlockHeader) Cid() cid.Cid { - sb, err := b.ToStorageBlock() +func (blk *BlockHeader) Cid() cid.Cid { + sb, err := blk.ToStorageBlock() if err != nil { panic(err) // Not sure i'm entirely comfortable with this one, needs to be checked } @@ -114,16 +136,12 @@ func (blk *BlockHeader) SigningBytes() ([]byte, error) { return blkcopy.Serialize() } -func (blk *BlockHeader) CheckBlockSignature(ctx context.Context, worker address.Address) error { - _, span := trace.StartSpan(ctx, "checkBlockSignature") - defer span.End() +func (blk *BlockHeader) SetValidated() { + blk.validated = true +} - sigb, err := blk.SigningBytes() - if err != nil { - return xerrors.Errorf("failed to get block signing bytes: %w", err) - } - - return blk.BlockSig.Verify(worker, sigb) +func (blk *BlockHeader) IsValidated() bool { + return blk.validated } type MsgMeta struct { @@ -140,13 +158,12 @@ func (mm *MsgMeta) Cid() cid.Cid { } func (mm *MsgMeta) ToStorageBlock() (block.Block, error) { - buf := new(bytes.Buffer) - if err := mm.MarshalCBOR(buf); err != nil { + var buf bytes.Buffer + if err := mm.MarshalCBOR(&buf); err != nil { return nil, xerrors.Errorf("failed to marshal MsgMeta: %w", err) } - pref := 
cid.NewPrefixV1(cid.DagCBOR, multihash.BLAKE2B_MIN+31) - c, err := pref.Sum(buf.Bytes()) + c, err := abi.CidBuilder.Sum(buf.Bytes()) if err != nil { return nil, err } @@ -173,53 +190,60 @@ func CidArrsEqual(a, b []cid.Cid) bool { return true } +func CidArrsSubset(a, b []cid.Cid) bool { + // order ignoring compare... + s := make(map[cid.Cid]bool) + for _, c := range b { + s[c] = true + } + + for _, c := range a { + if !s[c] { + return false + } + } + return true +} + +func CidArrsContains(a []cid.Cid, b cid.Cid) bool { + for _, elem := range a { + if elem.Equals(b) { + return true + } + } + return false +} + var blocksPerEpoch = NewInt(build.BlocksPerEpoch) const sha256bits = 256 -func IsTicketWinner(partialTicket []byte, ssizeI uint64, snum uint64, totpow BigInt) bool { - ssize := NewInt(ssizeI) - ssampled := ElectionPostChallengeCount(snum, 0) // TODO: faults in epost? +func IsTicketWinner(vrfTicket []byte, mypow BigInt, totpow BigInt) bool { /* Need to check that - (h(vrfout) + 1) / (max(h) + 1) <= e * sectorSize / totalPower + (h(vrfout) + 1) / (max(h) + 1) <= e * myPower / totalPower max(h) == 2^256-1 which in terms of integer math means: - (h(vrfout) + 1) * totalPower <= e * sectorSize * 2^256 + (h(vrfout) + 1) * totalPower <= e * myPower * 2^256 in 2^256 space, it is equivalent to: - h(vrfout) * totalPower < e * sectorSize * 2^256 + h(vrfout) * totalPower < e * myPower * 2^256 - Because of SectorChallengeRatioDiv sampling for proofs - we need to scale this appropriately. 
- - Let c = ceil(numSectors/SectorChallengeRatioDiv) - (c is the number of tickets a miner requests) - Accordingly we check - (h(vrfout) + 1) / 2^256 <= e * sectorSize / totalPower * snum / c - or - h(vrfout) * totalPower * c < e * sectorSize * 2^256 * snum */ - h := sha256.Sum256(partialTicket) + h := blake2b.Sum256(vrfTicket) lhs := BigFromBytes(h[:]).Int lhs = lhs.Mul(lhs, totpow.Int) - lhs = lhs.Mul(lhs, new(big.Int).SetUint64(ssampled)) // rhs = sectorSize * 2^256 // rhs = sectorSize << 256 - rhs := new(big.Int).Lsh(ssize.Int, sha256bits) - rhs = rhs.Mul(rhs, new(big.Int).SetUint64(snum)) + rhs := new(big.Int).Lsh(mypow.Int, sha256bits) rhs = rhs.Mul(rhs, blocksPerEpoch.Int) // h(vrfout) * totalPower < e * sectorSize * 2^256? return lhs.Cmp(rhs) < 0 } -func ElectionPostChallengeCount(sectors uint64, faults int) uint64 { - return sectorbuilder.ElectionPostChallengeCount(sectors, faults) -} - func (t *Ticket) Equals(ot *Ticket) bool { return bytes.Equal(t.VRFProof, ot.VRFProof) } diff --git a/chain/types/blockheader_test.go b/chain/types/blockheader_test.go index ee9ca8192..f5faac3b3 100644 --- a/chain/types/blockheader_test.go +++ b/chain/types/blockheader_test.go @@ -2,12 +2,19 @@ package types import ( "bytes" + "encoding/hex" "fmt" "reflect" "testing" - "github.com/filecoin-project/go-address" + "github.com/filecoin-project/specs-actors/actors/runtime/proof" + cid "github.com/ipfs/go-cid" + "github.com/stretchr/testify/require" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/crypto" ) func testBlockHeader(t testing.TB) *BlockHeader { @@ -25,21 +32,21 @@ func testBlockHeader(t testing.TB) *BlockHeader { return &BlockHeader{ Miner: addr, - EPostProof: EPostProof{ - Proof: []byte("pruuf"), - PostRand: []byte("random"), - }, Ticket: &Ticket{ VRFProof: []byte("vrf proof0000000vrf proof0000000"), }, + ElectionProof: &ElectionProof{ + VRFProof: []byte("vrf proof0000000vrf 
proof0000000"), + }, Parents: []cid.Cid{c, c}, ParentMessageReceipts: c, - BLSAggregate: Signature{Type: KTBLS, Data: []byte("boo! im a signature")}, + BLSAggregate: &crypto.Signature{Type: crypto.SigTypeBLS, Data: []byte("boo! im a signature")}, ParentWeight: NewInt(123125126212), Messages: c, Height: 85919298723, ParentStateRoot: c, - BlockSig: &Signature{Type: KTBLS, Data: []byte("boo! im a signature")}, + BlockSig: &crypto.Signature{Type: crypto.SigTypeBLS, Data: []byte("boo! im a signature")}, + ParentBaseFee: NewInt(3432432843291), } } @@ -63,6 +70,60 @@ func TestBlockHeaderSerialization(t *testing.T) { } } +func TestInteropBH(t *testing.T) { + newAddr, err := address.NewSecp256k1Address([]byte("address0")) + + if err != nil { + t.Fatal(err) + } + + mcid, err := cid.Parse("bafy2bzaceaxyj7xq27gc2747adjcirpxx52tt7owqx6z6kckun7tqivvoym4y") + if err != nil { + t.Fatal(err) + } + + posts := []proof.PoStProof{ + {PoStProof: abi.RegisteredPoStProof_StackedDrgWinning2KiBV1, ProofBytes: []byte{0x07}}, + } + + bh := &BlockHeader{ + Miner: newAddr, + Ticket: &Ticket{[]byte{0x01, 0x02, 0x03}}, + ElectionProof: &ElectionProof{0, []byte{0x0a, 0x0b}}, + BeaconEntries: []BeaconEntry{ + { + Round: 5, + Data: []byte{0x0c}, + //prevRound: 0, + }, + }, + Height: 2, + Messages: mcid, + ParentMessageReceipts: mcid, + Parents: []cid.Cid{mcid}, + ParentWeight: NewInt(1000), + ForkSignaling: 3, + ParentStateRoot: mcid, + Timestamp: 1, + WinPoStProof: posts, + BlockSig: &crypto.Signature{ + Type: crypto.SigTypeBLS, + Data: []byte{0x3}, + }, + BLSAggregate: &crypto.Signature{}, + ParentBaseFee: NewInt(1000000000), + } + + bhsb, err := bh.SigningBytes() + + if err != nil { + t.Fatal(err) + } + + gfc := 
"905501d04cb15021bf6bd003073d79e2238d4e61f1ad2281430102038200420a0b818205410c818200410781d82a5827000171a0e402202f84fef0d7cc2d7f9f00d22445f7bf7539fdd685fd9f284aa37f3822b57619cc430003e802d82a5827000171a0e402202f84fef0d7cc2d7f9f00d22445f7bf7539fdd685fd9f284aa37f3822b57619ccd82a5827000171a0e402202f84fef0d7cc2d7f9f00d22445f7bf7539fdd685fd9f284aa37f3822b57619ccd82a5827000171a0e402202f84fef0d7cc2d7f9f00d22445f7bf7539fdd685fd9f284aa37f3822b57619cc410001f60345003b9aca00" + require.Equal(t, gfc, hex.EncodeToString(bhsb)) +} + func BenchmarkBlockHeaderMarshal(b *testing.B) { bh := testBlockHeader(b) diff --git a/chain/types/cbor_gen.go b/chain/types/cbor_gen.go index 407f89809..f95df33bc 100644 --- a/chain/types/cbor_gen.go +++ b/chain/types/cbor_gen.go @@ -1,28 +1,35 @@ +// Code generated by github.com/whyrusleeping/cbor-gen. DO NOT EDIT. + package types import ( "fmt" "io" - "math" + abi "github.com/filecoin-project/go-state-types/abi" + crypto "github.com/filecoin-project/go-state-types/crypto" + exitcode "github.com/filecoin-project/go-state-types/exitcode" + proof "github.com/filecoin-project/specs-actors/actors/runtime/proof" cid "github.com/ipfs/go-cid" cbg "github.com/whyrusleeping/cbor-gen" xerrors "golang.org/x/xerrors" ) -// Code generated by github.com/whyrusleeping/cbor-gen. DO NOT EDIT. 
- var _ = xerrors.Errorf +var lengthBufBlockHeader = []byte{144} + func (t *BlockHeader) MarshalCBOR(w io.Writer) error { if t == nil { _, err := w.Write(cbg.CborNull) return err } - if _, err := w.Write([]byte{140}); err != nil { + if _, err := w.Write(lengthBufBlockHeader); err != nil { return err } + scratch := make([]byte, 9) + // t.Miner (address.Address) (struct) if err := t.Miner.MarshalCBOR(w); err != nil { return err @@ -33,74 +40,123 @@ func (t *BlockHeader) MarshalCBOR(w io.Writer) error { return err } - // t.EPostProof (types.EPostProof) (struct) - if err := t.EPostProof.MarshalCBOR(w); err != nil { + // t.ElectionProof (types.ElectionProof) (struct) + if err := t.ElectionProof.MarshalCBOR(w); err != nil { return err } + // t.BeaconEntries ([]types.BeaconEntry) (slice) + if len(t.BeaconEntries) > cbg.MaxLength { + return xerrors.Errorf("Slice value in field t.BeaconEntries was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.BeaconEntries))); err != nil { + return err + } + for _, v := range t.BeaconEntries { + if err := v.MarshalCBOR(w); err != nil { + return err + } + } + + // t.WinPoStProof ([]proof.PoStProof) (slice) + if len(t.WinPoStProof) > cbg.MaxLength { + return xerrors.Errorf("Slice value in field t.WinPoStProof was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.WinPoStProof))); err != nil { + return err + } + for _, v := range t.WinPoStProof { + if err := v.MarshalCBOR(w); err != nil { + return err + } + } + // t.Parents ([]cid.Cid) (slice) if len(t.Parents) > cbg.MaxLength { return xerrors.Errorf("Slice value in field t.Parents was too long") } - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajArray, uint64(len(t.Parents)))); err != nil { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.Parents))); err != nil { return err } for _, v := range t.Parents { - if err := cbg.WriteCid(w, v); err != nil { + if err := 
cbg.WriteCidBuf(scratch, w, v); err != nil { return xerrors.Errorf("failed writing cid field t.Parents: %w", err) } } - // t.ParentWeight (types.BigInt) (struct) + // t.ParentWeight (big.Int) (struct) if err := t.ParentWeight.MarshalCBOR(w); err != nil { return err } - // t.Height (uint64) (uint64) - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajUnsignedInt, uint64(t.Height))); err != nil { - return err + // t.Height (abi.ChainEpoch) (int64) + if t.Height >= 0 { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Height)); err != nil { + return err + } + } else { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.Height-1)); err != nil { + return err + } } // t.ParentStateRoot (cid.Cid) (struct) - if err := cbg.WriteCid(w, t.ParentStateRoot); err != nil { + if err := cbg.WriteCidBuf(scratch, w, t.ParentStateRoot); err != nil { return xerrors.Errorf("failed to write cid field t.ParentStateRoot: %w", err) } // t.ParentMessageReceipts (cid.Cid) (struct) - if err := cbg.WriteCid(w, t.ParentMessageReceipts); err != nil { + if err := cbg.WriteCidBuf(scratch, w, t.ParentMessageReceipts); err != nil { return xerrors.Errorf("failed to write cid field t.ParentMessageReceipts: %w", err) } // t.Messages (cid.Cid) (struct) - if err := cbg.WriteCid(w, t.Messages); err != nil { + if err := cbg.WriteCidBuf(scratch, w, t.Messages); err != nil { return xerrors.Errorf("failed to write cid field t.Messages: %w", err) } - // t.BLSAggregate (types.Signature) (struct) + // t.BLSAggregate (crypto.Signature) (struct) if err := t.BLSAggregate.MarshalCBOR(w); err != nil { return err } // t.Timestamp (uint64) (uint64) - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajUnsignedInt, uint64(t.Timestamp))); err != nil { + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Timestamp)); err != nil { return err } - // t.BlockSig (types.Signature) (struct) + // t.BlockSig (crypto.Signature) (struct) if 
err := t.BlockSig.MarshalCBOR(w); err != nil { return err } + + // t.ForkSignaling (uint64) (uint64) + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.ForkSignaling)); err != nil { + return err + } + + // t.ParentBaseFee (big.Int) (struct) + if err := t.ParentBaseFee.MarshalCBOR(w); err != nil { + return err + } return nil } func (t *BlockHeader) UnmarshalCBOR(r io.Reader) error { - br := cbg.GetPeeker(r) + *t = BlockHeader{} - maj, extra, err := cbg.CborReadHeader(br) + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) if err != nil { return err } @@ -108,7 +164,7 @@ func (t *BlockHeader) UnmarshalCBOR(r io.Reader) error { return fmt.Errorf("cbor input should be of type array") } - if extra != 12 { + if extra != 16 { return fmt.Errorf("cbor input had wrong number of fields") } @@ -117,7 +173,7 @@ func (t *BlockHeader) UnmarshalCBOR(r io.Reader) error { { if err := t.Miner.UnmarshalCBOR(br); err != nil { - return err + return xerrors.Errorf("unmarshaling t.Miner: %w", err) } } @@ -125,35 +181,101 @@ func (t *BlockHeader) UnmarshalCBOR(r io.Reader) error { { - pb, err := br.PeekByte() + b, err := br.ReadByte() if err != nil { return err } - if pb == cbg.CborNull[0] { - var nbuf [1]byte - if _, err := br.Read(nbuf[:]); err != nil { + if b != cbg.CborNull[0] { + if err := br.UnreadByte(); err != nil { return err } - } else { t.Ticket = new(Ticket) if err := t.Ticket.UnmarshalCBOR(br); err != nil { - return err + return xerrors.Errorf("unmarshaling t.Ticket pointer: %w", err) } } } - // t.EPostProof (types.EPostProof) (struct) + // t.ElectionProof (types.ElectionProof) (struct) { - if err := t.EPostProof.UnmarshalCBOR(br); err != nil { + b, err := br.ReadByte() + if err != nil { return err } + if b != cbg.CborNull[0] { + if err := br.UnreadByte(); err != nil { + return err + } + t.ElectionProof = new(ElectionProof) + if err := t.ElectionProof.UnmarshalCBOR(br); err != nil { + 
return xerrors.Errorf("unmarshaling t.ElectionProof pointer: %w", err) + } + } } + // t.BeaconEntries ([]types.BeaconEntry) (slice) + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + + if extra > cbg.MaxLength { + return fmt.Errorf("t.BeaconEntries: array too large (%d)", extra) + } + + if maj != cbg.MajArray { + return fmt.Errorf("expected cbor array") + } + + if extra > 0 { + t.BeaconEntries = make([]BeaconEntry, extra) + } + + for i := 0; i < int(extra); i++ { + + var v BeaconEntry + if err := v.UnmarshalCBOR(br); err != nil { + return err + } + + t.BeaconEntries[i] = v + } + + // t.WinPoStProof ([]proof.PoStProof) (slice) + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + + if extra > cbg.MaxLength { + return fmt.Errorf("t.WinPoStProof: array too large (%d)", extra) + } + + if maj != cbg.MajArray { + return fmt.Errorf("expected cbor array") + } + + if extra > 0 { + t.WinPoStProof = make([]proof.PoStProof, extra) + } + + for i := 0; i < int(extra); i++ { + + var v proof.PoStProof + if err := v.UnmarshalCBOR(br); err != nil { + return err + } + + t.WinPoStProof[i] = v + } + // t.Parents ([]cid.Cid) (slice) - maj, extra, err = cbg.CborReadHeader(br) + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) if err != nil { return err } @@ -165,9 +287,11 @@ func (t *BlockHeader) UnmarshalCBOR(r io.Reader) error { if maj != cbg.MajArray { return fmt.Errorf("expected cbor array") } + if extra > 0 { t.Parents = make([]cid.Cid, extra) } + for i := 0; i < int(extra); i++ { c, err := cbg.ReadCid(br) @@ -177,25 +301,40 @@ func (t *BlockHeader) UnmarshalCBOR(r io.Reader) error { t.Parents[i] = c } - // t.ParentWeight (types.BigInt) (struct) + // t.ParentWeight (big.Int) (struct) { if err := t.ParentWeight.UnmarshalCBOR(br); err != nil { - return err + return xerrors.Errorf("unmarshaling t.ParentWeight: %w", err) } } - // t.Height (uint64) (uint64) + // t.Height (abi.ChainEpoch) (int64) + { + 
maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + var extraI int64 + if err != nil { + return err + } + switch maj { + case cbg.MajUnsignedInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 positive overflow") + } + case cbg.MajNegativeInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 negative oveflow") + } + extraI = -1 - extraI + default: + return fmt.Errorf("wrong type for int64 field: %d", maj) + } - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err + t.Height = abi.ChainEpoch(extraI) } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.Height = uint64(extra) // t.ParentStateRoot (cid.Cid) (struct) { @@ -232,76 +371,119 @@ func (t *BlockHeader) UnmarshalCBOR(r io.Reader) error { t.Messages = c } - // t.BLSAggregate (types.Signature) (struct) + // t.BLSAggregate (crypto.Signature) (struct) { - if err := t.BLSAggregate.UnmarshalCBOR(br); err != nil { + b, err := br.ReadByte() + if err != nil { return err } + if b != cbg.CborNull[0] { + if err := br.UnreadByte(); err != nil { + return err + } + t.BLSAggregate = new(crypto.Signature) + if err := t.BLSAggregate.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.BLSAggregate pointer: %w", err) + } + } } // t.Timestamp (uint64) (uint64) - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.Timestamp = uint64(extra) - // t.BlockSig (types.Signature) (struct) - { - pb, err := br.PeekByte() + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) if err != nil { return err } - if pb == cbg.CborNull[0] { - var nbuf [1]byte - if _, err := br.Read(nbuf[:]); err != nil { + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.Timestamp = uint64(extra) + + } + // t.BlockSig (crypto.Signature) (struct) + + { + + b, err := br.ReadByte() + if err 
!= nil { + return err + } + if b != cbg.CborNull[0] { + if err := br.UnreadByte(); err != nil { return err } - } else { - t.BlockSig = new(Signature) + t.BlockSig = new(crypto.Signature) if err := t.BlockSig.UnmarshalCBOR(br); err != nil { - return err + return xerrors.Errorf("unmarshaling t.BlockSig pointer: %w", err) } } + } + // t.ForkSignaling (uint64) (uint64) + + { + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.ForkSignaling = uint64(extra) + + } + // t.ParentBaseFee (big.Int) (struct) + + { + + if err := t.ParentBaseFee.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.ParentBaseFee: %w", err) + } + } return nil } +var lengthBufTicket = []byte{129} + func (t *Ticket) MarshalCBOR(w io.Writer) error { if t == nil { _, err := w.Write(cbg.CborNull) return err } - if _, err := w.Write([]byte{129}); err != nil { + if _, err := w.Write(lengthBufTicket); err != nil { return err } + scratch := make([]byte, 9) + // t.VRFProof ([]uint8) (slice) if len(t.VRFProof) > cbg.ByteArrayMaxLen { return xerrors.Errorf("Byte array in field t.VRFProof was too long") } - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajByteString, uint64(len(t.VRFProof)))); err != nil { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajByteString, uint64(len(t.VRFProof))); err != nil { return err } - if _, err := w.Write(t.VRFProof); err != nil { + + if _, err := w.Write(t.VRFProof[:]); err != nil { return err } return nil } func (t *Ticket) UnmarshalCBOR(r io.Reader) error { - br := cbg.GetPeeker(r) + *t = Ticket{} - maj, extra, err := cbg.CborReadHeader(br) + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) if err != nil { return err } @@ -315,7 +497,7 @@ func (t *Ticket) UnmarshalCBOR(r io.Reader) error { // t.VRFProof ([]uint8) (slice) - maj, extra, err = 
cbg.CborReadHeader(br) + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) if err != nil { return err } @@ -326,178 +508,63 @@ func (t *Ticket) UnmarshalCBOR(r io.Reader) error { if maj != cbg.MajByteString { return fmt.Errorf("expected byte array") } - t.VRFProof = make([]byte, extra) - if _, err := io.ReadFull(br, t.VRFProof); err != nil { - return err - } - return nil -} -func (t *EPostProof) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write([]byte{131}); err != nil { - return err - } - - // t.Proof ([]uint8) (slice) - if len(t.Proof) > cbg.ByteArrayMaxLen { - return xerrors.Errorf("Byte array in field t.Proof was too long") - } - - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajByteString, uint64(len(t.Proof)))); err != nil { - return err - } - if _, err := w.Write(t.Proof); err != nil { - return err - } - - // t.PostRand ([]uint8) (slice) - if len(t.PostRand) > cbg.ByteArrayMaxLen { - return xerrors.Errorf("Byte array in field t.PostRand was too long") - } - - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajByteString, uint64(len(t.PostRand)))); err != nil { - return err - } - if _, err := w.Write(t.PostRand); err != nil { - return err - } - - // t.Candidates ([]types.EPostTicket) (slice) - if len(t.Candidates) > cbg.MaxLength { - return xerrors.Errorf("Slice value in field t.Candidates was too long") - } - - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajArray, uint64(len(t.Candidates)))); err != nil { - return err - } - for _, v := range t.Candidates { - if err := v.MarshalCBOR(w); err != nil { - return err - } - } - return nil -} - -func (t *EPostProof) UnmarshalCBOR(r io.Reader) error { - br := cbg.GetPeeker(r) - - maj, extra, err := cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 3 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // 
t.Proof ([]uint8) (slice) - - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err - } - - if extra > cbg.ByteArrayMaxLen { - return fmt.Errorf("t.Proof: byte array too large (%d)", extra) - } - if maj != cbg.MajByteString { - return fmt.Errorf("expected byte array") - } - t.Proof = make([]byte, extra) - if _, err := io.ReadFull(br, t.Proof); err != nil { - return err - } - // t.PostRand ([]uint8) (slice) - - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err - } - - if extra > cbg.ByteArrayMaxLen { - return fmt.Errorf("t.PostRand: byte array too large (%d)", extra) - } - if maj != cbg.MajByteString { - return fmt.Errorf("expected byte array") - } - t.PostRand = make([]byte, extra) - if _, err := io.ReadFull(br, t.PostRand); err != nil { - return err - } - // t.Candidates ([]types.EPostTicket) (slice) - - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err - } - - if extra > cbg.MaxLength { - return fmt.Errorf("t.Candidates: array too large (%d)", extra) - } - - if maj != cbg.MajArray { - return fmt.Errorf("expected cbor array") - } if extra > 0 { - t.Candidates = make([]EPostTicket, extra) - } - for i := 0; i < int(extra); i++ { - - var v EPostTicket - if err := v.UnmarshalCBOR(br); err != nil { - return err - } - - t.Candidates[i] = v + t.VRFProof = make([]uint8, extra) } + if _, err := io.ReadFull(br, t.VRFProof[:]); err != nil { + return err + } return nil } -func (t *EPostTicket) MarshalCBOR(w io.Writer) error { +var lengthBufElectionProof = []byte{130} + +func (t *ElectionProof) MarshalCBOR(w io.Writer) error { if t == nil { _, err := w.Write(cbg.CborNull) return err } - if _, err := w.Write([]byte{131}); err != nil { + if _, err := w.Write(lengthBufElectionProof); err != nil { return err } - // t.Partial ([]uint8) (slice) - if len(t.Partial) > cbg.ByteArrayMaxLen { - return xerrors.Errorf("Byte array in field t.Partial was too long") + scratch := make([]byte, 9) + + // t.WinCount (int64) (int64) + 
if t.WinCount >= 0 { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.WinCount)); err != nil { + return err + } + } else { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.WinCount-1)); err != nil { + return err + } } - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajByteString, uint64(len(t.Partial)))); err != nil { - return err + // t.VRFProof ([]uint8) (slice) + if len(t.VRFProof) > cbg.ByteArrayMaxLen { + return xerrors.Errorf("Byte array in field t.VRFProof was too long") } - if _, err := w.Write(t.Partial); err != nil { + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajByteString, uint64(len(t.VRFProof))); err != nil { return err } - // t.SectorID (uint64) (uint64) - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajUnsignedInt, uint64(t.SectorID))); err != nil { - return err - } - - // t.ChallengeIndex (uint64) (uint64) - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajUnsignedInt, uint64(t.ChallengeIndex))); err != nil { + if _, err := w.Write(t.VRFProof[:]); err != nil { return err } return nil } -func (t *EPostTicket) UnmarshalCBOR(r io.Reader) error { - br := cbg.GetPeeker(r) +func (t *ElectionProof) UnmarshalCBOR(r io.Reader) error { + *t = ElectionProof{} - maj, extra, err := cbg.CborReadHeader(br) + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) if err != nil { return err } @@ -505,56 +572,75 @@ func (t *EPostTicket) UnmarshalCBOR(r io.Reader) error { return fmt.Errorf("cbor input should be of type array") } - if extra != 3 { + if extra != 2 { return fmt.Errorf("cbor input had wrong number of fields") } - // t.Partial ([]uint8) (slice) + // t.WinCount (int64) (int64) + { + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + var extraI int64 + if err != nil { + return err + } + switch maj { + case cbg.MajUnsignedInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 positive 
overflow") + } + case cbg.MajNegativeInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 negative oveflow") + } + extraI = -1 - extraI + default: + return fmt.Errorf("wrong type for int64 field: %d", maj) + } - maj, extra, err = cbg.CborReadHeader(br) + t.WinCount = int64(extraI) + } + // t.VRFProof ([]uint8) (slice) + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) if err != nil { return err } if extra > cbg.ByteArrayMaxLen { - return fmt.Errorf("t.Partial: byte array too large (%d)", extra) + return fmt.Errorf("t.VRFProof: byte array too large (%d)", extra) } if maj != cbg.MajByteString { return fmt.Errorf("expected byte array") } - t.Partial = make([]byte, extra) - if _, err := io.ReadFull(br, t.Partial); err != nil { - return err - } - // t.SectorID (uint64) (uint64) - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err + if extra > 0 { + t.VRFProof = make([]uint8, extra) } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.SectorID = uint64(extra) - // t.ChallengeIndex (uint64) (uint64) - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { + if _, err := io.ReadFull(br, t.VRFProof[:]); err != nil { return err } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.ChallengeIndex = uint64(extra) return nil } +var lengthBufMessage = []byte{138} + func (t *Message) MarshalCBOR(w io.Writer) error { if t == nil { _, err := w.Write(cbg.CborNull) return err } - if _, err := w.Write([]byte{136}); err != nil { + if _, err := w.Write(lengthBufMessage); err != nil { + return err + } + + scratch := make([]byte, 9) + + // t.Version (uint64) (uint64) + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Version)); err != nil { return err } @@ -569,27 +655,40 @@ func (t *Message) MarshalCBOR(w io.Writer) error { } // t.Nonce (uint64) (uint64) - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajUnsignedInt, 
uint64(t.Nonce))); err != nil { + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Nonce)); err != nil { return err } - // t.Value (types.BigInt) (struct) + // t.Value (big.Int) (struct) if err := t.Value.MarshalCBOR(w); err != nil { return err } - // t.GasPrice (types.BigInt) (struct) - if err := t.GasPrice.MarshalCBOR(w); err != nil { + // t.GasLimit (int64) (int64) + if t.GasLimit >= 0 { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.GasLimit)); err != nil { + return err + } + } else { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.GasLimit-1)); err != nil { + return err + } + } + + // t.GasFeeCap (big.Int) (struct) + if err := t.GasFeeCap.MarshalCBOR(w); err != nil { return err } - // t.GasLimit (types.BigInt) (struct) - if err := t.GasLimit.MarshalCBOR(w); err != nil { + // t.GasPremium (big.Int) (struct) + if err := t.GasPremium.MarshalCBOR(w); err != nil { return err } - // t.Method (uint64) (uint64) - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajUnsignedInt, uint64(t.Method))); err != nil { + // t.Method (abi.MethodNum) (uint64) + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Method)); err != nil { return err } @@ -598,19 +697,23 @@ func (t *Message) MarshalCBOR(w io.Writer) error { return xerrors.Errorf("Byte array in field t.Params was too long") } - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajByteString, uint64(len(t.Params)))); err != nil { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajByteString, uint64(len(t.Params))); err != nil { return err } - if _, err := w.Write(t.Params); err != nil { + + if _, err := w.Write(t.Params[:]); err != nil { return err } return nil } func (t *Message) UnmarshalCBOR(r io.Reader) error { - br := cbg.GetPeeker(r) + *t = Message{} - maj, extra, err := cbg.CborReadHeader(br) + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := 
cbg.CborReadHeaderBuf(br, scratch) if err != nil { return err } @@ -618,16 +721,30 @@ func (t *Message) UnmarshalCBOR(r io.Reader) error { return fmt.Errorf("cbor input should be of type array") } - if extra != 8 { + if extra != 10 { return fmt.Errorf("cbor input had wrong number of fields") } + // t.Version (uint64) (uint64) + + { + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.Version = uint64(extra) + + } // t.To (address.Address) (struct) { if err := t.To.UnmarshalCBOR(br); err != nil { - return err + return xerrors.Errorf("unmarshaling t.To: %w", err) } } @@ -636,60 +753,93 @@ func (t *Message) UnmarshalCBOR(r io.Reader) error { { if err := t.From.UnmarshalCBOR(br); err != nil { - return err + return xerrors.Errorf("unmarshaling t.From: %w", err) } } // t.Nonce (uint64) (uint64) - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err + { + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.Nonce = uint64(extra) + } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.Nonce = uint64(extra) - // t.Value (types.BigInt) (struct) + // t.Value (big.Int) (struct) { if err := t.Value.UnmarshalCBOR(br); err != nil { - return err + return xerrors.Errorf("unmarshaling t.Value: %w", err) } } - // t.GasPrice (types.BigInt) (struct) + // t.GasLimit (int64) (int64) + { + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + var extraI int64 + if err != nil { + return err + } + switch maj { + case cbg.MajUnsignedInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 positive overflow") + } + case cbg.MajNegativeInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 negative oveflow") + } + extraI = -1 - extraI + 
default: + return fmt.Errorf("wrong type for int64 field: %d", maj) + } + + t.GasLimit = int64(extraI) + } + // t.GasFeeCap (big.Int) (struct) { - if err := t.GasPrice.UnmarshalCBOR(br); err != nil { - return err + if err := t.GasFeeCap.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.GasFeeCap: %w", err) } } - // t.GasLimit (types.BigInt) (struct) + // t.GasPremium (big.Int) (struct) { - if err := t.GasLimit.UnmarshalCBOR(br); err != nil { - return err + if err := t.GasPremium.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.GasPremium: %w", err) } } - // t.Method (uint64) (uint64) + // t.Method (abi.MethodNum) (uint64) + + { + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.Method = abi.MethodNum(extra) - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.Method = uint64(extra) // t.Params ([]uint8) (slice) - maj, extra, err = cbg.CborReadHeader(br) + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) if err != nil { return err } @@ -700,19 +850,25 @@ func (t *Message) UnmarshalCBOR(r io.Reader) error { if maj != cbg.MajByteString { return fmt.Errorf("expected byte array") } - t.Params = make([]byte, extra) - if _, err := io.ReadFull(br, t.Params); err != nil { + + if extra > 0 { + t.Params = make([]uint8, extra) + } + + if _, err := io.ReadFull(br, t.Params[:]); err != nil { return err } return nil } +var lengthBufSignedMessage = []byte{130} + func (t *SignedMessage) MarshalCBOR(w io.Writer) error { if t == nil { _, err := w.Write(cbg.CborNull) return err } - if _, err := w.Write([]byte{130}); err != nil { + if _, err := w.Write(lengthBufSignedMessage); err != nil { return err } @@ -721,7 +877,7 @@ func (t *SignedMessage) MarshalCBOR(w io.Writer) error { return err } - 
// t.Signature (types.Signature) (struct) + // t.Signature (crypto.Signature) (struct) if err := t.Signature.MarshalCBOR(w); err != nil { return err } @@ -729,9 +885,12 @@ func (t *SignedMessage) MarshalCBOR(w io.Writer) error { } func (t *SignedMessage) UnmarshalCBOR(r io.Reader) error { - br := cbg.GetPeeker(r) + *t = SignedMessage{} - maj, extra, err := cbg.CborReadHeader(br) + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) if err != nil { return err } @@ -748,40 +907,44 @@ func (t *SignedMessage) UnmarshalCBOR(r io.Reader) error { { if err := t.Message.UnmarshalCBOR(br); err != nil { - return err + return xerrors.Errorf("unmarshaling t.Message: %w", err) } } - // t.Signature (types.Signature) (struct) + // t.Signature (crypto.Signature) (struct) { if err := t.Signature.UnmarshalCBOR(br); err != nil { - return err + return xerrors.Errorf("unmarshaling t.Signature: %w", err) } } return nil } +var lengthBufMsgMeta = []byte{130} + func (t *MsgMeta) MarshalCBOR(w io.Writer) error { if t == nil { _, err := w.Write(cbg.CborNull) return err } - if _, err := w.Write([]byte{130}); err != nil { + if _, err := w.Write(lengthBufMsgMeta); err != nil { return err } + scratch := make([]byte, 9) + // t.BlsMessages (cid.Cid) (struct) - if err := cbg.WriteCid(w, t.BlsMessages); err != nil { + if err := cbg.WriteCidBuf(scratch, w, t.BlsMessages); err != nil { return xerrors.Errorf("failed to write cid field t.BlsMessages: %w", err) } // t.SecpkMessages (cid.Cid) (struct) - if err := cbg.WriteCid(w, t.SecpkMessages); err != nil { + if err := cbg.WriteCidBuf(scratch, w, t.SecpkMessages); err != nil { return xerrors.Errorf("failed to write cid field t.SecpkMessages: %w", err) } @@ -789,9 +952,12 @@ func (t *MsgMeta) MarshalCBOR(w io.Writer) error { } func (t *MsgMeta) UnmarshalCBOR(r io.Reader) error { - br := cbg.GetPeeker(r) + *t = MsgMeta{} - maj, extra, err := cbg.CborReadHeader(br) + br := cbg.GetPeeker(r) + scratch 
:= make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) if err != nil { return err } @@ -830,404 +996,38 @@ func (t *MsgMeta) UnmarshalCBOR(r io.Reader) error { return nil } -func (t *SignedVoucher) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write([]byte{137}); err != nil { - return err - } - - // t.TimeLock (uint64) (uint64) - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajUnsignedInt, uint64(t.TimeLock))); err != nil { - return err - } - - // t.SecretPreimage ([]uint8) (slice) - if len(t.SecretPreimage) > cbg.ByteArrayMaxLen { - return xerrors.Errorf("Byte array in field t.SecretPreimage was too long") - } - - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajByteString, uint64(len(t.SecretPreimage)))); err != nil { - return err - } - if _, err := w.Write(t.SecretPreimage); err != nil { - return err - } - - // t.Extra (types.ModVerifyParams) (struct) - if err := t.Extra.MarshalCBOR(w); err != nil { - return err - } - - // t.Lane (uint64) (uint64) - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajUnsignedInt, uint64(t.Lane))); err != nil { - return err - } - - // t.Nonce (uint64) (uint64) - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajUnsignedInt, uint64(t.Nonce))); err != nil { - return err - } - - // t.Amount (types.BigInt) (struct) - if err := t.Amount.MarshalCBOR(w); err != nil { - return err - } - - // t.MinCloseHeight (uint64) (uint64) - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajUnsignedInt, uint64(t.MinCloseHeight))); err != nil { - return err - } - - // t.Merges ([]types.Merge) (slice) - if len(t.Merges) > cbg.MaxLength { - return xerrors.Errorf("Slice value in field t.Merges was too long") - } - - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajArray, uint64(len(t.Merges)))); err != nil { - return err - } - for _, v := range t.Merges { - if err := v.MarshalCBOR(w); err != nil { - return err - } - } - - // t.Signature 
(types.Signature) (struct) - if err := t.Signature.MarshalCBOR(w); err != nil { - return err - } - return nil -} - -func (t *SignedVoucher) UnmarshalCBOR(r io.Reader) error { - br := cbg.GetPeeker(r) - - maj, extra, err := cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 9 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.TimeLock (uint64) (uint64) - - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.TimeLock = uint64(extra) - // t.SecretPreimage ([]uint8) (slice) - - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err - } - - if extra > cbg.ByteArrayMaxLen { - return fmt.Errorf("t.SecretPreimage: byte array too large (%d)", extra) - } - if maj != cbg.MajByteString { - return fmt.Errorf("expected byte array") - } - t.SecretPreimage = make([]byte, extra) - if _, err := io.ReadFull(br, t.SecretPreimage); err != nil { - return err - } - // t.Extra (types.ModVerifyParams) (struct) - - { - - pb, err := br.PeekByte() - if err != nil { - return err - } - if pb == cbg.CborNull[0] { - var nbuf [1]byte - if _, err := br.Read(nbuf[:]); err != nil { - return err - } - } else { - t.Extra = new(ModVerifyParams) - if err := t.Extra.UnmarshalCBOR(br); err != nil { - return err - } - } - - } - // t.Lane (uint64) (uint64) - - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.Lane = uint64(extra) - // t.Nonce (uint64) (uint64) - - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.Nonce = uint64(extra) - // t.Amount (types.BigInt) (struct) - - { - - if err := 
t.Amount.UnmarshalCBOR(br); err != nil { - return err - } - - } - // t.MinCloseHeight (uint64) (uint64) - - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.MinCloseHeight = uint64(extra) - // t.Merges ([]types.Merge) (slice) - - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err - } - - if extra > cbg.MaxLength { - return fmt.Errorf("t.Merges: array too large (%d)", extra) - } - - if maj != cbg.MajArray { - return fmt.Errorf("expected cbor array") - } - if extra > 0 { - t.Merges = make([]Merge, extra) - } - for i := 0; i < int(extra); i++ { - - var v Merge - if err := v.UnmarshalCBOR(br); err != nil { - return err - } - - t.Merges[i] = v - } - - // t.Signature (types.Signature) (struct) - - { - - pb, err := br.PeekByte() - if err != nil { - return err - } - if pb == cbg.CborNull[0] { - var nbuf [1]byte - if _, err := br.Read(nbuf[:]); err != nil { - return err - } - } else { - t.Signature = new(Signature) - if err := t.Signature.UnmarshalCBOR(br); err != nil { - return err - } - } - - } - return nil -} - -func (t *ModVerifyParams) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write([]byte{131}); err != nil { - return err - } - - // t.Actor (address.Address) (struct) - if err := t.Actor.MarshalCBOR(w); err != nil { - return err - } - - // t.Method (uint64) (uint64) - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajUnsignedInt, uint64(t.Method))); err != nil { - return err - } - - // t.Data ([]uint8) (slice) - if len(t.Data) > cbg.ByteArrayMaxLen { - return xerrors.Errorf("Byte array in field t.Data was too long") - } - - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajByteString, uint64(len(t.Data)))); err != nil { - return err - } - if _, err := w.Write(t.Data); err != nil { - return err - } - return nil -} - -func (t *ModVerifyParams) 
UnmarshalCBOR(r io.Reader) error { - br := cbg.GetPeeker(r) - - maj, extra, err := cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 3 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.Actor (address.Address) (struct) - - { - - if err := t.Actor.UnmarshalCBOR(br); err != nil { - return err - } - - } - // t.Method (uint64) (uint64) - - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.Method = uint64(extra) - // t.Data ([]uint8) (slice) - - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err - } - - if extra > cbg.ByteArrayMaxLen { - return fmt.Errorf("t.Data: byte array too large (%d)", extra) - } - if maj != cbg.MajByteString { - return fmt.Errorf("expected byte array") - } - t.Data = make([]byte, extra) - if _, err := io.ReadFull(br, t.Data); err != nil { - return err - } - return nil -} - -func (t *Merge) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write([]byte{130}); err != nil { - return err - } - - // t.Lane (uint64) (uint64) - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajUnsignedInt, uint64(t.Lane))); err != nil { - return err - } - - // t.Nonce (uint64) (uint64) - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajUnsignedInt, uint64(t.Nonce))); err != nil { - return err - } - return nil -} - -func (t *Merge) UnmarshalCBOR(r io.Reader) error { - br := cbg.GetPeeker(r) - - maj, extra, err := cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 2 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.Lane (uint64) (uint64) - - maj, extra, err = cbg.CborReadHeader(br) - if err != 
nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.Lane = uint64(extra) - // t.Nonce (uint64) (uint64) - - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.Nonce = uint64(extra) - return nil -} +var lengthBufActor = []byte{132} func (t *Actor) MarshalCBOR(w io.Writer) error { if t == nil { _, err := w.Write(cbg.CborNull) return err } - if _, err := w.Write([]byte{132}); err != nil { + if _, err := w.Write(lengthBufActor); err != nil { return err } + scratch := make([]byte, 9) + // t.Code (cid.Cid) (struct) - if err := cbg.WriteCid(w, t.Code); err != nil { + if err := cbg.WriteCidBuf(scratch, w, t.Code); err != nil { return xerrors.Errorf("failed to write cid field t.Code: %w", err) } // t.Head (cid.Cid) (struct) - if err := cbg.WriteCid(w, t.Head); err != nil { + if err := cbg.WriteCidBuf(scratch, w, t.Head); err != nil { return xerrors.Errorf("failed to write cid field t.Head: %w", err) } // t.Nonce (uint64) (uint64) - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajUnsignedInt, uint64(t.Nonce))); err != nil { + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Nonce)); err != nil { return err } - // t.Balance (types.BigInt) (struct) + // t.Balance (big.Int) (struct) if err := t.Balance.MarshalCBOR(w); err != nil { return err } @@ -1235,9 +1035,12 @@ func (t *Actor) MarshalCBOR(w io.Writer) error { } func (t *Actor) UnmarshalCBOR(r io.Reader) error { - br := cbg.GetPeeker(r) + *t = Actor{} - maj, extra, err := cbg.CborReadHeader(br) + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) if err != nil { return err } @@ -1275,38 +1078,52 @@ func (t *Actor) UnmarshalCBOR(r io.Reader) error { } // t.Nonce (uint64) (uint64) - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err + 
{ + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.Nonce = uint64(extra) + } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.Nonce = uint64(extra) - // t.Balance (types.BigInt) (struct) + // t.Balance (big.Int) (struct) { if err := t.Balance.UnmarshalCBOR(br); err != nil { - return err + return xerrors.Errorf("unmarshaling t.Balance: %w", err) } } return nil } +var lengthBufMessageReceipt = []byte{131} + func (t *MessageReceipt) MarshalCBOR(w io.Writer) error { if t == nil { _, err := w.Write(cbg.CborNull) return err } - if _, err := w.Write([]byte{131}); err != nil { + if _, err := w.Write(lengthBufMessageReceipt); err != nil { return err } - // t.ExitCode (uint8) (uint8) - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajUnsignedInt, uint64(t.ExitCode))); err != nil { - return err + scratch := make([]byte, 9) + + // t.ExitCode (exitcode.ExitCode) (int64) + if t.ExitCode >= 0 { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.ExitCode)); err != nil { + return err + } + } else { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.ExitCode-1)); err != nil { + return err + } } // t.Return ([]uint8) (slice) @@ -1314,24 +1131,34 @@ func (t *MessageReceipt) MarshalCBOR(w io.Writer) error { return xerrors.Errorf("Byte array in field t.Return was too long") } - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajByteString, uint64(len(t.Return)))); err != nil { - return err - } - if _, err := w.Write(t.Return); err != nil { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajByteString, uint64(len(t.Return))); err != nil { return err } - // t.GasUsed (types.BigInt) (struct) - if err := t.GasUsed.MarshalCBOR(w); err != nil { + if _, err := w.Write(t.Return[:]); err != nil { return err } + + // t.GasUsed (int64) (int64) + if 
t.GasUsed >= 0 { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.GasUsed)); err != nil { + return err + } + } else { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.GasUsed-1)); err != nil { + return err + } + } return nil } func (t *MessageReceipt) UnmarshalCBOR(r io.Reader) error { - br := cbg.GetPeeker(r) + *t = MessageReceipt{} - maj, extra, err := cbg.CborReadHeader(br) + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) if err != nil { return err } @@ -1343,22 +1170,34 @@ func (t *MessageReceipt) UnmarshalCBOR(r io.Reader) error { return fmt.Errorf("cbor input had wrong number of fields") } - // t.ExitCode (uint8) (uint8) + // t.ExitCode (exitcode.ExitCode) (int64) + { + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + var extraI int64 + if err != nil { + return err + } + switch maj { + case cbg.MajUnsignedInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 positive overflow") + } + case cbg.MajNegativeInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 negative oveflow") + } + extraI = -1 - extraI + default: + return fmt.Errorf("wrong type for int64 field: %d", maj) + } - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err + t.ExitCode = exitcode.ExitCode(extraI) } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint8 field") - } - if extra > math.MaxUint8 { - return fmt.Errorf("integer in input was too large for uint8 field") - } - t.ExitCode = uint8(extra) // t.Return ([]uint8) (slice) - maj, extra, err = cbg.CborReadHeader(br) + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) if err != nil { return err } @@ -1369,31 +1208,55 @@ func (t *MessageReceipt) UnmarshalCBOR(r io.Reader) error { if maj != cbg.MajByteString { return fmt.Errorf("expected byte array") } - t.Return = make([]byte, extra) - if _, err := io.ReadFull(br, t.Return); err 
!= nil { + + if extra > 0 { + t.Return = make([]uint8, extra) + } + + if _, err := io.ReadFull(br, t.Return[:]); err != nil { return err } - // t.GasUsed (types.BigInt) (struct) - + // t.GasUsed (int64) (int64) { - - if err := t.GasUsed.UnmarshalCBOR(br); err != nil { + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + var extraI int64 + if err != nil { return err } + switch maj { + case cbg.MajUnsignedInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 positive overflow") + } + case cbg.MajNegativeInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 negative oveflow") + } + extraI = -1 - extraI + default: + return fmt.Errorf("wrong type for int64 field: %d", maj) + } + t.GasUsed = int64(extraI) } return nil } +var lengthBufBlockMsg = []byte{131} + func (t *BlockMsg) MarshalCBOR(w io.Writer) error { if t == nil { _, err := w.Write(cbg.CborNull) return err } - if _, err := w.Write([]byte{131}); err != nil { + if _, err := w.Write(lengthBufBlockMsg); err != nil { return err } + scratch := make([]byte, 9) + // t.Header (types.BlockHeader) (struct) if err := t.Header.MarshalCBOR(w); err != nil { return err @@ -1404,11 +1267,11 @@ func (t *BlockMsg) MarshalCBOR(w io.Writer) error { return xerrors.Errorf("Slice value in field t.BlsMessages was too long") } - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajArray, uint64(len(t.BlsMessages)))); err != nil { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.BlsMessages))); err != nil { return err } for _, v := range t.BlsMessages { - if err := cbg.WriteCid(w, v); err != nil { + if err := cbg.WriteCidBuf(scratch, w, v); err != nil { return xerrors.Errorf("failed writing cid field t.BlsMessages: %w", err) } } @@ -1418,11 +1281,11 @@ func (t *BlockMsg) MarshalCBOR(w io.Writer) error { return xerrors.Errorf("Slice value in field t.SecpkMessages was too long") } - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajArray, 
uint64(len(t.SecpkMessages)))); err != nil { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.SecpkMessages))); err != nil { return err } for _, v := range t.SecpkMessages { - if err := cbg.WriteCid(w, v); err != nil { + if err := cbg.WriteCidBuf(scratch, w, v); err != nil { return xerrors.Errorf("failed writing cid field t.SecpkMessages: %w", err) } } @@ -1430,9 +1293,12 @@ func (t *BlockMsg) MarshalCBOR(w io.Writer) error { } func (t *BlockMsg) UnmarshalCBOR(r io.Reader) error { - br := cbg.GetPeeker(r) + *t = BlockMsg{} - maj, extra, err := cbg.CborReadHeader(br) + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) if err != nil { return err } @@ -1448,26 +1314,24 @@ func (t *BlockMsg) UnmarshalCBOR(r io.Reader) error { { - pb, err := br.PeekByte() + b, err := br.ReadByte() if err != nil { return err } - if pb == cbg.CborNull[0] { - var nbuf [1]byte - if _, err := br.Read(nbuf[:]); err != nil { + if b != cbg.CborNull[0] { + if err := br.UnreadByte(); err != nil { return err } - } else { t.Header = new(BlockHeader) if err := t.Header.UnmarshalCBOR(br); err != nil { - return err + return xerrors.Errorf("unmarshaling t.Header pointer: %w", err) } } } // t.BlsMessages ([]cid.Cid) (slice) - maj, extra, err = cbg.CborReadHeader(br) + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) if err != nil { return err } @@ -1479,9 +1343,11 @@ func (t *BlockMsg) UnmarshalCBOR(r io.Reader) error { if maj != cbg.MajArray { return fmt.Errorf("expected cbor array") } + if extra > 0 { t.BlsMessages = make([]cid.Cid, extra) } + for i := 0; i < int(extra); i++ { c, err := cbg.ReadCid(br) @@ -1493,7 +1359,7 @@ func (t *BlockMsg) UnmarshalCBOR(r io.Reader) error { // t.SecpkMessages ([]cid.Cid) (slice) - maj, extra, err = cbg.CborReadHeader(br) + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) if err != nil { return err } @@ -1505,9 +1371,11 @@ func (t *BlockMsg) UnmarshalCBOR(r io.Reader) 
error { if maj != cbg.MajArray { return fmt.Errorf("expected cbor array") } + if extra > 0 { t.SecpkMessages = make([]cid.Cid, extra) } + for i := 0; i < int(extra); i++ { c, err := cbg.ReadCid(br) @@ -1520,223 +1388,29 @@ func (t *BlockMsg) UnmarshalCBOR(r io.Reader) error { return nil } -func (t *SignedStorageAsk) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write([]byte{130}); err != nil { - return err - } - - // t.Ask (types.StorageAsk) (struct) - if err := t.Ask.MarshalCBOR(w); err != nil { - return err - } - - // t.Signature (types.Signature) (struct) - if err := t.Signature.MarshalCBOR(w); err != nil { - return err - } - return nil -} - -func (t *SignedStorageAsk) UnmarshalCBOR(r io.Reader) error { - br := cbg.GetPeeker(r) - - maj, extra, err := cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 2 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.Ask (types.StorageAsk) (struct) - - { - - pb, err := br.PeekByte() - if err != nil { - return err - } - if pb == cbg.CborNull[0] { - var nbuf [1]byte - if _, err := br.Read(nbuf[:]); err != nil { - return err - } - } else { - t.Ask = new(StorageAsk) - if err := t.Ask.UnmarshalCBOR(br); err != nil { - return err - } - } - - } - // t.Signature (types.Signature) (struct) - - { - - pb, err := br.PeekByte() - if err != nil { - return err - } - if pb == cbg.CborNull[0] { - var nbuf [1]byte - if _, err := br.Read(nbuf[:]); err != nil { - return err - } - } else { - t.Signature = new(Signature) - if err := t.Signature.UnmarshalCBOR(br); err != nil { - return err - } - } - - } - return nil -} - -func (t *StorageAsk) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write([]byte{134}); err != nil { - return err - } - - // t.Price (types.BigInt) (struct) 
- if err := t.Price.MarshalCBOR(w); err != nil { - return err - } - - // t.MinPieceSize (uint64) (uint64) - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajUnsignedInt, uint64(t.MinPieceSize))); err != nil { - return err - } - - // t.Miner (address.Address) (struct) - if err := t.Miner.MarshalCBOR(w); err != nil { - return err - } - - // t.Timestamp (uint64) (uint64) - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajUnsignedInt, uint64(t.Timestamp))); err != nil { - return err - } - - // t.Expiry (uint64) (uint64) - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajUnsignedInt, uint64(t.Expiry))); err != nil { - return err - } - - // t.SeqNo (uint64) (uint64) - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajUnsignedInt, uint64(t.SeqNo))); err != nil { - return err - } - return nil -} - -func (t *StorageAsk) UnmarshalCBOR(r io.Reader) error { - br := cbg.GetPeeker(r) - - maj, extra, err := cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 6 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.Price (types.BigInt) (struct) - - { - - if err := t.Price.UnmarshalCBOR(br); err != nil { - return err - } - - } - // t.MinPieceSize (uint64) (uint64) - - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.MinPieceSize = uint64(extra) - // t.Miner (address.Address) (struct) - - { - - if err := t.Miner.UnmarshalCBOR(br); err != nil { - return err - } - - } - // t.Timestamp (uint64) (uint64) - - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.Timestamp = uint64(extra) - // t.Expiry (uint64) (uint64) - - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != 
cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.Expiry = uint64(extra) - // t.SeqNo (uint64) (uint64) - - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.SeqNo = uint64(extra) - return nil -} +var lengthBufExpTipSet = []byte{131} func (t *ExpTipSet) MarshalCBOR(w io.Writer) error { if t == nil { _, err := w.Write(cbg.CborNull) return err } - if _, err := w.Write([]byte{131}); err != nil { + if _, err := w.Write(lengthBufExpTipSet); err != nil { return err } + scratch := make([]byte, 9) + // t.Cids ([]cid.Cid) (slice) if len(t.Cids) > cbg.MaxLength { return xerrors.Errorf("Slice value in field t.Cids was too long") } - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajArray, uint64(len(t.Cids)))); err != nil { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.Cids))); err != nil { return err } for _, v := range t.Cids { - if err := cbg.WriteCid(w, v); err != nil { + if err := cbg.WriteCidBuf(scratch, w, v); err != nil { return xerrors.Errorf("failed writing cid field t.Cids: %w", err) } } @@ -1746,7 +1420,7 @@ func (t *ExpTipSet) MarshalCBOR(w io.Writer) error { return xerrors.Errorf("Slice value in field t.Blocks was too long") } - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajArray, uint64(len(t.Blocks)))); err != nil { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.Blocks))); err != nil { return err } for _, v := range t.Blocks { @@ -1755,17 +1429,26 @@ func (t *ExpTipSet) MarshalCBOR(w io.Writer) error { } } - // t.Height (uint64) (uint64) - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajUnsignedInt, uint64(t.Height))); err != nil { - return err + // t.Height (abi.ChainEpoch) (int64) + if t.Height >= 0 { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Height)); err != nil { + return err + } + } else { + if 
err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.Height-1)); err != nil { + return err + } } return nil } func (t *ExpTipSet) UnmarshalCBOR(r io.Reader) error { - br := cbg.GetPeeker(r) + *t = ExpTipSet{} - maj, extra, err := cbg.CborReadHeader(br) + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) if err != nil { return err } @@ -1779,7 +1462,7 @@ func (t *ExpTipSet) UnmarshalCBOR(r io.Reader) error { // t.Cids ([]cid.Cid) (slice) - maj, extra, err = cbg.CborReadHeader(br) + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) if err != nil { return err } @@ -1791,9 +1474,11 @@ func (t *ExpTipSet) UnmarshalCBOR(r io.Reader) error { if maj != cbg.MajArray { return fmt.Errorf("expected cbor array") } + if extra > 0 { t.Cids = make([]cid.Cid, extra) } + for i := 0; i < int(extra); i++ { c, err := cbg.ReadCid(br) @@ -1805,7 +1490,7 @@ func (t *ExpTipSet) UnmarshalCBOR(r io.Reader) error { // t.Blocks ([]*types.BlockHeader) (slice) - maj, extra, err = cbg.CborReadHeader(br) + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) if err != nil { return err } @@ -1817,9 +1502,11 @@ func (t *ExpTipSet) UnmarshalCBOR(r io.Reader) error { if maj != cbg.MajArray { return fmt.Errorf("expected cbor array") } + if extra > 0 { t.Blocks = make([]*BlockHeader, extra) } + for i := 0; i < int(extra); i++ { var v BlockHeader @@ -1830,15 +1517,248 @@ func (t *ExpTipSet) UnmarshalCBOR(r io.Reader) error { t.Blocks[i] = &v } - // t.Height (uint64) (uint64) + // t.Height (abi.ChainEpoch) (int64) + { + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + var extraI int64 + if err != nil { + return err + } + switch maj { + case cbg.MajUnsignedInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 positive overflow") + } + case cbg.MajNegativeInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 negative oveflow") + } + extraI = -1 - extraI + default: + return 
fmt.Errorf("wrong type for int64 field: %d", maj) + } - maj, extra, err = cbg.CborReadHeader(br) + t.Height = abi.ChainEpoch(extraI) + } + return nil +} + +var lengthBufBeaconEntry = []byte{130} + +func (t *BeaconEntry) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write(lengthBufBeaconEntry); err != nil { + return err + } + + scratch := make([]byte, 9) + + // t.Round (uint64) (uint64) + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Round)); err != nil { + return err + } + + // t.Data ([]uint8) (slice) + if len(t.Data) > cbg.ByteArrayMaxLen { + return xerrors.Errorf("Byte array in field t.Data was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajByteString, uint64(len(t.Data))); err != nil { + return err + } + + if _, err := w.Write(t.Data[:]); err != nil { + return err + } + return nil +} + +func (t *BeaconEntry) UnmarshalCBOR(r io.Reader) error { + *t = BeaconEntry{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) if err != nil { return err } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 2 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.Round (uint64) (uint64) + + { + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.Round = uint64(extra) + + } + // t.Data ([]uint8) (slice) + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + + if extra > cbg.ByteArrayMaxLen { + return fmt.Errorf("t.Data: byte array too large (%d)", extra) + } + if maj != cbg.MajByteString { + return fmt.Errorf("expected byte array") + } + + if extra > 0 { + 
t.Data = make([]uint8, extra) + } + + if _, err := io.ReadFull(br, t.Data[:]); err != nil { + return err } - t.Height = uint64(extra) + return nil +} + +var lengthBufStateRoot = []byte{131} + +func (t *StateRoot) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write(lengthBufStateRoot); err != nil { + return err + } + + scratch := make([]byte, 9) + + // t.Version (uint64) (uint64) + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Version)); err != nil { + return err + } + + // t.Actors (cid.Cid) (struct) + + if err := cbg.WriteCidBuf(scratch, w, t.Actors); err != nil { + return xerrors.Errorf("failed to write cid field t.Actors: %w", err) + } + + // t.Info (cid.Cid) (struct) + + if err := cbg.WriteCidBuf(scratch, w, t.Info); err != nil { + return xerrors.Errorf("failed to write cid field t.Info: %w", err) + } + + return nil +} + +func (t *StateRoot) UnmarshalCBOR(r io.Reader) error { + *t = StateRoot{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 3 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.Version (uint64) (uint64) + + { + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.Version = uint64(extra) + + } + // t.Actors (cid.Cid) (struct) + + { + + c, err := cbg.ReadCid(br) + if err != nil { + return xerrors.Errorf("failed to read cid field t.Actors: %w", err) + } + + t.Actors = c + + } + // t.Info (cid.Cid) (struct) + + { + + c, err := cbg.ReadCid(br) + if err != nil { + return xerrors.Errorf("failed to read cid field t.Info: %w", err) + } + + t.Info = c + + } + return nil +} + +var lengthBufStateInfo 
= []byte{128} + +func (t *StateInfo) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write(lengthBufStateInfo); err != nil { + return err + } + + return nil +} + +func (t *StateInfo) UnmarshalCBOR(r io.Reader) error { + *t = StateInfo{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 0 { + return fmt.Errorf("cbor input had wrong number of fields") + } + return nil } diff --git a/chain/types/electionproof.go b/chain/types/electionproof.go new file mode 100644 index 000000000..b8879b27c --- /dev/null +++ b/chain/types/electionproof.go @@ -0,0 +1,205 @@ +package types + +import ( + "math/big" + + "github.com/filecoin-project/lotus/build" + "github.com/minio/blake2b-simd" +) + +type ElectionProof struct { + WinCount int64 + VRFProof []byte +} + +const precision = 256 + +var ( + expNumCoef []*big.Int + expDenoCoef []*big.Int +) + +func init() { + parse := func(coefs []string) []*big.Int { + out := make([]*big.Int, len(coefs)) + for i, coef := range coefs { + c, ok := new(big.Int).SetString(coef, 10) + if !ok { + panic("could not parse exp paramemter") + } + // << 256 (Q.0 to Q.256), >> 128 to transform integer params to coefficients + c = c.Lsh(c, precision-128) + out[i] = c + } + return out + } + + // parameters are in integer format, + // coefficients are *2^-128 of that + num := []string{ + "-648770010757830093818553637600", + "67469480939593786226847644286976", + "-3197587544499098424029388939001856", + "89244641121992890118377641805348864", + "-1579656163641440567800982336819953664", + "17685496037279256458459817590917169152", + "-115682590513835356866803355398940131328", + "340282366920938463463374607431768211456", + } + expNumCoef = parse(num) + + deno := []string{ + 
"1225524182432722209606361", + "114095592300906098243859450", + "5665570424063336070530214243", + "194450132448609991765137938448", + "5068267641632683791026134915072", + "104716890604972796896895427629056", + "1748338658439454459487681798864896", + "23704654329841312470660182937960448", + "259380097567996910282699886670381056", + "2250336698853390384720606936038375424", + "14978272436876548034486263159246028800", + "72144088983913131323343765784380833792", + "224599776407103106596571252037123047424", + "340282366920938463463374607431768211456", + } + expDenoCoef = parse(deno) +} + +// expneg accepts x in Q.256 format and computes e^-x. +// It is most precise within [0, 1.725) range, where error is less than 3.4e-30. +// Over the [0, 5) range its error is less than 4.6e-15. +// Output is in Q.256 format. +func expneg(x *big.Int) *big.Int { + // exp is approximated by rational function + // polynomials of the rational function are evaluated using Horner's method + num := polyval(expNumCoef, x) // Q.256 + deno := polyval(expDenoCoef, x) // Q.256 + + num = num.Lsh(num, precision) // Q.512 + return num.Div(num, deno) // Q.512 / Q.256 => Q.256 +} + +// polyval evaluates a polynomial given by coefficients `p` in Q.256 format +// at point `x` in Q.256 format. Output is in Q.256. +// Coefficients should be ordered from the highest order coefficient to the lowest. 
+func polyval(p []*big.Int, x *big.Int) *big.Int { + // evaluation using Horner's method + res := new(big.Int).Set(p[0]) // Q.256 + tmp := new(big.Int) // big.Int.Mul doesn't like when input is reused as output + for _, c := range p[1:] { + tmp = tmp.Mul(res, x) // Q.256 * Q.256 => Q.512 + res = res.Rsh(tmp, precision) // Q.512 >> 256 => Q.256 + res = res.Add(res, c) + } + + return res +} + +// computes lambda in Q.256 +func lambda(power, totalPower *big.Int) *big.Int { + lam := new(big.Int).Mul(power, blocksPerEpoch.Int) // Q.0 + lam = lam.Lsh(lam, precision) // Q.256 + lam = lam.Div(lam /* Q.256 */, totalPower /* Q.0 */) // Q.256 + return lam +} + +var MaxWinCount = 3 * int64(build.BlocksPerEpoch) + +type poiss struct { + lam *big.Int + pmf *big.Int + icdf *big.Int + + tmp *big.Int // temporary variable for optimization + + k uint64 +} + +// newPoiss starts poisson inverted CDF +// lambda is in Q.256 format +// returns (instance, `1-poisscdf(0, lambda)`) +// CDF value returend is reused when calling `next` +func newPoiss(lambda *big.Int) (*poiss, *big.Int) { + + // pmf(k) = (lambda^k)*(e^lambda) / k! + // k = 0 here, so it simplifies to just e^-lambda + elam := expneg(lambda) // Q.256 + pmf := new(big.Int).Set(elam) + + // icdf(k) = 1 - ∑ᵏᵢ₌₀ pmf(i) + // icdf(0) = 1 - pmf(0) + icdf := big.NewInt(1) + icdf = icdf.Lsh(icdf, precision) // Q.256 + icdf = icdf.Sub(icdf, pmf) // Q.256 + + k := uint64(0) + + p := &poiss{ + lam: lambda, + pmf: pmf, + + tmp: elam, + icdf: icdf, + + k: k, + } + + return p, icdf +} + +// next computes `k++, 1-poisscdf(k, lam)` +// return is in Q.256 format +func (p *poiss) next() *big.Int { + // incrementally compute next pmf and icdf + + // pmf(k) = (lambda^k)*(e^lambda) / k! 
+ // so pmf(k) = pmf(k-1) * lambda / k + p.k++ + p.tmp.SetUint64(p.k) // Q.0 + + // calculate pmf for k + p.pmf = p.pmf.Div(p.pmf, p.tmp) // Q.256 / Q.0 => Q.256 + // we are using `tmp` as target for multiplication as using an input as output + // for Int.Mul causes allocations + p.tmp = p.tmp.Mul(p.pmf, p.lam) // Q.256 * Q.256 => Q.512 + p.pmf = p.pmf.Rsh(p.tmp, precision) // Q.512 >> 256 => Q.256 + + // calculate output + // icdf(k) = icdf(k-1) - pmf(k) + p.icdf = p.icdf.Sub(p.icdf, p.pmf) // Q.256 + return p.icdf +} + +// ComputeWinCount uses VRFProof to compute number of wins +// The algorithm is based on Algorand's Sortition with Binomial distribution +// replaced by Poisson distribution. +func (ep *ElectionProof) ComputeWinCount(power BigInt, totalPower BigInt) int64 { + h := blake2b.Sum256(ep.VRFProof) + + lhs := BigFromBytes(h[:]).Int // 256bits, assume Q.256 so [0, 1) + + // We are calculating upside-down CDF of Poisson distribution with + // rate λ=power*E/totalPower + // Steps: + // 1. calculate λ=power*E/totalPower + // 2. calculate elam = exp(-λ) + // 3. 
Check how many times we win: + // j = 0 + // pmf = elam + // rhs = 1 - pmf + // for h(vrf) < rhs: j++; pmf = pmf * lam / j; rhs = rhs - pmf + + lam := lambda(power.Int, totalPower.Int) // Q.256 + + p, rhs := newPoiss(lam) + + var j int64 + for lhs.Cmp(rhs) < 0 && j < MaxWinCount { + rhs = p.next() + j++ + } + + return j +} diff --git a/chain/types/electionproof_test.go b/chain/types/electionproof_test.go new file mode 100644 index 000000000..9344ff6a6 --- /dev/null +++ b/chain/types/electionproof_test.go @@ -0,0 +1,145 @@ +package types + +import ( + "bytes" + "fmt" + "math/big" + "os" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/xorcare/golden" +) + +func TestPoissonFunction(t *testing.T) { + tests := []struct { + lambdaBase uint64 + lambdaShift uint + }{ + {10, 10}, // 0.0097 + {209714, 20}, // 0.19999885 + {1036915, 20}, // 0.9888792038 + {1706, 10}, // 1.6660 + {2, 0}, // 2 + {5242879, 20}, //4.9999990 + {5, 0}, // 5 + } + + for _, test := range tests { + test := test + t.Run(fmt.Sprintf("lam-%d-%d", test.lambdaBase, test.lambdaShift), func(t *testing.T) { + b := &bytes.Buffer{} + b.WriteString("icdf\n") + + lam := new(big.Int).SetUint64(test.lambdaBase) + lam = lam.Lsh(lam, precision-test.lambdaShift) + p, icdf := newPoiss(lam) + + b.WriteString(icdf.String()) + b.WriteRune('\n') + + for i := 0; i < 15; i++ { + b.WriteString(p.next().String()) + b.WriteRune('\n') + } + golden.Assert(t, []byte(b.String())) + }) + } +} + +func TestLambdaFunction(t *testing.T) { + tests := []struct { + power string + totalPower string + target float64 + }{ + {"10", "100", .1 * 5.}, + {"1024", "2048", 0.5 * 5.}, + {"2000000000000000", "100000000000000000", 0.02 * 5.}, + } + + for _, test := range tests { + test := test + t.Run(fmt.Sprintf("%s-%s", test.power, test.totalPower), func(t *testing.T) { + pow, ok := new(big.Int).SetString(test.power, 10) + assert.True(t, ok) + total, ok := new(big.Int).SetString(test.totalPower, 10) + assert.True(t, ok) + lam := 
lambda(pow, total) + assert.Equal(t, test.target, q256ToF(lam)) + golden.Assert(t, []byte(lam.String())) + }) + } +} + +func TestExpFunction(t *testing.T) { + const N = 256 + + step := big.NewInt(5) + step = step.Lsh(step, 256) // Q.256 + step = step.Div(step, big.NewInt(N-1)) + + x := big.NewInt(0) + b := &bytes.Buffer{} + + b.WriteString("x, y\n") + for i := 0; i < N; i++ { + y := expneg(x) + fmt.Fprintf(b, "%s,%s\n", x, y) + x = x.Add(x, step) + } + + golden.Assert(t, b.Bytes()) +} + +func q256ToF(x *big.Int) float64 { + deno := big.NewInt(1) + deno = deno.Lsh(deno, 256) + rat := new(big.Rat).SetFrac(x, deno) + f, _ := rat.Float64() + return f +} + +func TestElectionLam(t *testing.T) { + p := big.NewInt(64) + tot := big.NewInt(128) + lam := lambda(p, tot) + target := 64. * 5. / 128. + if q256ToF(lam) != target { + t.Fatalf("wrong lambda: %f, should be: %f", q256ToF(lam), target) + } +} + +var Res int64 + +func BenchmarkWinCounts(b *testing.B) { + totalPower := NewInt(100) + power := NewInt(100) + ep := &ElectionProof{VRFProof: nil} + var res int64 + + b.ResetTimer() + b.ReportAllocs() + for i := 0; i < b.N; i++ { + ep.VRFProof = []byte{byte(i), byte(i >> 8), byte(i >> 16), byte(i >> 24), byte(i >> 32)} + j := ep.ComputeWinCount(power, totalPower) + res += j + } + Res += res +} + +func TestWinCounts(t *testing.T) { + t.SkipNow() + totalPower := NewInt(100) + power := NewInt(30) + + f, _ := os.Create("output.wins") + fmt.Fprintf(f, "wins\n") + ep := &ElectionProof{VRFProof: nil} + for i := uint64(0); i < 1000000; i++ { + i := i + 1000000 + ep.VRFProof = []byte{byte(i), byte(i >> 8), byte(i >> 16), byte(i >> 24), byte(i >> 32)} + j := ep.ComputeWinCount(power, totalPower) + fmt.Fprintf(f, "%d\n", j) + } +} diff --git a/chain/types/execresult.go b/chain/types/execresult.go new file mode 100644 index 000000000..6fc93fac6 --- /dev/null +++ b/chain/types/execresult.go @@ -0,0 +1,103 @@ +package types + +import ( + "encoding/json" + "fmt" + "runtime" + "strings" + 
"time" +) + +type ExecutionTrace struct { + Msg *Message + MsgRct *MessageReceipt + Error string + Duration time.Duration + GasCharges []*GasTrace + + Subcalls []ExecutionTrace +} + +type GasTrace struct { + Name string + + Location []Loc `json:"loc"` + TotalGas int64 `json:"tg"` + ComputeGas int64 `json:"cg"` + StorageGas int64 `json:"sg"` + TotalVirtualGas int64 `json:"vtg"` + VirtualComputeGas int64 `json:"vcg"` + VirtualStorageGas int64 `json:"vsg"` + + TimeTaken time.Duration `json:"tt"` + Extra interface{} `json:"ex,omitempty"` + + Callers []uintptr `json:"-"` +} + +type Loc struct { + File string + Line int + Function string +} + +func (l Loc) Show() bool { + ignorePrefix := []string{ + "reflect.", + "github.com/filecoin-project/lotus/chain/vm.(*Invoker).transform", + "github.com/filecoin-project/go-amt-ipld/", + } + for _, pre := range ignorePrefix { + if strings.HasPrefix(l.Function, pre) { + return false + } + } + return true +} +func (l Loc) String() string { + file := strings.Split(l.File, "/") + + fn := strings.Split(l.Function, "/") + var fnpkg string + if len(fn) > 2 { + fnpkg = strings.Join(fn[len(fn)-2:], "/") + } else { + fnpkg = l.Function + } + + return fmt.Sprintf("%s@%s:%d", fnpkg, file[len(file)-1], l.Line) +} + +func (l Loc) Important() bool { + if strings.HasPrefix(l.Function, "github.com/filecoin-project/specs-actors/actors/builtin") { + return true + } + return false +} + +func (gt *GasTrace) MarshalJSON() ([]byte, error) { + type GasTraceCopy GasTrace + if len(gt.Location) == 0 { + if len(gt.Callers) != 0 { + frames := runtime.CallersFrames(gt.Callers) + for { + frame, more := frames.Next() + if frame.Function == "github.com/filecoin-project/lotus/chain/vm.(*VM).ApplyMessage" { + break + } + l := Loc{ + File: frame.File, + Line: frame.Line, + Function: frame.Function, + } + gt.Location = append(gt.Location, l) + if !more { + break + } + } + } + } + + cpy := (*GasTraceCopy)(gt) + return json.Marshal(cpy) +} diff --git a/chain/types/fil.go 
b/chain/types/fil.go index 80de6ced3..99a896e38 100644 --- a/chain/types/fil.go +++ b/chain/types/fil.go @@ -1,6 +1,7 @@ package types import ( + "encoding" "fmt" "math/big" "strings" @@ -11,11 +12,11 @@ import ( type FIL BigInt func (f FIL) String() string { - r := new(big.Rat).SetFrac(f.Int, big.NewInt(build.FilecoinPrecision)) + r := new(big.Rat).SetFrac(f.Int, big.NewInt(int64(build.FilecoinPrecision))) if r.Sign() == 0 { - return "0" + return "0 FIL" } - return strings.TrimRight(strings.TrimRight(r.FloatString(18), "0"), ".") + return strings.TrimRight(strings.TrimRight(r.FloatString(18), "0"), ".") + " FIL" } func (f FIL) Format(s fmt.State, ch rune) { @@ -27,16 +28,54 @@ func (f FIL) Format(s fmt.State, ch rune) { } } +func (f FIL) MarshalText() (text []byte, err error) { + return []byte(f.String()), nil +} + +func (f FIL) UnmarshalText(text []byte) error { + p, err := ParseFIL(string(text)) + if err != nil { + return err + } + + f.Int.Set(p.Int) + return nil +} + func ParseFIL(s string) (FIL, error) { + suffix := strings.TrimLeft(s, ".1234567890") + s = s[:len(s)-len(suffix)] + var attofil bool + if suffix != "" { + norm := strings.ToLower(strings.TrimSpace(suffix)) + switch norm { + case "", "fil": + case "attofil", "afil": + attofil = true + default: + return FIL{}, fmt.Errorf("unrecognized suffix: %q", suffix) + } + } + r, ok := new(big.Rat).SetString(s) if !ok { return FIL{}, fmt.Errorf("failed to parse %q as a decimal number", s) } - r = r.Mul(r, big.NewRat(build.FilecoinPrecision, 1)) + if !attofil { + r = r.Mul(r, big.NewRat(int64(build.FilecoinPrecision), 1)) + } + if !r.IsInt() { - return FIL{}, fmt.Errorf("invalid FIL value: %q", s) + var pref string + if attofil { + pref = "atto" + } + return FIL{}, fmt.Errorf("invalid %sFIL value: %q", pref, s) } return FIL{r.Num()}, nil } + +var _ encoding.TextMarshaler = (*FIL)(nil) +var _ encoding.TextUnmarshaler = (*FIL)(nil) diff --git a/chain/types/invokeret.go b/chain/types/invokeret.go deleted file mode 
100644 index 030b728ae..000000000 --- a/chain/types/invokeret.go +++ /dev/null @@ -1,6 +0,0 @@ -package types - -type InvokeRet struct { - Result []byte - ReturnCode byte -} diff --git a/chain/types/message.go b/chain/types/message.go index 8d08dc53c..4fead44bc 100644 --- a/chain/types/message.go +++ b/chain/types/message.go @@ -4,34 +4,66 @@ import ( "bytes" "fmt" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/lotus/build" block "github.com/ipfs/go-block-format" "github.com/ipfs/go-cid" - "github.com/multiformats/go-multihash" + xerrors "golang.org/x/xerrors" "github.com/filecoin-project/go-address" ) +const MessageVersion = 0 + +type ChainMsg interface { + Cid() cid.Cid + VMMessage() *Message + ToStorageBlock() (block.Block, error) + // FIXME: This is the *message* length, this name is misleading. + ChainLength() int +} + type Message struct { + Version uint64 + To address.Address From address.Address Nonce uint64 - Value BigInt + Value abi.TokenAmount - GasPrice BigInt - GasLimit BigInt + GasLimit int64 + GasFeeCap abi.TokenAmount + GasPremium abi.TokenAmount - Method uint64 + Method abi.MethodNum Params []byte } +func (m *Message) Caller() address.Address { + return m.From +} + +func (m *Message) Receiver() address.Address { + return m.To +} + +func (m *Message) ValueReceived() abi.TokenAmount { + return m.Value +} + func DecodeMessage(b []byte) (*Message, error) { var msg Message if err := msg.UnmarshalCBOR(bytes.NewReader(b)); err != nil { return nil, err } + if msg.Version != MessageVersion { + return nil, fmt.Errorf("decoded message had incorrect version (%d)", msg.Version) + } + return &msg, nil } @@ -43,14 +75,21 @@ func (m *Message) Serialize() ([]byte, error) { return buf.Bytes(), nil } +func (m *Message) ChainLength() int { + ser, err := m.Serialize() + if err != nil { + panic(err) + } + return len(ser) +} + func (m *Message) ToStorageBlock() (block.Block, error) { 
data, err := m.Serialize() if err != nil { return nil, err } - pref := cid.NewPrefixV1(cid.DagCBOR, multihash.BLAKE2B_MIN+31) - c, err := pref.Sum(data) + c, err := abi.CidBuilder.Sum(data) if err != nil { return nil, err } @@ -68,10 +107,7 @@ func (m *Message) Cid() cid.Cid { } func (m *Message) RequiredFunds() BigInt { - return BigAdd( - m.Value, - BigMul(m.GasPrice, m.GasLimit), - ) + return BigMul(m.GasFeeCap, NewInt(uint64(m.GasLimit))) } func (m *Message) VMMessage() *Message { @@ -81,3 +117,73 @@ func (m *Message) VMMessage() *Message { func (m *Message) Equals(o *Message) bool { return m.Cid() == o.Cid() } + +func (m *Message) EqualCall(o *Message) bool { + m1 := *m + m2 := *o + + m1.GasLimit, m2.GasLimit = 0, 0 + m1.GasFeeCap, m2.GasFeeCap = big.Zero(), big.Zero() + m1.GasPremium, m2.GasPremium = big.Zero(), big.Zero() + + return (&m1).Equals(&m2) +} + +func (m *Message) ValidForBlockInclusion(minGas int64) error { + if m.Version != 0 { + return xerrors.New("'Version' unsupported") + } + + if m.To == address.Undef { + return xerrors.New("'To' address cannot be empty") + } + + if m.From == address.Undef { + return xerrors.New("'From' address cannot be empty") + } + + if m.Value.Int == nil { + return xerrors.New("'Value' cannot be nil") + } + + if m.Value.LessThan(big.Zero()) { + return xerrors.New("'Value' field cannot be negative") + } + + if m.Value.GreaterThan(TotalFilecoinInt) { + return xerrors.New("'Value' field cannot be greater than total filecoin supply") + } + + if m.GasFeeCap.Int == nil { + return xerrors.New("'GasFeeCap' cannot be nil") + } + + if m.GasFeeCap.LessThan(big.Zero()) { + return xerrors.New("'GasFeeCap' field cannot be negative") + } + + if m.GasPremium.Int == nil { + return xerrors.New("'GasPremium' cannot be nil") + } + + if m.GasPremium.LessThan(big.Zero()) { + return xerrors.New("'GasPremium' field cannot be negative") + } + + if m.GasPremium.GreaterThan(m.GasFeeCap) { + return xerrors.New("'GasFeeCap' less than 'GasPremium'") + 
} + + if m.GasLimit > build.BlockGasLimit { + return xerrors.New("'GasLimit' field cannot be greater than a block's gas limit") + } + + // since prices might vary with time, this is technically semantic validation + if m.GasLimit < minGas { + return xerrors.New("'GasLimit' field cannot be less than the cost of storing a message on chain") + } + + return nil +} + +const TestGasLimit = 100e6 diff --git a/chain/types/message_receipt.go b/chain/types/message_receipt.go index 48139a347..57761680d 100644 --- a/chain/types/message_receipt.go +++ b/chain/types/message_receipt.go @@ -2,14 +2,16 @@ package types import ( "bytes" + + "github.com/filecoin-project/go-state-types/exitcode" ) type MessageReceipt struct { - ExitCode uint8 + ExitCode exitcode.ExitCode Return []byte - GasUsed BigInt + GasUsed int64 } func (mr *MessageReceipt) Equals(o *MessageReceipt) bool { - return mr.ExitCode == o.ExitCode && bytes.Equal(mr.Return, o.Return) && BigCmp(mr.GasUsed, o.GasUsed) == 0 + return mr.ExitCode == o.ExitCode && bytes.Equal(mr.Return, o.Return) && mr.GasUsed == o.GasUsed } diff --git a/chain/types/message_test.go b/chain/types/message_test.go new file mode 100644 index 000000000..f57385a09 --- /dev/null +++ b/chain/types/message_test.go @@ -0,0 +1,72 @@ +package types + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/specs-actors/actors/builtin" +) + +func TestEqualCall(t *testing.T) { + m1 := &Message{ + To: builtin.StoragePowerActorAddr, + From: builtin.SystemActorAddr, + Nonce: 34, + Value: big.Zero(), + + GasLimit: 123, + GasFeeCap: big.NewInt(234), + GasPremium: big.NewInt(234), + + Method: 6, + Params: []byte("hai"), + } + + m2 := &Message{ + To: builtin.StoragePowerActorAddr, + From: builtin.SystemActorAddr, + Nonce: 34, + Value: big.Zero(), + + GasLimit: 1236, // changed + GasFeeCap: big.NewInt(234), + GasPremium: big.NewInt(234), + + Method: 6, + Params: []byte("hai"), 
+ } + + m3 := &Message{ + To: builtin.StoragePowerActorAddr, + From: builtin.SystemActorAddr, + Nonce: 34, + Value: big.Zero(), + + GasLimit: 123, + GasFeeCap: big.NewInt(4524), // changed + GasPremium: big.NewInt(234), + + Method: 6, + Params: []byte("hai"), + } + + m4 := &Message{ + To: builtin.StoragePowerActorAddr, + From: builtin.SystemActorAddr, + Nonce: 34, + Value: big.Zero(), + + GasLimit: 123, + GasFeeCap: big.NewInt(4524), + GasPremium: big.NewInt(234), + + Method: 5, // changed + Params: []byte("hai"), + } + + require.True(t, m1.EqualCall(m2)) + require.True(t, m1.EqualCall(m3)) + require.False(t, m1.EqualCall(m4)) +} diff --git a/chain/types/mock/chain.go b/chain/types/mock/chain.go index 55fe8c11d..559630619 100644 --- a/chain/types/mock/chain.go +++ b/chain/types/mock/chain.go @@ -5,9 +5,13 @@ import ( "fmt" "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/crypto" + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/wallet" - "github.com/ipfs/go-cid" ) func Address(i uint64) address.Address { @@ -20,12 +24,13 @@ func Address(i uint64) address.Address { func MkMessage(from, to address.Address, nonce uint64, w *wallet.Wallet) *types.SignedMessage { msg := &types.Message{ - To: to, - From: from, - Value: types.NewInt(1), - Nonce: nonce, - GasLimit: types.NewInt(1), - GasPrice: types.NewInt(0), + To: to, + From: from, + Value: types.NewInt(1), + Nonce: nonce, + GasLimit: 1000000, + GasFeeCap: types.NewInt(100), + GasPremium: types.NewInt(1), } sig, err := w.Sign(context.TODO(), from, msg.Cid().Bytes()) @@ -46,8 +51,13 @@ func MkBlock(parents *types.TipSet, weightInc uint64, ticketNonce uint64) *types panic(err) } + pstateRoot := c + if parents != nil { + pstateRoot = parents.Blocks()[0].ParentStateRoot + } + var pcids []cid.Cid - var height uint64 + var height 
abi.ChainEpoch weight := types.NewInt(weightInc) if parents != nil { pcids = parents.Cids() @@ -57,20 +67,21 @@ func MkBlock(parents *types.TipSet, weightInc uint64, ticketNonce uint64) *types return &types.BlockHeader{ Miner: addr, - EPostProof: types.EPostProof{ - Proof: []byte("election post proof proof"), + ElectionProof: &types.ElectionProof{ + VRFProof: []byte(fmt.Sprintf("====%d=====", ticketNonce)), }, Ticket: &types.Ticket{ VRFProof: []byte(fmt.Sprintf("====%d=====", ticketNonce)), }, Parents: pcids, ParentMessageReceipts: c, - BLSAggregate: types.Signature{Type: types.KTBLS, Data: []byte("boo! im a signature")}, + BLSAggregate: &crypto.Signature{Type: crypto.SigTypeBLS, Data: []byte("boo! im a signature")}, ParentWeight: weight, Messages: c, Height: height, - ParentStateRoot: c, - BlockSig: &types.Signature{Type: types.KTBLS, Data: []byte("boo! im a signature")}, + ParentStateRoot: pstateRoot, + BlockSig: &crypto.Signature{Type: crypto.SigTypeBLS, Data: []byte("boo! im a signature")}, + ParentBaseFee: types.NewInt(uint64(build.MinimumBaseFee)), } } diff --git a/chain/types/mpool.go b/chain/types/mpool.go new file mode 100644 index 000000000..cf08177e9 --- /dev/null +++ b/chain/types/mpool.go @@ -0,0 +1,22 @@ +package types + +import ( + "time" + + "github.com/filecoin-project/go-address" +) + +type MpoolConfig struct { + PriorityAddrs []address.Address + SizeLimitHigh int + SizeLimitLow int + ReplaceByFeeRatio float64 + PruneCooldown time.Duration + GasLimitOverestimation float64 +} + +func (mc *MpoolConfig) Clone() *MpoolConfig { + r := new(MpoolConfig) + *r = *mc + return r +} diff --git a/chain/types/signature.go b/chain/types/signature.go deleted file mode 100644 index f3a61327b..000000000 --- a/chain/types/signature.go +++ /dev/null @@ -1,120 +0,0 @@ -package types - -import ( - "bytes" - "encoding/binary" - "fmt" - "io" - - cbg "github.com/whyrusleeping/cbor-gen" -) - -const SignatureMaxLength = 200 - -const ( - KTSecp256k1 = "secp256k1" - KTBLS = 
"bls" -) - -const ( - IKTUnknown = -1 - - IKTSecp256k1 = iota - IKTBLS -) - -type Signature struct { - Type string - Data []byte -} - -func SignatureFromBytes(x []byte) (Signature, error) { - val, nr := binary.Uvarint(x) - if nr != 1 { - return Signature{}, fmt.Errorf("signatures with type field longer than one byte are invalid") - } - var ts string - switch val { - case IKTSecp256k1: - ts = KTSecp256k1 - case IKTBLS: - ts = KTBLS - default: - return Signature{}, fmt.Errorf("unsupported signature type: %d", val) - } - - return Signature{ - Type: ts, - Data: x[1:], - }, nil -} - -func (s *Signature) TypeCode() int { - switch s.Type { - case KTSecp256k1: - return IKTSecp256k1 - case KTBLS: - return IKTBLS - default: - return IKTUnknown - } -} - -func (s *Signature) MarshalCBOR(w io.Writer) error { - if s == nil { - _, err := w.Write(cbg.CborNull) - return err - } - - header := cbg.CborEncodeMajorType(cbg.MajByteString, uint64(len(s.Data)+1)) - - if _, err := w.Write(header); err != nil { - return err - } - - if _, err := w.Write([]byte{byte(s.TypeCode())}); err != nil { - return err - } - - if _, err := w.Write(s.Data); err != nil { - return err - } - - return nil -} - -func (s *Signature) UnmarshalCBOR(br io.Reader) error { - maj, l, err := cbg.CborReadHeader(br) - if err != nil { - return err - } - - if maj != cbg.MajByteString { - return fmt.Errorf("cbor input for signature was not a byte string") - } - - if l > SignatureMaxLength { - return fmt.Errorf("cbor byte array for signature was too long") - } - - buf := make([]byte, l) - if _, err := io.ReadFull(br, buf); err != nil { - return err - } - - switch buf[0] { - default: - return fmt.Errorf("invalid signature type in cbor input: %d", buf[0]) - case IKTSecp256k1: - s.Type = KTSecp256k1 - case IKTBLS: - s.Type = KTBLS - } - s.Data = buf[1:] - - return nil -} - -func (s *Signature) Equals(o *Signature) bool { - return s.Type == o.Type && bytes.Equal(s.Data, o.Data) -} diff --git a/chain/types/signature_cgo.go 
b/chain/types/signature_cgo.go deleted file mode 100644 index d86e41d72..000000000 --- a/chain/types/signature_cgo.go +++ /dev/null @@ -1,55 +0,0 @@ -//+build cgo - -package types - -import ( - "fmt" - - bls "github.com/filecoin-project/filecoin-ffi" - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-crypto" - "github.com/minio/blake2b-simd" -) - -func (s *Signature) Verify(addr address.Address, msg []byte) error { - if addr.Protocol() == address.ID { - return fmt.Errorf("must resolve ID addresses before using them to verify a signature") - } - b2sum := blake2b.Sum256(msg) - - switch s.Type { - case KTSecp256k1: - pubk, err := crypto.EcRecover(b2sum[:], s.Data) - if err != nil { - return err - } - - maybeaddr, err := address.NewSecp256k1Address(pubk) - if err != nil { - return err - } - - if addr != maybeaddr { - return fmt.Errorf("signature did not match") - } - - return nil - case KTBLS: - digests := []bls.Digest{bls.Hash(bls.Message(msg))} - - var pubk bls.PublicKey - copy(pubk[:], addr.Payload()) - pubkeys := []bls.PublicKey{pubk} - - var sig bls.Signature - copy(sig[:], s.Data) - - if !bls.Verify(&sig, digests, pubkeys) { - return fmt.Errorf("bls signature failed to verify") - } - - return nil - default: - return fmt.Errorf("cannot verify signature of unsupported type: %s", s.Type) - } -} diff --git a/chain/types/signature_test.go b/chain/types/signature_test.go index b45024b29..9ade3c046 100644 --- a/chain/types/signature_test.go +++ b/chain/types/signature_test.go @@ -3,12 +3,14 @@ package types import ( "bytes" "testing" + + "github.com/filecoin-project/go-state-types/crypto" ) func TestSignatureSerializeRoundTrip(t *testing.T) { - s := &Signature{ + s := &crypto.Signature{ Data: []byte("foo bar cat dog"), - Type: KTBLS, + Type: crypto.SigTypeBLS, } buf := new(bytes.Buffer) @@ -16,7 +18,7 @@ func TestSignatureSerializeRoundTrip(t *testing.T) { t.Fatal(err) } - var outs Signature + var outs crypto.Signature if err := 
outs.UnmarshalCBOR(buf); err != nil { t.Fatal(err) } diff --git a/chain/types/signedmessage.go b/chain/types/signedmessage.go index 0ede20a4f..17d2f5d94 100644 --- a/chain/types/signedmessage.go +++ b/chain/types/signedmessage.go @@ -3,23 +3,23 @@ package types import ( "bytes" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/crypto" block "github.com/ipfs/go-block-format" "github.com/ipfs/go-cid" - "github.com/multiformats/go-multihash" ) -func (m *SignedMessage) ToStorageBlock() (block.Block, error) { - if m.Signature.Type == KTBLS { - return m.Message.ToStorageBlock() +func (sm *SignedMessage) ToStorageBlock() (block.Block, error) { + if sm.Signature.Type == crypto.SigTypeBLS { + return sm.Message.ToStorageBlock() } - data, err := m.Serialize() + data, err := sm.Serialize() if err != nil { return nil, err } - pref := cid.NewPrefixV1(cid.DagCBOR, multihash.BLAKE2B_MIN+31) - c, err := pref.Sum(data) + c, err := abi.CidBuilder.Sum(data) if err != nil { return nil, err } @@ -27,12 +27,12 @@ func (m *SignedMessage) ToStorageBlock() (block.Block, error) { return block.NewBlockWithCid(data, c) } -func (m *SignedMessage) Cid() cid.Cid { - if m.Signature.Type == KTBLS { - return m.Message.Cid() +func (sm *SignedMessage) Cid() cid.Cid { + if sm.Signature.Type == crypto.SigTypeBLS { + return sm.Message.Cid() } - sb, err := m.ToStorageBlock() + sb, err := sm.ToStorageBlock() if err != nil { panic(err) } @@ -42,7 +42,7 @@ func (m *SignedMessage) Cid() cid.Cid { type SignedMessage struct { Message Message - Signature Signature + Signature crypto.Signature } func DecodeSignedMessage(data []byte) (*SignedMessage, error) { @@ -62,6 +62,14 @@ func (sm *SignedMessage) Serialize() ([]byte, error) { return buf.Bytes(), nil } +func (sm *SignedMessage) ChainLength() int { + ser, err := sm.Serialize() + if err != nil { + panic(err) + } + return len(ser) +} + func (sm *SignedMessage) Size() int { serdata, err := sm.Serialize() if err != 
nil { diff --git a/chain/types/state.go b/chain/types/state.go new file mode 100644 index 000000000..b99eb19c2 --- /dev/null +++ b/chain/types/state.go @@ -0,0 +1,15 @@ +package types + +import "github.com/ipfs/go-cid" + +type StateRoot struct { + // State root version. Versioned along with actors (for now). + Version uint64 + // Actors tree. The structure depends on the state root version. + Actors cid.Cid + // Info. The structure depends on the state root version. + Info cid.Cid +} + +// TODO: version this. +type StateInfo struct{} diff --git a/chain/types/testdata/TestExpFunction.golden b/chain/types/testdata/TestExpFunction.golden new file mode 100644 index 000000000..1fb931874 --- /dev/null +++ b/chain/types/testdata/TestExpFunction.golden @@ -0,0 +1,257 @@ +x, y +0,115792089237316195423570985008687907853269984665640564039457584007913129639936 +2270433122300317557324921274680155055946470287561579687048187921723786855685,113543770489016942962237377411819033281128847358465443152965692667674515268459 +4540866244600635114649842549360310111892940575123159374096375843447573711370,111339107030360092669625707586928312989271150877369072427721155459215537208890 +6811299366900952671974763824040465167839410862684739061144563765171360567055,109177251212712547631698034978205369781807498766451007885527881500477232021682 +9081732489201270229299685098720620223785881150246318748192751686895147422740,107057371846115774238270822835274977319670170085938012474442950429894883628816 +11352165611501587786624606373400775279732351437807898435240939608618934278425,104978653879710027736182386389791146063108246474333192437116903221506365793784 +13622598733801905343949527648080930335678821725369478122289127530342721134110,102940298088363735750708779071187005425195044826988440514290336074628648838684 +15893031856102222901274448922761085391625292012931057809337315452066507989795,100941520765387555093691898043462296359944447036680097792913901290457279892923 
+18163464978402540458599370197441240447571762300492637496385503373790294845480,98981553421214956610998073555828635758296463224923810238433739306701915887016 +20433898100702858015924291472121395503518232588054217183433691295514081701165,97059642487933486831615191223039105587754475193940197594806835165639555904556 +22704331223003175573249212746801550559464702875615796870481879217237868556850,95175049029553104647921939291264669504212075519331704270982994731418832879574 +24974764345303493130574134021481705615411173163177376557530067138961655412535,93327048457900197046298852001610623840008858609979636336358094295509875743050 +27245197467603810687899055296161860671357643450738956244578255060685442268220,91514930254028040867379060937212497464993417057199803702713818093495592131219 +29515630589904128245223976570842015727304113738300535931626442982409229123905,89737997695036598537471612414728492361735134837457880776578397164411259714034 +31786063712204445802548897845522170783250584025862115618674630904133015979590,87995567586196615492499061341239298336716207707733388009305962491415997438251 +34056496834504763359873819120202325839197054313423695305722818825856802835275,86286969998275026412807199079440137158468693464163642116594809068619896392941 +36326929956805080917198740394882480895143524600985274992771006747580589690960,84611548009960677185535234025783667242734659094160158991843756961639811974930 +38597363079105398474523661669562635951089994888546854679819194669304376546645,82968657455291330479761642121918798999912566007436555574905312488221049770642 +40867796201405716031848582944242791007036465176108434366867382591028163402330,81357666675984845712317087407692885985045766475399018683682465654293317561467 +43138229323706033589173504218922946062982935463670014053915570512751950258015,79777956278579309738294730369672998880873563557913118175337099978291132200541 
+45408662446006351146498425493603101118929405751231593740963758434475737113700,78228918896288743544848199105491032646200262796326433813989850094529077282776 +47679095568306668703823346768283256174875876038793173428011946356199523969385,76709958955482823270730386397827358293917115388940737122493792142598409249118 +49949528690606986261148268042963411230822346326354753115060134277923310825070,75220492446700831714270851931718222334181221472926734650894450187207587804959 +52219961812907303818473189317643566286768816613916332802108322199647097680755,73759946700111799812659669164881767734266809389910877226542646506513000978489 +54490394935207621375798110592323721342715286901477912489156510121370884536440,72327760165334507045772504425420867449416167380982855532448913178137554210923 +56760828057507938933123031867003876398661757189039492176204698043094671392125,70923382195532685995592428750934431740132395987623204573329785474158712116147 +59031261179808256490447953141684031454608227476601071863252885964818458247810,69546272835702420022053453330708655397369524375477499739230911030535808540657 +61301694302108574047772874416364186510554697764162651550301073886542245103495,68195902615070334829829990481057773300819373544625447529491837404705895284329 +63572127424408891605097795691044341566501168051724231237349261808266031959180,66871752343522765217935231891690355471651496064655471661919663456330328641288 +65842560546709209162422716965724496622447638339285810924397449729989818814865,65573312911987628132646843093927406319425698793733957333572673848799954239855 +68112993669009526719747638240404651678394108626847390611445637651713605670550,64300085096692252880128256597394188085029084300287019159255155536716536636582 +70383426791309844277072559515084806734340578914408970298493825573437392526235,63051579367221909582465988028351521656270181701866074076641181901174457665557 
+72653859913610161834397480789764961790287049201970549985542013495161179381920,61827315698305238252660159775306665170268657207834096298284442133031571582783 +74924293035910479391722402064445116846233519489532129672590201416884966237605,60626823385254213782224025259478780989246898268785251219150451819890211607196 +77194726158210796949047323339125271902179989777093709359638389338608753093290,59449640862987687230396859494036983576366375788559345059199710577555290424321 +79465159280511114506372244613805426958126460064655289046686577260332539948975,58295315528568921616783939661658889268848452171972763039788768615558483626267 +81735592402811432063697165888485582014072930352216868733734765182056326804660,57163403567188891479248718554448273836403260979137552976694337187145017476501 +84006025525111749621022087163165737070019400639778448420782953103780113660345,56053469781528440285551018178364674618486430618035927413379628478874266581878 +86276458647412067178347008437845892125965870927340028107831141025503900516030,54965087424433688889920273781092319643978134551542030239286748341146003208614 +88546891769712384735671929712526047181912341214901607794879328947227687371715,53897838034840362103953324191045652555242797708181047223345801682385818568835 +90817324892012702292996850987206202237858811502463187481927516868951474227400,52851311276883949594712447365930530107052884187291818795931120263968492084774 +93087758014313019850321772261886357293805281790024767168975704790675261083085,51825104782133842211941756402475821837684219892038783449941692633646927028868 +95358191136613337407646693536566512349751752077586346856023892712399047938770,50818823994890785951863832476942579771969130641977742986737254850253840075592 +97628624258913654964971614811246667405698222365147926543072080634122834794455,49832082020488173548864433257316990617151231489408392527154422207621203727615 
+99899057381213972522296536085926822461644692652709506230120268555846621650140,48864499476538848601355411822273410550321020146722594189563317351144555102308 +102169490503514290079621457360606977517591162940271085917168456477570408505825,47915704347070229628266128780655102907390977725210624641985610920484495320871 +104439923625814607636946378635287132573537633227832665604216644399294195361510,46985331839491671953371264029121450743887309752499067780940162596021543960818 +106710356748114925194271299909967287629484103515394245291264832321017982217195,46073024244339074252983177578294403714924665379395655719184742489399140111119 +108980789870415242751596221184647442685430573802955824978313020242741769072880,45178430797742804397094652011443390009353450908460410593202993111115176300509 +111251222992715560308921142459327597741377044090517404665361208164465555928565,44301207546566066275398443059622984529245531090835413454138285364317904357889 +113521656115015877866246063734007752797323514378078984352409396086189342784250,43441017216161856030307536334277824451020867870703344545581645152489753618625 +115792089237316195423570985008687907853269984665640564039457584007913129639935,42597529080697662913911602080197270017224605406643086589378214572018159359656 +118062522359616512980895906283368062909216454953202143726505771929636916495620,41770418835998057231823154500906296232954940017107200954896529163626836852806 +120332955481916830538220827558048217965162925240763723413553959851360703351305,40959368474856275913667539524102962047053738936489506227266772182693729839202 +122603388604217148095545748832728373021109395528325303100602147773084490206990,40164066164766865529760099118055626254866338159841829407810028308443332329921 +124873821726517465652870670107408528077055865815886882787650335694808277062675,39384206128032373421270623986368325593061645265273820139765067969067422649415 
+127144254848817783210195591382088683133002336103448462474698523616532063918360,38619488524197990384791697485732970600503064875453399931120765038045198401265 +129414687971118100767520512656768838188948806391010042161746711538255850774045,37869619334768943402646797366590078932213917335501796767692394741802326454228 +131685121093418418324845433931448993244895276678571621848794899459979637629730,37134310250166314581624891999905972285593140562157169782497980837053288139911 +133955554215718735882170355206129148300841746966133201535843087381703424485415,36413278558877823092557676422579316779501436559193491643756131394023796062011 +136225987338019053439495276480809303356788217253694781222891275303427211341100,35706247038760950822158313374858241516488865748339595127547394046982094249410 +138496420460319370996820197755489458412734687541256360909939463225150998196785,35012943850456619981286410714734531636271581473914516202135729910546566402779 +140766853582619688554145119030169613468681157828817940596987651146874785052470,34333102432872442378462629116615524982524168457316936505909372845805604900401 +143037286704920006111470040304849768524627628116379520284035839068598571908155,33666461400695355776019567288676251030810566791286359575983902200864972469996 +145307719827220323668794961579529923580574098403941099971084026990322358763840,33012764443894243004677309675463059806544657868579375264515822376830510918451 +147578152949520641226119882854210078636520568691502679658132214912046145619525,32371760229173894620565861867718452819385944209047587245952825283707541232161 +149848586071820958783444804128890233692467038979064259345180402833769932475210,31743202303342426140951325851524709564919796135818066107980273521807047228205 +152119019194121276340769725403570388748413509266625839032228590755493719330895,31126848998554996579614932864397941484844825641744252641191173459915093805187 
+154389452316421593898094646678250543804359979554187418719276778677217506186580,30522463339397396402839881838375473280428264227964104623768547140174704396628 +156659885438721911455419567952930698860306449841748998406324966598941293042265,29929812951773780419644078230146692062203486832083285970050129935848796157421 +158930318561022229012744489227610853916252920129310578093373154520665079897950,29348669973563514777235550444240282773251884769510008252819331832439494645079 +161200751683322546570069410502291008972199390416872157780421342442388866753635,28778810967012787421358795112757003059950944596369120909715112149169964220370 +163471184805622864127394331776971164028145860704433737467469530364112653609320,28220016832827298362763401072714572174637391915333615996330037256359689152007 +165741617927923181684719253051651319084092330991995317154517718285836440465005,27672072725933000121901983367925347694111025283271589335038719296229849510185 +168012051050223499242044174326331474140038801279556896841565906207560227320690,27134767972872500055614403280459447260039099989714465645168626995212978266528 +170282484172523816799369095601011629195985271567118476528614094129284014176375,26607895990805365148558542391218752327077936735384764869356894955189244136002 +172552917294824134356694016875691784251931741854680056215662282051007801032060,26091254208081186520295830628576992670321075391182679321065607327265313932322 +174823350417124451914018938150371939307878212142241635902710469972731587887745,25584643986354865593328765236833491064966760582438881050282097633879965205565 +177093783539424769471343859425052094363824682429803215589758657894455374743430,25087870544214176820510595839294024971107825899184642127021233999936247081507 +179364216661725087028668780699732249419771152717364795276806845816179161599115,24600742882290243310082706540466488496131563143845631581983311961047083061256 
+181634649784025404585993701974412404475717623004926374963855033737902948454800,24123073709822131836694888705209306323992339748006300420231840208502026712055 +183905082906325722143318623249092559531664093292487954650903221659626735310485,23654679372647332806338881400061315566868133427664662629105489367609937695646 +186175516028626039700643544523772714587610563580049534337951409581350522166170,23195379782590438967131080837393236404679360225477050241221440433306491390529 +188445949150926357257968465798452869643557033867611114024999597503074309021855,22744998348222874237097258296963376819397584603868543116623643157984253142510 +190716382273226674815293387073133024699503504155172693712047785424798095877540,22303361906967051161228844170125488556352938430940411562982254002868044311034 +192986815395526992372618308347813179755449974442734273399095973346521882733225,21870300658518852415771961233241780589753603014109439052609181643134330131200 +195257248517827309929943229622493334811396444730295853086144161268245669588910,21445648099562838646716735757033316057251336889853858375428467187815522927837 +197527681640127627487268150897173489867342915017857432773192349189969456444595,21029240959755081956656390289742251499446416407847034496990361844628688581109 +199798114762427945044593072171853644923289385305419012460240537111693243300280,20620919138949011730680161329173099008306763464882403634448661640684501661866 +202068547884728262601917993446533799979235855592980592147288725033417030155965,20220525645640137405137620676980023532901299004381393279093890751532452054423 +204338981007028580159242914721213955035182325880542171834336912955140817011650,19827906536605981416712487046651644994469849037549721389552871270682788176368 +206609414129328897716567835995894110091128796168103751521385100876864603867335,19442910857718015103451661807495877546362994006807802531582187045985408388323 
+208879847251629215273892757270574265147075266455665331208433288798588390723020,19065390585902840940891643898043637774181614198870274451246241331546097600612 +211150280373929532831217678545254420203021736743226910895481476720312177578705,18695200572230306358460705740514292808692490977651168538902708783481421680348 +213420713496229850388542599819934575258968207030788490582529664642035964434390,18332198486106667663798285266862799262530724876828607112610697476363307765699 +215691146618530167945867521094614730314914677318350070269577852563759751290075,17976244760551347472111336927386090359808454572176186089250363261529225698797 +217961579740830485503192442369294885370861147605911649956626040485483538145760,17627202538536245657534631830721579840416279034844169182844082749512022955880 +220232012863130803060517363643975040426807617893473229643674228407207325001445,17284937620366972373860132250248373607831381821348510576523341628979456457899 +222502445985431120617842284918655195482754088181034809330722416328931111857130,16949318412085772290021286893087219332537721900597190962614965688689436470898 +224772879107731438175167206193335350538700558468596389017770604250654898712815,16620215874876302005383259706474512944760516297587146250989332554945114509504 +227043312230031755732492127468015505594647028756157968704818792172378685568500,16297503475450807802230954908656139014943597886328184429574709348911081384486 +229313745352332073289817048742695660650593499043719548391866980094102472424185,15981057137400628605961920332938849374838761847812884031059653371884698227732 +231584178474632390847141970017375815706539969331281128078915168015826259279870,15670755193491319402603874616100339746216923922257041729716527735529769853650 +233854611596932708404466891292055970762486439618842707765963355937550046135555,15366478338884053550790009737665512422829259009202431332168817628656130747976 
+236125044719233025961791812566736125818432909906404287453011543859273832991240,15068109585265318560877642552077774766432262308314225827173090425609387180236 +238395477841533343519116733841416280874379380193965867140059731780997619846925,14775534215867269134414164114591717654313758765518045957328319974951247947864 +240665910963833661076441655116096435930325850481527446827107919702721406702610,14488639741361443696906809527925407527798326175077538816130520515576676432536 +242936344086133978633766576390776590986272320769089026514156107624445193558295,14207315856608886447500721787929418266891341269962575206208292606790558927738 +245206777208434296191091497665456746042218791056650606201204295546168980413980,13931454398250046219817842121237250069017319717552215812396900410729316780042 +247477210330734613748416418940136901098165261344212185888252483467892767269665,13660949303118146325455249570700165024364040604752663984462703166489664742339 +249747643453034931305741340214817056154111731631773765575300671389616554125350,13395696567460036159625482974163434950873419335747629798807509358843043114747 +252018076575335248863066261489497211210058201919335345262348859311340340981035,13135594206948845808872493016734991140070074672980170698797842470835657746355 +254288509697635566420391182764177366266004672206896924949397047233064127836720,12880542217473069333080972366140272174677497922483630681691757178031836728636 +256558942819935883977716104038857521321951142494458504636445235154787914692405,12630442536687000915161971042648362535726831652623366360951945091372138829073 +258829375942236201535041025313537676377897612782020084323493423076511701548090,12385199006307740796619289137869014505391917569864233277491827537392271272526 +261099809064536519092365946588217831433844083069581664010541610998235488403775,12144717335144274958225939593470301902208893969091913127900701796820969916276 
+263370242186836836649690867862897986489790553357143243697589798919959275259460,11908905062844413972629474569782724684547574705479145661987238841262047512889 +265640675309137154207015789137578141545737023644704823384637986841683062115145,11677671524345652458077781103683012746985203951843087585351200546458450234583 +267911108431437471764340710412258296601683493932266403071686174763406848970830,11450927815016281205730420816822769656276856651451425159655410140798486590368 +270181541553737789321665631686938451657629964219827982758734362685130635826515,11228586756473349441251594323339849390046255944661352162796836657552528120100 +272451974676038106878990552961618606713576434507389562445782550606854422682200,11010562863064334916605775798811477728189133603960442457397758272811779333449 +274722407798338424436315474236298761769522904794951142132830738528578209537885,10796772308999634710251182117797466377054234368136632046647040836884880709149 +276992840920638741993640395510978916825469375082512721819878926450301996393570,10587132896123239841361951569073852197393979276538146129343090522801499481360 +279263274042939059550965316785659071881415845370074301506927114372025783249255,10381564022309202172514611506897394878877748812065679452742065418299049556809 +281533707165239377108290238060339226937362315657635881193975302293749570104940,10179986650471742679787468858482358928499942961441493648347660820939932354037 +283804140287539694665615159335019381993308785945197460881023490215473356960625,9982323278177086101950544601003257424140532557067992549916807460469069141662 +286074573409840012222940080609699537049255256232759040568071678137197143816310,9788497907845338332079968817743865213174453409259845129758908478094549262272 +288345006532140329780265001884379692105201726520320620255119866058920930671995,9598436017530949774464402361919053565881937627109257874972510208401009770963 
+290615439654440647337589923159059847161148196807882199942168053980644717527680,9412064532270530344304969563638312480198627803597895883581869229790285361030 +292885872776740964894914844433740002217094667095443779629216241902368504383365,9229311795986999922973693773512111376663242782368889508055395732551288322377 +295156305899041282452239765708420157273041137383005359316264429824092291239050,9050107543939271981357133004643947981886610395815063538792001543567721697353 +297426739021341600009564686983100312328987607670566939003312617745816078094735,8874382875706877830311960657026508969290082662731915199950324279800475201683 +299697172143641917566889608257780467384934077958128518690360805667539864950420,8702070228699144631140925089885492819072907992531772224458410703022324971560 +301967605265942235124214529532460622440880548245690098377408993589263651806105,8533103352178741979338410836757093105256375792397040329065630149508413086135 +304238038388242552681539450807140777496827018533251678064457181510987438661790,8367417281789609639196736794874998402544481231388500060346139012236641543825 +306508471510542870238864372081820932552773488820813257751505369432711225517475,8204948314579472931243763439936896182045474760835882712220942160049717189792 +308778904632843187796189293356501087608719959108374837438553557354435012373160,8045633984507342433459354572450622071501444572221358150888492135726910593613 +311049337755143505353514214631181242664666429395936417125601745276158799228845,7889413038426581123905040544325858121069927256074308557819634343914244175953 +313319770877443822910839135905861397720612899683497996812649933197882586084530,7736225412534304938490539495998345262722685569683811485718517851478118074806 +315590203999744140468164057180541552776559369971059576499698121119606372940215,7586012209278062013393244296606783131019725342345569324815264018948034488407 
+317860637122044458025488978455221707832505840258621156186746309041330159795900,7438715674710911696077991529315597910182131547815478256768516102178053147428 +320131070244344775582813899729901862888452310546182735873794496963053946651585,7294279176286196809531497726711149751773109079006438039328871808860505208647 +322401503366645093140138821004582017944398780833744315560842684884777733507270,7152647181083471707513356405012353952405611456310792512957976292047901188617 +324671936488945410697463742279262173000345251121305895247890872806501520362955,7013765234457214429330978163948031181884804665874684563234080173204818792192 +326942369611245728254788663553942328056291721408867474934939060728225307218640,6877579939100113814604817973991912882261222190269087624747098477473070920910 +329212802733546045812113584828622483112238191696429054621987248649949094074325,6744038934512881834200618407209402738374726406854013386327229734819886355209 +331483235855846363369438506103302638168184661983990634309035436571672880930010,6613090876872697694251640706911864470257281520044099539344107417605448847522 +333753668978146680926763427377982793224131132271552213996083624493396667785695,6484685419292543536070441670407899002441885350144223913319492393155843996609 +336024102100446998484088348652662948280077602559113793683131812415120454641380,6358773192463841844684321602472961808036370534710746466420213566001948927453 +338294535222747316041413269927343103336024072846675373370180000336844241497065,6235305785674952050504565665502249112334426989789636398933745643420892537805 +340564968345047633598738191202023258391970543134236953057228188258568028352750,6114235728198228318918503273647009044057809740115762935228605445446011541536 +342835401467347951156063112476703413447917013421798532744276376180291815208435,5995516471038482226936505302234614253048545365172641788639745380038847011850 
+345105834589648268713388033751383568503863483709360112431324564102015602064120,5879102369035832978915788636227029754149990028304128445154065521569679896299 +347376267711948586270712955026063723559809953996921692118372752023739388919805,5764948663316064068243885471976803898564123312018636117461200936317441104963 +349646700834248903828037876300743878615756424284483271805420939945463175775490,5653011464081738901084106683312765416762298565331902697873676865978153014007 +351917133956549221385362797575424033671702894572044851492469127867186962631175,5543247733737458913233334532284737157076797625887278020789776292123991283042 +354187567078849538942687718850104188727649364859606431179517315788910749486860,5435615270342776182191625059155814516167449877265043810529451526313401440000 +356458000201149856500012640124784343783595835147168010866565503710634536342545,5330072691386398513087911834712869898427723411257226595273064182550426763859 +358728433323450174057337561399464498839542305434729590553613691632358323198230,5226579417875448507582021006206716342354483979921442218144570774799799153756 +360998866445750491614662482674144653895488775722291170240661879554082110053915,5125095658733659256765209168041122238133229257032719961413509211200048248333 +363269299568050809171987403948824808951435246009852749927710067475805896909600,5025582395502508078982293698284693835284469532255851267477746214208169030829 +365539732690351126729312325223504964007381716297414329614758255397529683765285,4928001367339406197066635979466804208185522234029567523625554767402211661565 +367810165812651444286637246498185119063328186584975909301806443319253470620970,4832315056307176461496499130904489365080602372384889128573871476885084197963 +370080598934951761843962167772865274119274656872537488988854631240977257476655,4738486672949163220359850565587517062963871431792713253071282139856533845449 
+372351032057252079401287089047545429175221127160099068675902819162701044332340,4646480142144428256814095536419820031461458638508982355406532908762559366752 +374621465179552396958612010322225584231167597447660648362951007084424831188025,4556260089237594402171008726677813775125098076724227743627974892272588327070 +376891898301852714515936931596905739287114067735222228049999195006148618043710,4467791826438004029228927006814200361630588836617618519025510750743109199997 +379162331424153032073261852871585894343060538022783807737047382927872404899395,4381041339482963176613798998129770586400722046177381492207741626501677387270 +381432764546453349630586774146266049399007008310345387424095570849596191755080,4295975274559943590489358236239541053798654812625189402565013609912762502063 +383703197668753667187911695420946204454953478597906967111143758771319978610765,4212560925482714534092886286689429135772883215331118745250144375204860994258 +385973630791053984745236616695626359510899948885468546798191946693043765466450,4130766221116473846427146968171682243610648632762217464220432443306744076356 +388244063913354302302561537970306514566846419173030126485240134614767552322135,4050559713047143466628204822445172093558799258185730849489440714923595198306 +390514497035654619859886459244986669622792889460591706172288322536491339177820,3971910563490088516841239430057423861561699540314803552309386818215754652836 +392784930157954937417211380519666824678739359748153285859336510458215126033505,3894788533433611089965680791375046317061917745510095607652825551910399528715 +395055363280255254974536301794346979734685830035714865546384698379938912889190,3819163971012660154769745087149569914465567827492977553371176365210754983049 +397325796402555572531861223069027134790632300323276445233432886301662699744875,3745007800108287504328254434609249402710340257581916107575424870725436120735 
+399596229524855890089186144343707289846578770610838024920481074223386486600560,3672291509168466468538116483953888412415082359790291319830667248103068617726 +401866662647156207646511065618387444902525240898399604607529262145110273456245,3600987140245975220983870041875004895679551495743211205866825798183240723922 +404137095769456525203835986893067599958471711185961184294577450066834060311930,3531067278249129967384164433549615728055232151532910541204821495878913836970 +406407528891756842761160908167747755014418181473522763981625637988557847167615,3462505040401235139336450269559039606157745830970264242579700439549733102016 +408677962014057160318485829442427910070364651761084343668673825910281634023300,3395274065904697964555996096535291493010779102468849709076001181212499029926 +410948395136357477875810750717108065126311122048645923355722013832005420878985,3329348505805833474130146144622500207015802447060544079287034222519730000801 +413218828258657795433135671991788220182257592336207503042770201753729207734670,3264703013056463168733957305768762048671220120100675556528510459350361442934 +415489261380958112990460593266468375238204062623769082729818389675452994590355,3201312732768486228946010278262122858837369338972372870253840600423809756146 +417759694503258430547785514541148530294150532911330662416866577597176781446040,3139153292657676348854251985374092135837494969951169782173776159644176534423 +420030127625558748105110435815828685350097003198892242103914765518900568301725,3078200793673030025577310098607138197203259951148621910059374341071023253667 +422300560747859065662435357090508840406043473486453821790962953440624355157410,3018431800808063478119054352864365216599069977484266467250325895140520117178 +424570993870159383219760278365188995461989943774015401478011141362348142013095,2959823334090525324552359078463176696893036195521598690559604864862321028159 
+426841426992459700777085199639869150517936414061576981165059329284071928868780,2902352859747060743788555836574429934024784962979281619588506007014730754443 +429111860114760018334410120914549305573882884349138560852107517205795715724465,2845998281539430113506217335511024116647561786738955796723957128032460028562 +431382293237060335891735042189229460629829354636700140539155705127519502580150,2790737932268951075048841664512779579965827455433050874470612581492493796612 +433652726359360653449059963463909615685775824924261720226203893049243289435835,2736550565445897654615705160581380392035754504074875922703396122219838983601 +435923159481660971006384884738589770741722295211823299913252080970967076291520,2683415347120653492731207292836793547250299699949146040965909097366506826314 +438193592603961288563709806013269925797668765499384879600300268892690863147205,2631311847873478425170175781779819578108906286445566029098739049466961949804 +440464025726261606121034727287950080853615235786946459287348456814414650002890,2580220034959808642151093783793953402129918191024990104539947286476194870361 +442734458848561923678359648562630235909561706074508038974396644736138436858575,2530120264608070452133030819123365918858694576649021314235932359705643943273 +445004891970862241235684569837310390965508176362069618661444832657862223714260,2480993274467046314956915988152178555256121842686531405628524104350927660746 +447275325093162558793009491111990546021454646649631198348493020579586010569945,2432820176199889308902969867700028980919093407680036321083074765251991144298 +449545758215462876350334412386670701077401116937192778035541208501309797425630,2385582448221938579601135652712703469128873722647430092259396421200608174744 +451816191337763193907659333661350856133347587224754357722589396423033584281315,2339261928579543607308487238103279986560541266662742536344201858542715699027 
+454086624460063511464984254936031011189294057512315937409637584344757371137000,2293840807967159344114184788286120867801944082584440628149280631165005320951 +456357057582363829022309176210711166245240527799877517096685772266481157992685,2249301622880027434993188844156208157845130306302793141117411709623435670173 +458627490704664146579634097485391321301186998087439096783733960188204944848370,2205627248899810866744487578412286757829514684708503434368069205255340039839 +460897923826964464136959018760071476357133468375000676470782148109928731704055,2162800894110600506761016035372877280458113909309651595222790899580498073682 +463168356949264781694283940034751631413079938662562256157830336031652518559740,2120806092642762118940557002260420725877403403643132375546681834433562628493 +465438790071565099251608861309431786469026408950123835844878523953376305415425,2079626698342141596131901285086838588831365589656553686857278748333033108659 +467709223193865416808933782584111941524972879237685415531926711875100092271110,2039246878562194346216395910444043784690021169116503952080024996424232086889 +469979656316165734366258703858792096580919349525246995218974899796823879126795,1999651108076652030782767922491430088269301331012688651267426380085596201366 +472250089438466051923583625133472251636865819812808574906023087718547665982480,1960824163110386199534010054524334089541753293394021687030939529972846447262 +474520522560766369480908546408152406692812290100370154593071275640271452838165,1922751115486173807887555445320207063113604718628558999245364537712059659787 +476790955683066687038233467682832561748758760387931734280119463561995239693850,1885417326885114167166377133451693487795698974827428621915772121522296322640 +479061388805367004595558388957512716804705230675493313967167651483719026549535,1848808443218490573462190057395600374688639714455162176619470154537224426642 
+481331821927667322152883310232192871860651700963054893654215839405442813405220,1812910389108912709483102199711379114048329569523931425832014763952606023142 +483602255049967639710208231506873026916598171250616473341264027327166600260905,1777709362478617929951137999560857523114698493464640463314567998578763375930 +485872688172267957267533152781553181972544641538178053028312215248890387116590,1743191829242850741544443083382012278950030439838258806090307410195511152367 +488143121294568274824858074056233337028491111825739632715360403170614173972275,1709344518106280188825408838548340945706907890570414138312511052040629403114 +490413554416868592382182995330913492084437582113301212402408591092337960827960,1676154415460454473592585178001481888630499860343530369736761941087841137938 +492683987539168909939507916605593647140384052400862792089456779014061747683645,1643608760380330981872631688029464454202113613160652114812004614315882448607 +494954420661469227496832837880273802196330522688424371776504966935785534539330,1611695039717957985264493988118541627646396847164138227584506579980006189593 +497224853783769545054157759154953957252276992975985951463553154857509321395015,1580400983291421636207279946257561145576531467125488184028701658324803392105 +499495286906069862611482680429634112308223463263547531150601342779233108250700,1549714559167208504327370397252359435900676480118217197526808984667322295656 +501765720028370180168807601704314267364169933551109110837649530700956895106385,1519623969034169817411698521182954716530148741935550434876916238707975489825 +504036153150670497726132522978994422420116403838670690524697718622680681962070,1490117643667308789561999136522296289524839311469606144758176235295035574077 +506306586272970815283457444253674577476062874126232270211745906544404468817755,1461184238479646954250162028027864509672389242493414237027164373310873876617 
+508577019395271132840782365528354732532009344413793849898794094466128255673440,1432812629160459284595696582392048709701342884089101780789506016757983545698 +510847452517571450398107286803034887587955814701355429585842282387852042529125,1404991907398201090242984454401886460001256486743771644647644675643019397120 +513117885639871767955432208077715042643902284988917009272890470309575829384810,1377711376686482242495906258989351428318340736703325118283502743772525420186 +515388318762172085512757129352395197699848755276478588959938658231299616240495,1350960548211476209390117246928289328516166528957389393408566548846213309144 +517658751884472403070082050627075352755795225564040168646986846153023403096180,1324729136819182692425165292411394327407511934209655077877674460464300121300 +519929185006772720627406971901755507811741695851601748334035034074747189951865,1299007057060993358777871916475898081725576133192311731321945268374304875079 +522199618129073038184731893176435662867688166139163328021083221996470976807550,1273784419316040268779271406014391437821438711132700913654395176336120357812 +524470051251373355742056814451115817923634636426724907708131409918194763663235,1249051525988836119835059106151604655038913776255529646900786453845629450261 +526740484373673673299381735725795972979581106714286487395179597839918550518920,1224798867780744376154319639656429438292590596449478554800662259883398133550 +529010917495973990856706657000476128035527577001848067082227785761642337374605,1201017120033845739753282342628997286268151802300216872136210819143441303664 +531281350618274308414031578275156283091474047289409646769275973683366124230290,1177697139145795253133868286972489045105718361046528878527448066379668652133 +533551783740574625971356499549836438147420517576971226456324161605089911085975,1154829959054291618502821225483149900650534431243083949245642604820923646588 
+535822216862874943528681420824516593203366987864532806143372349526813697941660,1132406787789807082890425327211328755183700827007270235992079331829362643852 +538092649985175261086006342099196748259313458152094385830420537448537484797345,1110419004095252483338613492946986193747814242216914544087253927761374702549 +540363083107475578643331263373876903315259928439655965517468725370261271653030,1088858154111277781547261772857694251337802353360681665028106984738120501199 +542633516229775896200656184648557058371206398727217545204516913291985058508715,1067715948125933652889320598100629627595490376276215626206049496855639908739 +544903949352076213757981105923237213427152869014779124891565101213708845364400,1046984257387444440232701363407888596891997329416358164537461237895052190493 +547174382474376531315306027197917368483099339302340704578613289135432632220085,1026655110978867048053673500561104502292891906760695382551282292885769647385 +549444815596676848872630948472597523539045809589902284265661477057156419075770,1006720692753434146222353671415953866113533147281398201361542821926894368710 +551715248718977166429955869747277678594992279877463863952709664978880205931455,987173338329403384733958131783829533009147622988398599380784122106977308530 +553985681841277483987280791021957833650938750165025443639757852900603992787140,968005532143257199520483988560220733582298517120243540446994347402239110174 +556256114963577801544605712296637988706885220452587023326806040822327779642825,949209904560120224102845604039392059367322323910424009332173176480678795959 +558526548085878119101930633571318143762831690740148603013854228744051566498510,930779229040283320858879764591061717606873826779909083197312274038703255036 +560796981208178436659255554845998298818778161027710182700902416665775353354195,912706419360744817546225148870963828076628869100719699856881194145713586169 
+563067414330478754216580476120678453874724631315271762387950604587499140209880,894984526890700687724824577161411173596658532863246109848772040565011872478 +565337847452779071773905397395358608930671101602833342074998792509222927065565,877606737919936156004596577686776039865420267809390448859339117903023609116 +567608280575079389331230318670038763986617571890394921762046980430946713921250,860566371039091548574664590528726362251021703804112434559419129682978794632 +569878713697379706888555239944718919042564042177956501449095168352670500776935,843856874570795154071557033366442926236979708804975971538286570121216914411 +572149146819680024445880161219399074098510512465518081136143356274394287632620,827471824050675417183296315424741971779066957250062452852260880325027656529 +574419579941980342003205082494079229154456982753079660823191544196118074488305,811404919757283964983682303379036253598800697102332643556476037720459831748 +576690013064280659560530003768759384210403453040641240510239732117841861343990,795649984289979771219745127738174910086802387263030261900019674061458963150 +578960446186580977117854925043439539266349923328202820197287920039565648199675,780200960193843203865524721501305631184795994974723658032277935470179306730 diff --git a/chain/types/testdata/TestLambdaFunction/10-100.golden b/chain/types/testdata/TestLambdaFunction/10-100.golden new file mode 100644 index 000000000..adb7eaf5e --- /dev/null +++ b/chain/types/testdata/TestLambdaFunction/10-100.golden @@ -0,0 +1 @@ +57896044618658097711785492504343953926634992332820282019728792003956564819968 \ No newline at end of file diff --git a/chain/types/testdata/TestLambdaFunction/1024-2048.golden b/chain/types/testdata/TestLambdaFunction/1024-2048.golden new file mode 100644 index 000000000..d449e4eab --- /dev/null +++ b/chain/types/testdata/TestLambdaFunction/1024-2048.golden @@ -0,0 +1 @@ +289480223093290488558927462521719769633174961664101410098643960019782824099840 \ No newline at end of file diff 
--git a/chain/types/testdata/TestLambdaFunction/2000000000000000-100000000000000000.golden b/chain/types/testdata/TestLambdaFunction/2000000000000000-100000000000000000.golden new file mode 100644 index 000000000..76d1403ac --- /dev/null +++ b/chain/types/testdata/TestLambdaFunction/2000000000000000-100000000000000000.golden @@ -0,0 +1 @@ +11579208923731619542357098500868790785326998466564056403945758400791312963993 \ No newline at end of file diff --git a/chain/types/testdata/TestPoissonFunction/lam-10-10.golden b/chain/types/testdata/TestPoissonFunction/lam-10-10.golden new file mode 100644 index 000000000..fbe59115f --- /dev/null +++ b/chain/types/testdata/TestPoissonFunction/lam-10-10.golden @@ -0,0 +1,17 @@ +icdf +1125278653883954157340515998824199281259686237023612910954531537010133365568 +5485581780123676224984074899749002236148166431650497590243915223971292577 +17842170241691453912141677461647358103546946338181118738604570718548080 +43538699106868068808561503680708109912117284430088557923220935824303 +85008817354919776986513548953593326094752560579206896166859206326 +138328508214407784218646352510285887677303021571444942144212932 +192946940473264032619492808002293590266469557064324916486706 +235498963868775663540157747991701194152938740691242898919 +255504995002156513848360474242833468958493714151012216 +249505772801580009049072856702435230216536441494110 +232834101038081471620316286291642591265760137159 +11533551765583311484083562200606025547302501012 +11353456917542541496993529059255898133794641608 +11353321629946357024346977051186975261639061786 +11353321535577219060847642123725989684858819334 +11353321535515780819985988910882590605707269697 diff --git a/chain/types/testdata/TestPoissonFunction/lam-1036915-20.golden b/chain/types/testdata/TestPoissonFunction/lam-1036915-20.golden new file mode 100644 index 000000000..45444615d --- /dev/null +++ b/chain/types/testdata/TestPoissonFunction/lam-1036915-20.golden @@ -0,0 +1,17 @@ +icdf 
+72718197862321603787957847055188249408647877796280116987586082825356498974798 +30123322455004902866096392147720313525979066051167804713805891738863284666878 +9062729215708086547397523150443475826195010851218953471088727298493148732956 +2120601657722952965249404258310617497861606438105371150448777320082300909521 +404370264674629622846679825118378024155401061236248117877372450081489268106 +64941157977031699837280219244596630364570458903895153606060064374226923144 +8998760514291795062091202215054973561658841304177095664084807386711484044 +1095864305518189121413406860322570887693509822868614985675874891701983877 +118988091690998292615838041961723734187855286032350465668217096980526413 +11653361409102593830837021393444274321721937984503223867724441309766994 +1039253147016500070761515827557061937775360855994320103804816785188376 +85064880905559492020902336338153555957806293407638915883403930221828 +6433469833589351070633969345898155559842007456615136652210720692299 +452164666340411064711833496915674079929092772106312520794348036629 +29679788379529243880483477334855509673275091060271619731000625321 +1827354397264548824373139406487609602015834650190678153972930556 diff --git a/chain/types/testdata/TestPoissonFunction/lam-1706-10.golden b/chain/types/testdata/TestPoissonFunction/lam-1706-10.golden new file mode 100644 index 000000000..fb6689a48 --- /dev/null +++ b/chain/types/testdata/TestPoissonFunction/lam-1706-10.golden @@ -0,0 +1,17 @@ +icdf +93907545465879218275260347624273708225148723352890415890955745471328115138024 +57447553596668785643406883388130520172829512611140657354486862128150346836988 +27076095525930017054568011324233899656590951319429188573619716140132147265911 +10209654292635635800479757502291310268341281539592025246744927385067352842651 +3184715634432458451975226003210729824895496226017269232182332263939291493510 +843984120585852874524302031056145794325474791455055607017530061469667926785 
+194034907919461416983404196340045475941285897027461784665454449911533518447 +39345544525367691179167072173518251727638261160626536209449847049064587000 +7131182470884793691126479665205819536661348710819293305892247868965466268 +1167890189531948301087715470416213490892402026859749426392544722128541359 +174396377814374645286335419995210764907848995332895729280582459579342738 +23925812935985027317838050852967276757134553590636105055350959232313899 +3035286920153916620063269622769735189335169019323041991528942663951986 +358060554242868329017312833503133182524340437692927389149107908421231 +39467628723598770945019147503633808666969235107758896427288845018926 +4082242594961149456000070139366495398696105445630156285138889148853 diff --git a/chain/types/testdata/TestPoissonFunction/lam-2-0.golden b/chain/types/testdata/TestPoissonFunction/lam-2-0.golden new file mode 100644 index 000000000..c64994bd9 --- /dev/null +++ b/chain/types/testdata/TestPoissonFunction/lam-2-0.golden @@ -0,0 +1,17 @@ +icdf +100121334043824876020967110392587568107053060743383522309741056272383359786578 +68779823656842237215759361160386888614619212898869438850308000801323820079862 +37438313269859598410551611928186209122185365054355355390874945330264280373146 +16543973011871172540413112440052422793896133158012633084586241682891253902002 +6096802882876959605343862695985529629751517209841271931441889859204740666430 +1917934831279274431316162798358772364093670830572727470184149129730135372202 +524978814080046039973596165816519942207722037483212649764902219905266940794 +126991380594552213875719985090162107383165239457636986787974531383875960392 +27494522223178757351250939908572648677026039951243071043742609253528215292 +5384109251762433679146707645997213408995106727599978656135515446784271938 +962026657479168944725861193482126355388920082871360178614096685435483268 +158011640336757174831161838479383254733249783829793182701111456099339874 
+24009137479688546515378612645592737957304733989532016715613917876649310 +3393367809370296005258116363471119991774726321799529640921988919312302 +448257856467688789526616894596603139556153797837745773108856211121302 +55576529414007827429083632080000892593677461309507924067105183362502 diff --git a/chain/types/testdata/TestPoissonFunction/lam-209714-20.golden b/chain/types/testdata/TestPoissonFunction/lam-209714-20.golden new file mode 100644 index 000000000..3a5a7bd46 --- /dev/null +++ b/chain/types/testdata/TestPoissonFunction/lam-209714-20.golden @@ -0,0 +1,17 @@ +icdf +20989436322611254574979389012727393136091537312055593688180077279147576363096 +2029014232696520721523332453453803636238058967796600089005757967894658844577 +132982872945592560215194824292206599698851338836977427578620974467337246296 +6581505574095040906286115477587166321018484661524779791358182693980706360 +261473369241451189291715224208035992983406808190620756138506410631265448 +8673527587881831627652027122245179992541421812500366598395022980519170 +246914417172755020716947338861248744263385116558896234139925344965714 +6155418508705151241339695503211918565434455355693098789916851059631 +136477982954924943101944815709613669983383501630000081908122878386 +2724514397229876659600187515561464490237868059330199615762951760 +49460332945698577716770256622916953121860884014393828193908634 +823264627479642334265449493920662550930201158945634649498769 +12651460568281449375957051928671501418597070452648092732548 +180560129118157986659345179422379755776654969450793746138 +2405427973892505908363489341734991530618943133521671600 +30045550760659097055494870911555042207154242485930695 diff --git a/chain/types/testdata/TestPoissonFunction/lam-5-0.golden b/chain/types/testdata/TestPoissonFunction/lam-5-0.golden new file mode 100644 index 000000000..e7df148c9 --- /dev/null +++ b/chain/types/testdata/TestPoissonFunction/lam-5-0.golden @@ -0,0 +1,17 @@ +icdf 
+115011888277122352219705460287186602222085188670665840381425306072442950421456 +111110883476153136200377836679680074066161208695792222091263916395092054329056 +101358371473730096152058777660913753676351258758608176365860442201714814098056 +85104184803025029404860345962969886360001342196634766823521318546086080379726 +64786451464643695970862306340540052214563946494168004895597413976550163231816 +44468718126262362536864266718110218069126550791701242967673509407014246083906 +27537273677611251341865900366085356281262054372978941361070255599067648460651 +15443384785717600488295638686067597861358842645320154499210788593391507301186 +7884704228284068704814225136056498848919335315533412710548621714843919076521 +3685437251932106602880106497161443842008497910096333939069640115650814507266 +1585803763756125551913047177713916338553079207377794553330149316054262222641 +631424905494315983291656577965040200618797978869367559812198952601283911451 +233767047885228663032743828069675143146180800324189645846386301162542948456 +80821718035579693702392770417611659502866500883736602013381435224565655001 +26198385946419347512981678399017558201682822512146229215879697389573764486 +7990608583365898783177981059486191101288263054949438283379118111243134316 diff --git a/chain/types/testdata/TestPoissonFunction/lam-5242879-20.golden b/chain/types/testdata/TestPoissonFunction/lam-5242879-20.golden new file mode 100644 index 000000000..367cf4a6d --- /dev/null +++ b/chain/types/testdata/TestPoissonFunction/lam-5242879-20.golden @@ -0,0 +1,17 @@ +icdf +115011887533064380050215202002871794783671664388541274939039184893270600703151 +111110879755863630144777547269452207577572562543679248972859798819416242535921 +101358362173007217989878395557465593373392010546833825352856759707561945139525 +85104169301821710755220628061805234978705652326202881500299286939503312327242 +64786432088141395502693562453332343133008530920262028792733819517798429528997 
+44468698749761909885846743680008378684637277300179598543979674797636862943384 +27537257530529080605729671834720742022613060678716434560927439906969908575619 +15443373252088578337713549627194971124753642243467082552611819728291522561946 +7884697019766617162458477655388623015603913245952633503615157778326861227793 +3685433247200570820185174890022164698361097802621279879954268046011715772231 +1585801761390548420193995297013958176769177427873274589439743405082427147382 +631423995328231141553650878972791975288890578033071631113620389859816342901 +233766668649395912353875000216328335294367462954196928187468994385755448892 +80821572175657684536655650469711580587115741420816601432036447172153463116 +26198333853594769407667441209847842642730676168975225661522405972966431948 +7990591219092428810606628986022697269060757693982546042792694706871429282 diff --git a/chain/types/tipset.go b/chain/types/tipset.go index a4fb209cb..44d41c29d 100644 --- a/chain/types/tipset.go +++ b/chain/types/tipset.go @@ -7,8 +7,10 @@ import ( "io" "sort" + "github.com/filecoin-project/go-state-types/abi" "github.com/ipfs/go-cid" - logging "github.com/ipfs/go-log" + logging "github.com/ipfs/go-log/v2" + "github.com/minio/blake2b-simd" cbg "github.com/whyrusleeping/cbor-gen" "golang.org/x/xerrors" ) @@ -18,18 +20,18 @@ var log = logging.Logger("types") type TipSet struct { cids []cid.Cid blks []*BlockHeader - height uint64 + height abi.ChainEpoch } -// why didnt i just export the fields? Because the struct has methods with the -// same names already type ExpTipSet struct { Cids []cid.Cid Blocks []*BlockHeader - Height uint64 + Height abi.ChainEpoch } func (ts *TipSet) MarshalJSON() ([]byte, error) { + // why didnt i just export the fields? 
Because the struct has methods with the + // same names already return json.Marshal(ExpTipSet{ Cids: ts.cids, Blocks: ts.blks, @@ -88,13 +90,19 @@ func tipsetSortFunc(blks []*BlockHeader) func(i, j int) bool { if ti.Equals(tj) { log.Warnf("blocks have same ticket (%s %s)", blks[i].Miner, blks[j].Miner) - return blks[i].Cid().KeyString() < blks[j].Cid().KeyString() + return bytes.Compare(blks[i].Cid().Bytes(), blks[j].Cid().Bytes()) < 0 } return ti.Less(tj) } } +// Checks: +// * A tipset is composed of at least one block. (Because of our variable +// number of blocks per tipset, determined by randomness, we do not impose +// an upper limit.) +// * All blocks have the same height. +// * All blocks have the same parents (same number of them and matching CIDs). func NewTipSet(blks []*BlockHeader) (*TipSet, error) { if len(blks) == 0 { return nil, xerrors.Errorf("NewTipSet called with zero length array of blocks") @@ -110,6 +118,10 @@ func NewTipSet(blks []*BlockHeader) (*TipSet, error) { return nil, fmt.Errorf("cannot create tipset with mismatching heights") } + if len(blks[0].Parents) != len(b.Parents) { + return nil, fmt.Errorf("cannot create tipset with mismatching number of parents") + } + for i, cid := range b.Parents { if cid != blks[0].Parents[i] { return nil, fmt.Errorf("cannot create tipset with mismatching parents") @@ -129,10 +141,13 @@ func (ts *TipSet) Cids() []cid.Cid { } func (ts *TipSet) Key() TipSetKey { + if ts == nil { + return EmptyTSK + } return NewTipSetKey(ts.cids...) 
} -func (ts *TipSet) Height() uint64 { +func (ts *TipSet) Height() abi.ChainEpoch { return ts.height } @@ -166,7 +181,9 @@ func (ts *TipSet) Equals(ots *TipSet) bool { } func (t *Ticket) Less(o *Ticket) bool { - return bytes.Compare(t.VRFProof, o.VRFProof) < 0 + tDigest := blake2b.Sum256(t.VRFProof) + oDigest := blake2b.Sum256(o.VRFProof) + return bytes.Compare(tDigest[:], oDigest[:]) < 0 } func (ts *TipSet) MinTicket() *Ticket { @@ -213,3 +230,15 @@ func (ts *TipSet) Contains(oc cid.Cid) bool { } return false } + +func (ts *TipSet) IsChildOf(parent *TipSet) bool { + return CidArrsEqual(ts.Parents().Cids(), parent.Cids()) && + // FIXME: The height check might go beyond what is meant by + // "parent", but many parts of the code rely on the tipset's + // height for their processing logic at the moment to obviate it. + ts.height > parent.height +} + +func (ts *TipSet) String() string { + return fmt.Sprintf("%v", ts.cids) +} diff --git a/chain/types/tipset_key.go b/chain/types/tipset_key.go index 194f712db..e5bc7750d 100644 --- a/chain/types/tipset_key.go +++ b/chain/types/tipset_key.go @@ -5,15 +5,19 @@ import ( "encoding/json" "strings" + "github.com/filecoin-project/go-state-types/abi" "github.com/ipfs/go-cid" - "github.com/multiformats/go-multihash" ) +var EmptyTSK = TipSetKey{} + // The length of a block header CID in bytes. var blockHeaderCIDLen int func init() { - c, err := cid.V1Builder{Codec: cid.DagCBOR, MhType: multihash.BLAKE2B_MIN + 31}.Sum([]byte{}) + // hash a large string of zeros so we don't estimate based on inlined CIDs. 
+ var buf [256]byte + c, err := abi.CidBuilder.Sum(buf[:]) if err != nil { panic(err) } @@ -90,6 +94,10 @@ func (k *TipSetKey) UnmarshalJSON(b []byte) error { return nil } +func (k TipSetKey) IsEmpty() bool { + return len(k.value) == 0 +} + func encodeKey(cids []cid.Cid) []byte { buffer := new(bytes.Buffer) for _, c := range cids { diff --git a/chain/types/tipset_key_test.go b/chain/types/tipset_key_test.go index 43ff1a3df..7b3ce439d 100644 --- a/chain/types/tipset_key_test.go +++ b/chain/types/tipset_key_test.go @@ -61,9 +61,9 @@ func TestTipSetKey(t *testing.T) { t.Run("JSON", func(t *testing.T) { k0 := NewTipSetKey() - verifyJson(t, "[]", k0) + verifyJSON(t, "[]", k0) k3 := NewTipSetKey(c1, c2, c3) - verifyJson(t, `[`+ + verifyJSON(t, `[`+ `{"/":"bafy2bzacecesrkxghscnq7vatble2hqdvwat6ed23vdu4vvo3uuggsoaya7ki"},`+ `{"/":"bafy2bzacebxfyh2fzoxrt6kcgc5dkaodpcstgwxxdizrww225vrhsizsfcg4g"},`+ `{"/":"bafy2bzacedwviarjtjraqakob5pslltmuo5n3xev3nt5zylezofkbbv5jclyu"}`+ @@ -71,7 +71,7 @@ func TestTipSetKey(t *testing.T) { }) } -func verifyJson(t *testing.T, expected string, k TipSetKey) { +func verifyJSON(t *testing.T, expected string, k TipSetKey) { bytes, err := json.Marshal(k) require.NoError(t, err) assert.Equal(t, expected, string(bytes)) diff --git a/chain/types/types_test.go b/chain/types/types_test.go index d912c20e3..1056fc430 100644 --- a/chain/types/types_test.go +++ b/chain/types/types_test.go @@ -22,13 +22,14 @@ func blsaddr(n int64) address.Address { func BenchmarkSerializeMessage(b *testing.B) { m := &Message{ - To: blsaddr(1), - From: blsaddr(2), - Nonce: 197, - Method: 1231254, - Params: []byte("some bytes, idk. probably at least ten of them"), - GasLimit: NewInt(126723), - GasPrice: NewInt(1776234), + To: blsaddr(1), + From: blsaddr(2), + Nonce: 197, + Method: 1231254, + Params: []byte("some bytes, idk. 
probably at least ten of them"), + GasLimit: 126723, + GasPremium: NewInt(1245667), + GasFeeCap: NewInt(1245667), } b.ReportAllocs() diff --git a/chain/types/vmcontext.go b/chain/types/vmcontext.go index 1af2aa3da..d0ce42c0f 100644 --- a/chain/types/vmcontext.go +++ b/chain/types/vmcontext.go @@ -1,13 +1,10 @@ package types import ( - "context" - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-amt-ipld" "github.com/filecoin-project/lotus/chain/actors/aerrors" + cid "github.com/ipfs/go-cid" - hamt "github.com/ipfs/go-hamt-ipld" cbg "github.com/whyrusleeping/cbor-gen" ) @@ -24,31 +21,10 @@ type Storage interface { type StateTree interface { SetActor(addr address.Address, act *Actor) error + // GetActor returns the actor from any type of `addr` provided. GetActor(addr address.Address) (*Actor, error) } -type VMContext interface { - Message() *Message - Origin() address.Address - Ipld() *hamt.CborIpldStore - Send(to address.Address, method uint64, value BigInt, params []byte) ([]byte, aerrors.ActorError) - BlockHeight() uint64 - GasUsed() BigInt - Storage() Storage - StateTree() (StateTree, aerrors.ActorError) - VerifySignature(sig *Signature, from address.Address, data []byte) aerrors.ActorError - ChargeGas(uint64) aerrors.ActorError - GetRandomness(height uint64) ([]byte, aerrors.ActorError) - GetBalance(address.Address) (BigInt, aerrors.ActorError) - Sys() *VMSyscalls - - Context() context.Context -} - -type VMSyscalls struct { - ValidatePoRep func(context.Context, address.Address, uint64, []byte, []byte, []byte, []byte, []byte, uint64) (bool, aerrors.ActorError) -} - type storageWrapper struct { s Storage } @@ -69,7 +45,3 @@ func (sw *storageWrapper) Get(c cid.Cid, out cbg.CBORUnmarshaler) error { return nil } - -func WrapStorage(s Storage) amt.Blocks { - return &storageWrapper{s} -} diff --git a/chain/types/voucher.go b/chain/types/voucher.go index ba2bab6ec..687109c33 100644 --- a/chain/types/voucher.go +++ b/chain/types/voucher.go @@ 
-1,88 +1,22 @@ package types import ( - "bytes" "encoding/base64" - "github.com/filecoin-project/go-address" - cborrpc "github.com/filecoin-project/go-cbor-util" + "github.com/filecoin-project/specs-actors/actors/builtin/paych" cbor "github.com/ipfs/go-ipld-cbor" ) -type SignedVoucher struct { - TimeLock uint64 - SecretPreimage []byte - Extra *ModVerifyParams - Lane uint64 - Nonce uint64 - Amount BigInt - MinCloseHeight uint64 - - Merges []Merge - - Signature *Signature -} - -func (sv *SignedVoucher) SigningBytes() ([]byte, error) { - osv := *sv - osv.Signature = nil - - buf := new(bytes.Buffer) - if err := osv.MarshalCBOR(buf); err != nil { - return nil, err - } - - return buf.Bytes(), nil -} - -func (sv *SignedVoucher) EncodedString() (string, error) { - buf := new(bytes.Buffer) - if err := sv.MarshalCBOR(buf); err != nil { - return "", err - } - - return base64.RawURLEncoding.EncodeToString(buf.Bytes()), nil -} - -func (sv *SignedVoucher) Equals(other *SignedVoucher) bool { - // TODO: make this less bad - - selfB, err := cborrpc.Dump(sv) - if err != nil { - log.Errorf("SignedVoucher.Equals: dump self: %s", err) - return false - } - - otherB, err := cborrpc.Dump(other) - if err != nil { - log.Errorf("SignedVoucher.Equals: dump other: %s", err) - return false - } - - return bytes.Equal(selfB, otherB) -} - -func DecodeSignedVoucher(s string) (*SignedVoucher, error) { +func DecodeSignedVoucher(s string) (*paych.SignedVoucher, error) { data, err := base64.RawURLEncoding.DecodeString(s) if err != nil { return nil, err } - var sv SignedVoucher + var sv paych.SignedVoucher if err := cbor.DecodeInto(data, &sv); err != nil { return nil, err } return &sv, nil } - -type Merge struct { - Lane uint64 - Nonce uint64 -} - -type ModVerifyParams struct { - Actor address.Address - Method uint64 - Data []byte -} diff --git a/chain/types_test.go b/chain/types_test.go index 17b6c9cdd..b47471c9d 100644 --- a/chain/types_test.go +++ b/chain/types_test.go @@ -1,9 +1,12 @@ package chain 
import ( + "crypto/rand" "encoding/json" "testing" + "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/go-address" "github.com/filecoin-project/lotus/chain/types" ) @@ -13,14 +16,15 @@ func TestSignedMessageJsonRoundtrip(t *testing.T) { from, _ := address.NewIDAddress(603911192) smsg := &types.SignedMessage{ Message: types.Message{ - To: to, - From: from, - Params: []byte("some bytes, idk"), - Method: 1235126, - Value: types.NewInt(123123), - GasPrice: types.NewInt(1234), - GasLimit: types.NewInt(9992969384), - Nonce: 123123, + To: to, + From: from, + Params: []byte("some bytes, idk"), + Method: 1235126, + Value: types.NewInt(123123), + GasFeeCap: types.NewInt(1234), + GasPremium: types.NewInt(132414234), + GasLimit: 100_000_000, + Nonce: 123123, }, } @@ -34,3 +38,40 @@ func TestSignedMessageJsonRoundtrip(t *testing.T) { t.Fatal(err) } } + +func TestAddressType(t *testing.T) { + build.SetAddressNetwork(address.Testnet) + addr, err := makeRandomAddress() + if err != nil { + t.Fatal(err) + } + + if string(addr[0]) != address.TestnetPrefix { + t.Fatalf("address should start with %s", address.TestnetPrefix) + } + + build.SetAddressNetwork(address.Mainnet) + addr, err = makeRandomAddress() + if err != nil { + t.Fatal(err) + } + + if string(addr[0]) != address.MainnetPrefix { + t.Fatalf("address should start with %s", address.MainnetPrefix) + } +} + +func makeRandomAddress() (string, error) { + bytes := make([]byte, 32) + _, err := rand.Read(bytes) + if err != nil { + return "", err + } + + addr, err := address.NewActorAddress(bytes) + if err != nil { + return "", err + } + + return addr.String(), nil +} diff --git a/chain/validation/applier.go b/chain/validation/applier.go deleted file mode 100644 index 4e21f1228..000000000 --- a/chain/validation/applier.go +++ /dev/null @@ -1,65 +0,0 @@ -package validation - -import ( - "context" - - vchain "github.com/filecoin-project/chain-validation/pkg/chain" - vstate 
"github.com/filecoin-project/chain-validation/pkg/state" - vtypes "github.com/filecoin-project/chain-validation/pkg/state/types" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/chain/vm" -) - -// Applier applies messages to state trees and storage. -type Applier struct { -} - -var _ vchain.Applier = &Applier{} - -func NewApplier() *Applier { - return &Applier{} -} - -func (a *Applier) ApplyMessage(eCtx *vchain.ExecutionContext, state vstate.Wrapper, message interface{}) (vchain.MessageReceipt, error) { - ctx := context.TODO() - st := state.(*StateWrapper) - - base := st.Cid() - randSrc := &vmRand{eCtx} - minerAddr, err := address.NewFromBytes(eCtx.MinerOwner.Bytes()) - if err != nil { - return vchain.MessageReceipt{}, err - } - lotusVM, err := vm.NewVM(base, eCtx.Epoch, randSrc, minerAddr, st.bs) - if err != nil { - return vchain.MessageReceipt{}, err - } - - ret, err := lotusVM.ApplyMessage(ctx, message.(*types.Message)) - if err != nil { - return vchain.MessageReceipt{}, err - } - - st.stateRoot, err = lotusVM.Flush(ctx) - if err != nil { - return vchain.MessageReceipt{}, err - } - - mr := vchain.MessageReceipt{ - ExitCode: ret.ExitCode, - ReturnValue: ret.Return, - GasUsed: vtypes.GasUnit(ret.GasUsed.Uint64()), - } - - return mr, ret.ActorErr -} - -type vmRand struct { - eCtx *vchain.ExecutionContext -} - -func (*vmRand) GetRandomness(ctx context.Context, h int64) ([]byte, error) { - panic("implement me") -} diff --git a/chain/validation/factories.go b/chain/validation/factories.go deleted file mode 100644 index 336db2282..000000000 --- a/chain/validation/factories.go +++ /dev/null @@ -1,27 +0,0 @@ -package validation - -import ( - vchain "github.com/filecoin-project/chain-validation/pkg/chain" - vstate "github.com/filecoin-project/chain-validation/pkg/state" - "github.com/filecoin-project/chain-validation/pkg/suites" -) - -type factories struct { - *Applier -} - -var _ 
suites.Factories = &factories{} - -func NewFactories() *factories { - applier := NewApplier() - return &factories{applier} -} - -func (f *factories) NewState() vstate.Wrapper { - return NewState() -} - -func (f *factories) NewMessageFactory(wrapper vstate.Wrapper) vchain.MessageFactory { - signer := wrapper.(*StateWrapper).Signer() - return NewMessageFactory(signer) -} diff --git a/chain/validation/message.go b/chain/validation/message.go deleted file mode 100644 index 2e497184f..000000000 --- a/chain/validation/message.go +++ /dev/null @@ -1,96 +0,0 @@ -package validation - -import ( - "context" - "github.com/ipfs/go-cid" - "golang.org/x/xerrors" - - vchain "github.com/filecoin-project/chain-validation/pkg/chain" - vactors "github.com/filecoin-project/chain-validation/pkg/state/actors" - vaddress "github.com/filecoin-project/chain-validation/pkg/state/address" - vtypes "github.com/filecoin-project/chain-validation/pkg/state/types" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/lotus/chain/actors" - "github.com/filecoin-project/lotus/chain/types" -) - -type Signer interface { - Sign(ctx context.Context, addr vaddress.Address, msg []byte) (*types.Signature, error) -} - -type MessageFactory struct { - signer Signer -} - -var _ vchain.MessageFactory = &MessageFactory{} - -func NewMessageFactory(signer Signer) *MessageFactory { - return &MessageFactory{signer} -} - -func (mf *MessageFactory) MakeMessage(from, to vaddress.Address, method vchain.MethodID, nonce uint64, value, gasPrice vtypes.BigInt, gasLimit vtypes.GasUnit, params []byte) (interface{}, error) { - fromDec, err := address.NewFromBytes(from.Bytes()) - if err != nil { - return nil, err - } - toDec, err := address.NewFromBytes(to.Bytes()) - if err != nil { - return nil, err - } - valueDec := types.BigInt{value.Int} - - if int(method) >= len(methods) { - return nil, xerrors.Errorf("No method name for method %v", method) - } - methodId := methods[method] - msg := &types.Message{ - 
toDec, - fromDec, - nonce, - valueDec, - types.BigInt{gasPrice.Int}, - types.NewInt(uint64(gasLimit)), - methodId, - params, - } - - return msg, nil -} - -func (mf *MessageFactory) FromSingletonAddress(addr vactors.SingletonActorID) vaddress.Address { - return fromSingletonAddress(addr) -} - -func (mf *MessageFactory) FromActorCodeCid(code vactors.ActorCodeID) cid.Cid { - return fromActorCode(code) -} - -// Maps method enumeration values to method names. -// This will change to a mapping to method ids when method dispatch is updated to use integers. -var methods = []uint64{ - vchain.NoMethod: 0, - vchain.InitExec: actors.IAMethods.Exec, - - vchain.StoragePowerConstructor: actors.SPAMethods.Constructor, - vchain.StoragePowerCreateStorageMiner: actors.SPAMethods.CreateStorageMiner, - vchain.StoragePowerUpdatePower: actors.SPAMethods.UpdateStorage, - - vchain.StorageMinerUpdatePeerID: actors.MAMethods.UpdatePeerID, - vchain.StorageMinerGetOwner: actors.MAMethods.GetOwner, - vchain.StorageMinerGetPower: actors.MAMethods.GetPower, - vchain.StorageMinerGetWorkerAddr: actors.MAMethods.GetWorkerAddr, - vchain.StorageMinerGetPeerID: actors.MAMethods.GetPeerID, - vchain.StorageMinerGetSectorSize: actors.MAMethods.GetSectorSize, - - vchain.MultiSigConstructor: actors.MultiSigMethods.MultiSigConstructor, - vchain.MultiSigPropose: actors.MultiSigMethods.Propose, - vchain.MultiSigApprove: actors.MultiSigMethods.Approve, - vchain.MultiSigCancel: actors.MultiSigMethods.Cancel, - vchain.MultiSigClearCompleted: actors.MultiSigMethods.ClearCompleted, - vchain.MultiSigAddSigner: actors.MultiSigMethods.AddSigner, - vchain.MultiSigRemoveSigner: actors.MultiSigMethods.RemoveSigner, - vchain.MultiSigSwapSigner: actors.MultiSigMethods.SwapSigner, - vchain.MultiSigChangeRequirement: actors.MultiSigMethods.ChangeRequirement, - // More to follow... 
-} diff --git a/chain/validation/message_test.go b/chain/validation/message_test.go deleted file mode 100644 index 65fcc94dc..000000000 --- a/chain/validation/message_test.go +++ /dev/null @@ -1,59 +0,0 @@ -package validation - -import ( - "context" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - vchain "github.com/filecoin-project/chain-validation/pkg/chain" - vactors "github.com/filecoin-project/chain-validation/pkg/state/actors" - vaddress "github.com/filecoin-project/chain-validation/pkg/state/address" - vtypes "github.com/filecoin-project/chain-validation/pkg/state/types" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/lotus/chain/actors" - "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/chain/wallet" -) - -func TestMessageFactory(t *testing.T) { - ks := wallet.NewMemKeyStore() - wallet, err := wallet.NewWallet(ks) - require.NoError(t, err) - factory := NewMessageFactory(&walletWrapper{wallet}) - - gasPrice := vtypes.NewInt(1) - gasLimit := vtypes.GasUnit(1000) - p := vchain.NewMessageProducer(factory, gasLimit, gasPrice) - - sender, err := wallet.GenerateKey(types.KTSecp256k1) - require.NoError(t, err) - - bfAddr := factory.FromSingletonAddress(vactors.BurntFundsAddress) - addr, err := vaddress.NewFromBytes(sender.Bytes()) - require.NoError(t, err) - m, err := p.Transfer(addr, bfAddr, 0, 1) - require.NoError(t, err) - - messages := p.Messages() - assert.Equal(t, 1, len(messages)) - msg := m.(*types.Message) - assert.Equal(t, m, msg) - assert.Equal(t, sender, msg.From) - assert.Equal(t, actors.BurntFundsAddress, msg.To) - assert.Equal(t, types.NewInt(1), msg.Value) -} - -type walletWrapper struct { - w *wallet.Wallet -} - -func (ww *walletWrapper) Sign(ctx context.Context, vaddr vaddress.Address, msg []byte) (*types.Signature, error) { - addr, err := address.NewFromBytes(vaddr.Bytes()) - if err != nil { - return nil, err - } - return ww.w.Sign(ctx, 
addr, msg) -} diff --git a/chain/validation/state.go b/chain/validation/state.go deleted file mode 100644 index f03aee5d8..000000000 --- a/chain/validation/state.go +++ /dev/null @@ -1,365 +0,0 @@ -package validation - -import ( - "context" - "fmt" - "math/rand" - - "github.com/ipfs/go-cid" - "github.com/ipfs/go-datastore" - "github.com/ipfs/go-hamt-ipld" - blockstore "github.com/ipfs/go-ipfs-blockstore" - "github.com/minio/blake2b-simd" - cbg "github.com/whyrusleeping/cbor-gen" - "golang.org/x/xerrors" - - vstate "github.com/filecoin-project/chain-validation/pkg/state" - vactors "github.com/filecoin-project/chain-validation/pkg/state/actors" - vaddress "github.com/filecoin-project/chain-validation/pkg/state/address" - vtypes "github.com/filecoin-project/chain-validation/pkg/state/types" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-crypto" - "github.com/filecoin-project/lotus/chain/actors" - "github.com/filecoin-project/lotus/chain/gen" - "github.com/filecoin-project/lotus/chain/state" - "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/chain/vm" - "github.com/filecoin-project/lotus/chain/wallet" -) - -type StateWrapper struct { - // The blockstore underlying the state tree and storage. - bs blockstore.Blockstore - // HAMT-CBOR store on top of the blockstore. - cst *hamt.CborIpldStore - // A store for encryption keys. - keys *keyStore - - // CID of the root of the state tree. - stateRoot cid.Cid - // The root node of the state tree, essentially a cache of LoadStateTree(cst, stateRoot) - //tree *state.StateTree - // A look-through storage implementation. - storage *directStorage -} - -var _ vstate.Wrapper = &StateWrapper{} - -func NewState() *StateWrapper { - bs := blockstore.NewBlockstore(datastore.NewMapDatastore()) - cst := hamt.CSTFromBstore(bs) - // Put EmptyObjectCid value in the store. When an actor is initially created its Head is set to this value. 
- _, err := cst.Put(context.TODO(), map[string]string{}) - if err != nil { - panic(err) - } - - treeImpl, err := state.NewStateTree(cst) - if err != nil { - panic(err) // Never returns error, the error return should be removed. - } - root, err := treeImpl.Flush() - if err != nil { - panic(err) - } - storageImpl := &directStorage{cst} - return &StateWrapper{bs, cst, newKeyStore(), root, storageImpl} -} - -func (s *StateWrapper) Cid() cid.Cid { - return s.stateRoot -} - -func (s *StateWrapper) Actor(addr vaddress.Address) (vstate.Actor, error) { - vaddr, err := address.NewFromBytes(addr.Bytes()) - if err != nil { - return nil, err - } - tree, err := state.LoadStateTree(s.cst, s.stateRoot) - if err != nil { - return nil, err - } - fcActor, err := tree.GetActor(vaddr) - if err != nil { - return nil, err - } - return &actorWrapper{*fcActor}, nil -} - -func (s *StateWrapper) Storage(addr vaddress.Address) (vstate.Storage, error) { - return s.storage, nil -} - -func (s *StateWrapper) NewAccountAddress() (vaddress.Address, error) { - return s.keys.NewAddress() -} - -func (s *StateWrapper) SetActor(addr vaddress.Address, code vactors.ActorCodeID, balance vtypes.BigInt) (vstate.Actor, vstate.Storage, error) { - addrInt, err := address.NewFromBytes(addr.Bytes()) - if err != nil { - return nil, nil, err - } - tree, err := state.LoadStateTree(s.cst, s.stateRoot) - if err != nil { - return nil, nil, err - } - actr := &actorWrapper{types.Actor{ - Code: fromActorCode(code), - Balance: types.BigInt{balance.Int}, - Head: vm.EmptyObjectCid, - }} - // The ID-based address is dropped here, but should be reported back to the caller. 
- _, err = tree.RegisterNewAddress(addrInt, &actr.Actor) - if err != nil { - return nil, nil, xerrors.Errorf("register new address for actor: %w", err) - } - return actr, s.storage, s.flush(tree) -} - -func (s *StateWrapper) SetSingletonActor(addr vactors.SingletonActorID, balance vtypes.BigInt) (vstate.Actor, vstate.Storage, error) { - vaddr := fromSingletonAddress(addr) - - tree, err := state.LoadStateTree(s.cst, s.stateRoot) - if err != nil { - return nil, nil, err - } - - lotusAddr, err := address.NewFromBytes(vaddr.Bytes()) - if err != nil { - return nil, nil, err - } - - switch lotusAddr { - case actors.InitAddress: - initact, err := gen.SetupInitActor(s.bs, nil) - if err != nil { - return nil, nil, err - } - if err := tree.SetActor(actors.InitAddress, initact); err != nil { - return nil, nil, xerrors.Errorf("set init actor: %w", err) - } - - return &actorWrapper{*initact}, s.storage, s.flush(tree) - case actors.StorageMarketAddress: - nsroot, err := gen.SetupStorageMarketActor(s.bs, s.stateRoot, nil) - if err != nil { - return nil, nil, err - } - s.stateRoot = nsroot - - tree, err = state.LoadStateTree(s.cst, s.stateRoot) - if err != nil { - return nil, nil, err - } - smact, err := tree.GetActor(actors.StorageMarketAddress) - if err != nil { - return nil, nil, err - } - return &actorWrapper{*smact}, s.storage, s.flush(tree) - case actors.StoragePowerAddress: - spact, err := gen.SetupStoragePowerActor(s.bs) - if err != nil { - return nil, nil, err - } - if err := tree.SetActor(actors.StoragePowerAddress, spact); err != nil { - return nil, nil, xerrors.Errorf("set network storage market actor: %w", err) - } - return &actorWrapper{*spact}, s.storage, s.flush(tree) - case actors.NetworkAddress: - ntwkact := &types.Actor{ - Code: actors.AccountCodeCid, - Balance: types.BigInt{balance.Int}, - Head: vm.EmptyObjectCid, - } - if err := tree.SetActor(actors.NetworkAddress, ntwkact); err != nil { - return nil, nil, xerrors.Errorf("set network actor: %w", err) - } - 
return &actorWrapper{*ntwkact}, s.storage, s.flush(tree) - case actors.BurntFundsAddress: - ntwkact := &types.Actor{ - Code: actors.AccountCodeCid, - Balance: types.BigInt{balance.Int}, - Head: vm.EmptyObjectCid, - } - if err := tree.SetActor(actors.BurntFundsAddress, ntwkact); err != nil { - return nil, nil, xerrors.Errorf("set network actor: %w", err) - } - return &actorWrapper{*ntwkact}, s.storage, s.flush(tree) - default: - return nil, nil, xerrors.Errorf("%v is not a singleton actor address", addr) - } -} - -func (s *StateWrapper) Sign(ctx context.Context, addr vaddress.Address, data []byte) (*vtypes.Signature, error) { - sig, err := s.keys.Sign(ctx, addr, data) - if err != nil { - return nil, err - } - return &vtypes.Signature{ - Type: sig.Type, - Data: sig.Data, - }, nil -} - -func (s *StateWrapper) Signer() *keyStore { - return s.keys -} - -// Flushes a state tree to storage and sets this state's root to that tree's root CID. -func (s *StateWrapper) flush(tree *state.StateTree) (err error) { - s.stateRoot, err = tree.Flush() - return -} - -// -// Key store -// -type keyStore struct { - // Private keys by address - keys map[vaddress.Address]vtypes.KeyInfo - // Seed for deterministic key generation. 
- seed int64 -} - -func newKeyStore() *keyStore { - return &keyStore{ - keys: make(map[vaddress.Address]vtypes.KeyInfo), - seed: 0, - } -} - -func (s *keyStore) NewAddress() (vaddress.Address, error) { - randSrc := rand.New(rand.NewSource(s.seed)) - prv, err := crypto.GenerateKeyFromSeed(randSrc) - if err != nil { - return vaddress.Undef, err - } - - vki := vtypes.KeyInfo{ - PrivateKey: prv, - Type: types.KTSecp256k1, - } - key, err := wallet.NewKey(types.KeyInfo{ - Type: vki.Type, - PrivateKey: vki.PrivateKey, - }) - if err != nil { - return vaddress.Undef, err - } - vaddr, err := vaddress.NewFromBytes(key.Address.Bytes()) - if err != nil { - return vaddress.Undef, err - } - s.keys[vaddr] = vki - s.seed++ - return vaddress.NewFromBytes(key.Address.Bytes()) -} - -func (s *keyStore) Sign(ctx context.Context, addr vaddress.Address, data []byte) (*types.Signature, error) { - ki, ok := s.keys[addr] - if !ok { - return &types.Signature{}, fmt.Errorf("unknown address %v", addr) - } - b2sum := blake2b.Sum256(data) - digest, err := crypto.Sign(ki.PrivateKey, b2sum[:]) - if err != nil { - return &types.Signature{}, err - } - return &types.Signature{ - Type: types.KTSecp256k1, - Data: digest, - }, nil -} - -// -// Actor Wrapper -// - -type actorWrapper struct { - types.Actor -} - -func (a *actorWrapper) Code() cid.Cid { - return a.Actor.Code -} - -func (a *actorWrapper) Head() cid.Cid { - return a.Actor.Head -} - -func (a *actorWrapper) Nonce() uint64 { - return a.Actor.Nonce -} - -func (a *actorWrapper) Balance() vtypes.BigInt { - return vtypes.NewInt(a.Actor.Balance.Uint64()) - -} - -// -// Storage -// - -type directStorage struct { - cst *hamt.CborIpldStore -} - -func (d *directStorage) Get(c cid.Cid, out interface{}) error { - if err := d.cst.Get(context.TODO(), c, out.(cbg.CBORUnmarshaler)); err != nil { - return err - } - return nil -} - -func fromActorCode(code vactors.ActorCodeID) cid.Cid { - switch code { - case vactors.AccountActorCodeCid: - return 
actors.AccountCodeCid - case vactors.StorageMinerCodeCid: - return actors.StorageMinerCodeCid - case vactors.MultisigActorCodeCid: - return actors.MultisigCodeCid - case vactors.PaymentChannelActorCodeCid: - return actors.PaymentChannelCodeCid - default: - panic(fmt.Errorf("unknown actor code: %v", code)) - } -} - -func fromSingletonAddress(addr vactors.SingletonActorID) vaddress.Address { - switch addr { - case vactors.InitAddress: - out, err := vaddress.NewFromBytes(actors.InitAddress.Bytes()) - if err != nil { - panic(err) - } - return out - case vactors.NetworkAddress: - out, err := vaddress.NewFromBytes(actors.NetworkAddress.Bytes()) - if err != nil { - panic(err) - } - return out - case vactors.StorageMarketAddress: - out, err := vaddress.NewFromBytes(actors.StorageMarketAddress.Bytes()) - if err != nil { - panic(err) - } - return out - case vactors.BurntFundsAddress: - out, err := vaddress.NewFromBytes(actors.BurntFundsAddress.Bytes()) - if err != nil { - panic(err) - } - return out - case vactors.StoragePowerAddress: - out, err := vaddress.NewFromBytes(actors.StoragePowerAddress.Bytes()) - if err != nil { - panic(err) - } - return out - default: - panic(fmt.Errorf("unknown singleton actor address: %v", addr)) - } -} diff --git a/chain/vectors/gen/main.go b/chain/vectors/gen/main.go new file mode 100644 index 000000000..2b1c6f340 --- /dev/null +++ b/chain/vectors/gen/main.go @@ -0,0 +1,207 @@ +package main + +import ( + "encoding/json" + "fmt" + "math/rand" + "os" + + power0 "github.com/filecoin-project/specs-actors/actors/builtin/power" + + "github.com/filecoin-project/go-address" + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/go-state-types/crypto" + "github.com/filecoin-project/lotus/chain/gen" + "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/chain/types/mock" + 
"github.com/filecoin-project/lotus/chain/vectors" + "github.com/filecoin-project/lotus/chain/wallet" + verifreg0 "github.com/filecoin-project/specs-actors/actors/builtin/verifreg" + + _ "github.com/filecoin-project/lotus/lib/sigs/bls" + _ "github.com/filecoin-project/lotus/lib/sigs/secp" +) + +func init() { + verifreg0.MinVerifiedDealSize = big.NewInt(2048) + power0.ConsensusMinerMinPower = big.NewInt(2048) +} + +func MakeHeaderVectors() []vectors.HeaderVector { + cg, err := gen.NewGenerator() + if err != nil { + panic(err) + } + + var out []vectors.HeaderVector + for i := 0; i < 5; i++ { + nts, err := cg.NextTipSet() + if err != nil { + panic(err) + } + + h := nts.TipSet.Blocks[0].Header + data, err := h.Serialize() + if err != nil { + panic(err) + } + + out = append(out, vectors.HeaderVector{ + Block: h, + Cid: h.Cid().String(), + CborHex: fmt.Sprintf("%x", data), + }) + } + return out +} + +func MakeMessageSigningVectors() []vectors.MessageSigningVector { + w, err := wallet.NewWallet(wallet.NewMemKeyStore()) + if err != nil { + panic(err) + } + + blsk, err := w.GenerateKey(crypto.SigTypeBLS) + if err != nil { + panic(err) + } + bki, err := w.Export(blsk) + if err != nil { + panic(err) + } + + to, err := address.NewIDAddress(99999) + if err != nil { + panic(err) + } + + bmsg := mock.MkMessage(blsk, to, 55, w) + + blsmsv := vectors.MessageSigningVector{ + Unsigned: &bmsg.Message, + Cid: bmsg.Message.Cid().String(), + CidHexBytes: fmt.Sprintf("%x", bmsg.Message.Cid().Bytes()), + PrivateKey: bki.PrivateKey, + Signature: &bmsg.Signature, + } + + secpk, err := w.GenerateKey(crypto.SigTypeBLS) + if err != nil { + panic(err) + } + ski, err := w.Export(secpk) + if err != nil { + panic(err) + } + + smsg := mock.MkMessage(secpk, to, 55, w) + + smsv := vectors.MessageSigningVector{ + Unsigned: &smsg.Message, + Cid: smsg.Message.Cid().String(), + CidHexBytes: fmt.Sprintf("%x", smsg.Message.Cid().Bytes()), + PrivateKey: ski.PrivateKey, + Signature: &smsg.Signature, + } + + 
return []vectors.MessageSigningVector{blsmsv, smsv} +} + +func MakeUnsignedMessageVectors() []vectors.UnsignedMessageVector { + froms := []string{ + "t2ch7krq7l35i74rebqbjdsp3ucl47t24e3juxjfa", + "t1pyfq7dg6sq65acyomqvzvbgwni4zllglqffw5dy", + "t1cyg66djxytxhzdq7ynoqfxk7xinp6xsejbeufli", + "t16n7vrq5humzoqll7zg4yw6dta645tuakcoalp6y", + "t1awsiuji4wpbxpzslg36f3wnfxzi4o5gq67tz2mi", + "t14mb3j32uuwajy5b2mliz63isp6zl5xkppzyuhfy", + "t1dzdmyzzdy6q5elobj63eokzv2xnwsp4vm5l6aka", + "t1svd45rkcfpsyqedvvhuv77yvllvu5ygmygjlvka", + "t1mrret5liwh46qde6qhaxrmcwil7jawjeqdijwfq", + "t1ly3ynedw74p4q3ytdnb4stjdkiodrl54moeyxea", + "t1uqexvn66gj4lxkbvmrgposwrlxbyd655o2nayyi", + "t1dwwjod7vw62jzw2eva7gtxohaidjhgh6w2rofui", + "t1slswisymmkfulmvl3jynrnwqi27tkvmsgzhztvy", + "t1e3vymxcdqfkqwz6e6wnxxx6ayuml3vxi5gef4xa", + "t1bgqopgk64ywpprka4citgi62aldclyaegvwvx6y", + "t1aizqgl2klzkzffwu35rufyuzefke2i6ndbewuhi", + "t1mzposcnsd2tc66yu5i3kajtrh5pvwohdjvitcey", + "t1x7xvs6oorrrlefyzn6wlbvaibzj3a2fyt4hsmvq", + "t1ez743nvc4j7qfirwnmxbh4qdqwha3iyalnq4rya", + "t17dvtgkop7cqgi6myjne5kzvrnsbg5wnowjphhwy", + "t1kvar5z3q7dwrfxjqsnuqpq5qsd7mvh2xypblwta", + } + var out []vectors.UnsignedMessageVector + for _, a := range froms { + from, err := address.NewFromString(a) + if err != nil { + panic(err) + } + uint63mask := uint64(1<<63 - 1) + to, err := address.NewIDAddress(rand.Uint64() & uint63mask) + if err != nil { + panic(err) + } + + params := make([]byte, 32) + rand.Read(params) + + msg := &types.Message{ + To: to, + From: from, + Value: types.NewInt(rand.Uint64()), + Method: abi.MethodNum(rand.Uint64()), + GasFeeCap: types.NewInt(rand.Uint64()), + GasPremium: types.NewInt(rand.Uint64()), + GasLimit: rand.Int63(), + Nonce: rand.Uint64() & (1<<63 - 1), + Params: params, + } + + ser, err := msg.Serialize() + if err != nil { + panic(err) + } + + out = append(out, vectors.UnsignedMessageVector{ + Message: msg, + HexCbor: fmt.Sprintf("%x", ser), + }) + } + return out +} + +func WriteJsonToFile(fname string, 
obj interface{}) error { + fi, err := os.Create(fname) + if err != nil { + return err + } + defer fi.Close() //nolint:errcheck + + out, err := json.MarshalIndent(obj, "", " ") + if err != nil { + return err + } + + _, err = fi.Write(out) + if err != nil { + return xerrors.Errorf("writing json: %w", err) + } + + return nil +} + +func main() { + if err := WriteJsonToFile("block_headers.json", MakeHeaderVectors()); err != nil { + panic(err) + } + if err := WriteJsonToFile("message_signing.json", MakeMessageSigningVectors()); err != nil { + panic(err) + } + if err := WriteJsonToFile("unsigned_messages.json", MakeUnsignedMessageVectors()); err != nil { + panic(err) + } +} diff --git a/chain/vectors/vector_types.go b/chain/vectors/vector_types.go new file mode 100644 index 000000000..7e014fb77 --- /dev/null +++ b/chain/vectors/vector_types.go @@ -0,0 +1,25 @@ +package vectors + +import ( + "github.com/filecoin-project/go-state-types/crypto" + "github.com/filecoin-project/lotus/chain/types" +) + +type HeaderVector struct { + Block *types.BlockHeader `json:"block"` + CborHex string `json:"cbor_hex"` + Cid string `json:"cid"` +} + +type MessageSigningVector struct { + Unsigned *types.Message + Cid string + CidHexBytes string + PrivateKey []byte + Signature *crypto.Signature +} + +type UnsignedMessageVector struct { + Message *types.Message `json:"message"` + HexCbor string `json:"hex_cbor"` +} diff --git a/chain/vectors/vectors_test.go b/chain/vectors/vectors_test.go new file mode 100644 index 000000000..68cc5ac45 --- /dev/null +++ b/chain/vectors/vectors_test.go @@ -0,0 +1,88 @@ +package vectors + +import ( + "bytes" + "encoding/hex" + "encoding/json" + "fmt" + "os" + "path/filepath" + "testing" + + "github.com/filecoin-project/lotus/chain/types" +) + +func LoadVector(t *testing.T, f string, out interface{}) { + p := filepath.Join("../../extern/serialization-vectors", f) + fi, err := os.Open(p) + if err != nil { + t.Fatal(err) + } + defer fi.Close() //nolint:errcheck + + 
if err := json.NewDecoder(fi).Decode(out); err != nil { + t.Fatal(err) + } +} + +func TestBlockHeaderVectors(t *testing.T) { + t.Skip("we need to regenerate for beacon") + var headers []HeaderVector + LoadVector(t, "block_headers.json", &headers) + + for i, hv := range headers { + if hv.Block.Cid().String() != hv.Cid { + t.Fatalf("CID mismatch in test vector %d", i) + } + + data, err := hv.Block.Serialize() + if err != nil { + t.Fatal(err) + } + + if fmt.Sprintf("%x", data) != hv.CborHex { + t.Fatalf("serialized data mismatched for test vector %d", i) + } + } +} + +func TestMessageSigningVectors(t *testing.T) { + var msvs []MessageSigningVector + LoadVector(t, "message_signing.json", &msvs) + + for i, msv := range msvs { + smsg := &types.SignedMessage{ + Message: *msv.Unsigned, + Signature: *msv.Signature, + } + + if smsg.Cid().String() != msv.Cid { + t.Fatalf("cid of message in vector %d mismatches", i) + } + + // TODO: check signature + } +} + +func TestUnsignedMessageVectors(t *testing.T) { + t.Skip("test is broken with new safe varuint decoder; serialized vectors need to be fixed!") + + var msvs []UnsignedMessageVector + LoadVector(t, "unsigned_messages.json", &msvs) + + for i, msv := range msvs { + b, err := msv.Message.Serialize() + if err != nil { + t.Fatal(err) + } + + dec, err := hex.DecodeString(msv.HexCbor) + if err != nil { + t.Fatal(err) + } + + if !bytes.Equal(b, dec) { + t.Fatalf("serialization vector %d mismatches bytes", i) + } + } +} diff --git a/chain/vm/burn.go b/chain/vm/burn.go new file mode 100644 index 000000000..9f9b95755 --- /dev/null +++ b/chain/vm/burn.go @@ -0,0 +1,102 @@ +package vm + +import ( + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" +) + +const ( + gasOveruseNum = 11 + gasOveruseDenom = 10 +) + +type GasOutputs struct { + BaseFeeBurn abi.TokenAmount + OverEstimationBurn abi.TokenAmount + + MinerPenalty abi.TokenAmount + MinerTip abi.TokenAmount + Refund abi.TokenAmount + + 
GasRefund int64 + GasBurned int64 +} + +// ZeroGasOutputs returns a logically zeroed GasOutputs. +func ZeroGasOutputs() GasOutputs { + return GasOutputs{ + BaseFeeBurn: big.Zero(), + OverEstimationBurn: big.Zero(), + MinerPenalty: big.Zero(), + MinerTip: big.Zero(), + Refund: big.Zero(), + } +} + +// ComputeGasOverestimationBurn computes amount of gas to be refunded and amount of gas to be burned +// Result is (refund, burn) +func ComputeGasOverestimationBurn(gasUsed, gasLimit int64) (int64, int64) { + if gasUsed == 0 { + return 0, gasLimit + } + + // over = gasLimit/gasUsed - 1 - 0.1 + // over = min(over, 1) + // gasToBurn = (gasLimit - gasUsed) * over + + // so to factor out division from `over` + // over*gasUsed = min(gasLimit - (11*gasUsed)/10, gasUsed) + // gasToBurn = ((gasLimit - gasUsed)*over*gasUsed) / gasUsed + over := gasLimit - (gasOveruseNum*gasUsed)/gasOveruseDenom + if over < 0 { + return gasLimit - gasUsed, 0 + } + + // if we want sharper scaling it goes here: + // over *= 2 + + if over > gasUsed { + over = gasUsed + } + + // needs bigint, as it overflows in pathological case gasLimit > 2^32 gasUsed = gasLimit / 2 + gasToBurn := big.NewInt(gasLimit - gasUsed) + gasToBurn = big.Mul(gasToBurn, big.NewInt(over)) + gasToBurn = big.Div(gasToBurn, big.NewInt(gasUsed)) + + return gasLimit - gasUsed - gasToBurn.Int64(), gasToBurn.Int64() +} + +func ComputeGasOutputs(gasUsed, gasLimit int64, baseFee, feeCap, gasPremium abi.TokenAmount) GasOutputs { + gasUsedBig := big.NewInt(gasUsed) + out := ZeroGasOutputs() + + baseFeeToPay := baseFee + if baseFee.Cmp(feeCap.Int) > 0 { + baseFeeToPay = feeCap + out.MinerPenalty = big.Mul(big.Sub(baseFee, feeCap), gasUsedBig) + } + out.BaseFeeBurn = big.Mul(baseFeeToPay, gasUsedBig) + + minerTip := gasPremium + if big.Cmp(big.Add(baseFeeToPay, minerTip), feeCap) > 0 { + minerTip = big.Sub(feeCap, baseFeeToPay) + } + out.MinerTip = big.Mul(minerTip, big.NewInt(gasLimit)) + + out.GasRefund, out.GasBurned = 
ComputeGasOverestimationBurn(gasUsed, gasLimit) + + if out.GasBurned != 0 { + gasBurnedBig := big.NewInt(out.GasBurned) + out.OverEstimationBurn = big.Mul(baseFeeToPay, gasBurnedBig) + minerPenalty := big.Mul(big.Sub(baseFee, baseFeeToPay), gasBurnedBig) + out.MinerPenalty = big.Add(out.MinerPenalty, minerPenalty) + } + + requiredFunds := big.Mul(big.NewInt(gasLimit), feeCap) + refund := big.Sub(requiredFunds, out.BaseFeeBurn) + refund = big.Sub(refund, out.MinerTip) + refund = big.Sub(refund, out.OverEstimationBurn) + out.Refund = refund + return out +} diff --git a/chain/vm/burn_test.go b/chain/vm/burn_test.go new file mode 100644 index 000000000..58e133605 --- /dev/null +++ b/chain/vm/burn_test.go @@ -0,0 +1,78 @@ +package vm + +import ( + "fmt" + "testing" + + "github.com/filecoin-project/lotus/chain/types" + "github.com/stretchr/testify/assert" +) + +func TestGasBurn(t *testing.T) { + tests := []struct { + used int64 + limit int64 + refund int64 + burn int64 + }{ + {100, 200, 10, 90}, + {100, 150, 30, 20}, + {1000, 1300, 240, 60}, + {500, 700, 140, 60}, + {200, 200, 0, 0}, + {20000, 21000, 1000, 0}, + {0, 2000, 0, 2000}, + {500, 651, 121, 30}, + {500, 5000, 0, 4500}, + {7499e6, 7500e6, 1000000, 0}, + {7500e6 / 2, 7500e6, 375000000, 3375000000}, + {1, 7500e6, 0, 7499999999}, + } + + for _, test := range tests { + test := test + t.Run(fmt.Sprintf("%v", test), func(t *testing.T) { + refund, toBurn := ComputeGasOverestimationBurn(test.used, test.limit) + assert.Equal(t, test.refund, refund, "refund") + assert.Equal(t, test.burn, toBurn, "burned") + }) + } +} + +func TestGasOutputs(t *testing.T) { + baseFee := types.NewInt(10) + tests := []struct { + used int64 + limit int64 + + feeCap uint64 + premium uint64 + + BaseFeeBurn uint64 + OverEstimationBurn uint64 + MinerPenalty uint64 + MinerTip uint64 + Refund uint64 + }{ + {100, 110, 11, 1, 1000, 0, 0, 110, 100}, + {100, 130, 11, 1, 1000, 60, 0, 130, 240}, + {100, 110, 10, 1, 1000, 0, 0, 0, 100}, + {100, 110, 6, 1, 
600, 0, 400, 0, 60}, + } + + for _, test := range tests { + test := test + t.Run(fmt.Sprintf("%v", test), func(t *testing.T) { + output := ComputeGasOutputs(test.used, test.limit, baseFee, types.NewInt(test.feeCap), types.NewInt(test.premium)) + i2s := func(i uint64) string { + return fmt.Sprintf("%d", i) + } + assert.Equal(t, i2s(test.BaseFeeBurn), output.BaseFeeBurn.String(), "BaseFeeBurn") + assert.Equal(t, i2s(test.OverEstimationBurn), output.OverEstimationBurn.String(), "OverEstimationBurn") + assert.Equal(t, i2s(test.MinerPenalty), output.MinerPenalty.String(), "MinerPenalty") + assert.Equal(t, i2s(test.MinerTip), output.MinerTip.String(), "MinerTip") + assert.Equal(t, i2s(test.Refund), output.Refund.String(), "Refund") + }) + } + +} diff --git a/chain/vm/gas.go b/chain/vm/gas.go new file mode 100644 index 000000000..12acf6a21 --- /dev/null +++ b/chain/vm/gas.go @@ -0,0 +1,232 @@ +package vm + +import ( + "fmt" + + "github.com/filecoin-project/specs-actors/actors/runtime/proof" + + "github.com/filecoin-project/go-address" + addr "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/crypto" + "github.com/filecoin-project/specs-actors/actors/runtime" + vmr "github.com/filecoin-project/specs-actors/actors/runtime" + "github.com/ipfs/go-cid" +) + +const ( + GasStorageMulti = 1000 + GasComputeMulti = 1 +) + +type GasCharge struct { + Name string + Extra interface{} + + ComputeGas int64 + StorageGas int64 + + VirtualCompute int64 + VirtualStorage int64 +} + +func (g GasCharge) Total() int64 { + return g.ComputeGas*GasComputeMulti + g.StorageGas*GasStorageMulti +} +func (g GasCharge) WithVirtual(compute, storage int64) GasCharge { + out := g + out.VirtualCompute = compute + out.VirtualStorage = storage + return out +} + +func (g GasCharge) WithExtra(extra interface{}) GasCharge { + out := g + out.Extra = extra + return out +} + +func newGasCharge(name string, computeGas int64, 
storageGas int64) GasCharge { + return GasCharge{ + Name: name, + ComputeGas: computeGas, + StorageGas: storageGas, + } +} + +// Pricelist provides prices for operations in the VM. +// +// Note: this interface should be APPEND ONLY since last chain checkpoint +type Pricelist interface { + // OnChainMessage returns the gas used for storing a message of a given size in the chain. + OnChainMessage(msgSize int) GasCharge + // OnChainReturnValue returns the gas used for storing the response of a message in the chain. + OnChainReturnValue(dataSize int) GasCharge + + // OnMethodInvocation returns the gas used when invoking a method. + OnMethodInvocation(value abi.TokenAmount, methodNum abi.MethodNum) GasCharge + + // OnIpldGet returns the gas used for storing an object + OnIpldGet() GasCharge + // OnIpldPut returns the gas used for storing an object + OnIpldPut(dataSize int) GasCharge + + // OnCreateActor returns the gas used for creating an actor + OnCreateActor() GasCharge + // OnDeleteActor returns the gas used for deleting an actor + OnDeleteActor() GasCharge + + OnVerifySignature(sigType crypto.SigType, planTextSize int) (GasCharge, error) + OnHashing(dataSize int) GasCharge + OnComputeUnsealedSectorCid(proofType abi.RegisteredSealProof, pieces []abi.PieceInfo) GasCharge + OnVerifySeal(info proof.SealVerifyInfo) GasCharge + OnVerifyPost(info proof.WindowPoStVerifyInfo) GasCharge + OnVerifyConsensusFault() GasCharge +} + +var prices = map[abi.ChainEpoch]Pricelist{ + abi.ChainEpoch(0): &pricelistV0{ + onChainMessageComputeBase: 38863, + onChainMessageStorageBase: 36, + onChainMessageStoragePerByte: 1, + + onChainReturnValuePerByte: 1, + + sendBase: 29233, + sendTransferFunds: 27500, + sendTransferOnlyPremium: 159672, + sendInvokeMethod: -5377, + + ipldGetBase: 75242, + ipldPutBase: 84070, + ipldPutPerByte: 1, + + createActorCompute: 1108454, + createActorStorage: 36 + 40, + deleteActor: -(36 + 40), // -createActorStorage + + verifySignature: map[crypto.SigType]int64{ + 
crypto.SigTypeBLS: 16598605, + crypto.SigTypeSecp256k1: 1637292, + }, + + hashingBase: 31355, + computeUnsealedSectorCidBase: 98647, + verifySealBase: 2000, // TODO gas , it VerifySeal syscall is not used + verifyPostLookup: map[abi.RegisteredPoStProof]scalingCost{ + abi.RegisteredPoStProof_StackedDrgWindow512MiBV1: { + flat: 123861062, + scale: 9226981, + }, + abi.RegisteredPoStProof_StackedDrgWindow32GiBV1: { + flat: 748593537, + scale: 85639, + }, + abi.RegisteredPoStProof_StackedDrgWindow64GiBV1: { + flat: 748593537, + scale: 85639, + }, + }, + verifyConsensusFault: 495422, + }, +} + +// PricelistByEpoch finds the latest prices for the given epoch +func PricelistByEpoch(epoch abi.ChainEpoch) Pricelist { + // since we are storing the prices as map or epoch to price + // we need to get the price with the highest epoch that is lower or equal to the `epoch` arg + bestEpoch := abi.ChainEpoch(0) + bestPrice := prices[bestEpoch] + for e, pl := range prices { + // if `e` happened after `bestEpoch` and `e` is earlier or equal to the target `epoch` + if e > bestEpoch && e <= epoch { + bestEpoch = e + bestPrice = pl + } + } + if bestPrice == nil { + panic(fmt.Sprintf("bad setup: no gas prices available for epoch %d", epoch)) + } + return bestPrice +} + +type pricedSyscalls struct { + under vmr.Syscalls + pl Pricelist + chargeGas func(GasCharge) +} + +// Verifies that a signature is valid for an address and plaintext. +func (ps pricedSyscalls) VerifySignature(signature crypto.Signature, signer addr.Address, plaintext []byte) error { + c, err := ps.pl.OnVerifySignature(signature.Type, len(plaintext)) + if err != nil { + return err + } + ps.chargeGas(c) + defer ps.chargeGas(gasOnActorExec) + + return ps.under.VerifySignature(signature, signer, plaintext) +} + +// Hashes input data using blake2b with 256 bit output. 
+func (ps pricedSyscalls) HashBlake2b(data []byte) [32]byte { + ps.chargeGas(ps.pl.OnHashing(len(data))) + defer ps.chargeGas(gasOnActorExec) + + return ps.under.HashBlake2b(data) +} + +// Computes an unsealed sector CID (CommD) from its constituent piece CIDs (CommPs) and sizes. +func (ps pricedSyscalls) ComputeUnsealedSectorCID(reg abi.RegisteredSealProof, pieces []abi.PieceInfo) (cid.Cid, error) { + ps.chargeGas(ps.pl.OnComputeUnsealedSectorCid(reg, pieces)) + defer ps.chargeGas(gasOnActorExec) + + return ps.under.ComputeUnsealedSectorCID(reg, pieces) +} + +// Verifies a sector seal proof. +func (ps pricedSyscalls) VerifySeal(vi proof.SealVerifyInfo) error { + ps.chargeGas(ps.pl.OnVerifySeal(vi)) + defer ps.chargeGas(gasOnActorExec) + + return ps.under.VerifySeal(vi) +} + +// Verifies a proof of spacetime. +func (ps pricedSyscalls) VerifyPoSt(vi proof.WindowPoStVerifyInfo) error { + ps.chargeGas(ps.pl.OnVerifyPost(vi)) + defer ps.chargeGas(gasOnActorExec) + + return ps.under.VerifyPoSt(vi) +} + +// Verifies that two block headers provide proof of a consensus fault: +// - both headers mined by the same actor +// - headers are different +// - first header is of the same or lower epoch as the second +// - at least one of the headers appears in the current chain at or after epoch `earliest` +// - the headers provide evidence of a fault (see the spec for the different fault types). +// The parameters are all serialized block headers. The third "extra" parameter is consulted only for +// the "parent grinding fault", in which case it must be the sibling of h1 (same parent tipset) and one of the +// blocks in the parent of h2 (i.e. h2's grandparent). +// Returns nil and an error if the headers don't prove a fault. 
+func (ps pricedSyscalls) VerifyConsensusFault(h1 []byte, h2 []byte, extra []byte) (*runtime.ConsensusFault, error) { + ps.chargeGas(ps.pl.OnVerifyConsensusFault()) + defer ps.chargeGas(gasOnActorExec) + + return ps.under.VerifyConsensusFault(h1, h2, extra) +} + +func (ps pricedSyscalls) BatchVerifySeals(inp map[address.Address][]proof.SealVerifyInfo) (map[address.Address][]bool, error) { + count := int64(0) + for _, svis := range inp { + count += int64(len(svis)) + } + + gasChargeSum := newGasCharge("BatchVerifySeals", 0, 0) + gasChargeSum = gasChargeSum.WithExtra(count).WithVirtual(15075005*count+899741502, 0) + ps.chargeGas(gasChargeSum) // real gas charged by actors + defer ps.chargeGas(gasOnActorExec) + + return ps.under.BatchVerifySeals(inp) +} diff --git a/chain/vm/gas_v0.go b/chain/vm/gas_v0.go new file mode 100644 index 000000000..e5ded440e --- /dev/null +++ b/chain/vm/gas_v0.go @@ -0,0 +1,215 @@ +package vm + +import ( + "fmt" + + "github.com/filecoin-project/specs-actors/actors/runtime/proof" + + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/go-state-types/crypto" + "github.com/filecoin-project/specs-actors/actors/builtin" +) + +type scalingCost struct { + flat int64 + scale int64 +} + +type pricelistV0 struct { + /////////////////////////////////////////////////////////////////////////// + // System operations + /////////////////////////////////////////////////////////////////////////// + + // Gas cost charged to the originator of an on-chain message (regardless of + // whether it succeeds or fails in application) is given by: + // OnChainMessageBase + len(serialized message)*OnChainMessagePerByte + // Together, these account for the cost of message propagation and validation, + // up to but excluding any actual processing by the VM. + // This is the cost a block producer burns when including an invalid message. 
+ onChainMessageComputeBase int64 + onChainMessageStorageBase int64 + onChainMessageStoragePerByte int64 + + // Gas cost charged to the originator of a non-nil return value produced + // by an on-chain message is given by: + // len(return value)*OnChainReturnValuePerByte + onChainReturnValuePerByte int64 + + // Gas cost for any message send execution(including the top-level one + // initiated by an on-chain message). + // This accounts for the cost of loading sender and receiver actors and + // (for top-level messages) incrementing the sender's sequence number. + // Load and store of actor sub-state is charged separately. + sendBase int64 + + // Gas cost charged, in addition to SendBase, if a message send + // is accompanied by any nonzero currency amount. + // Accounts for writing receiver's new balance (the sender's state is + // already accounted for). + sendTransferFunds int64 + + // Gsa cost charged, in addition to SendBase, if message only transfers funds. + sendTransferOnlyPremium int64 + + // Gas cost charged, in addition to SendBase, if a message invokes + // a method on the receiver. + // Accounts for the cost of loading receiver code and method dispatch. + sendInvokeMethod int64 + + // Gas cost for any Get operation to the IPLD store + // in the runtime VM context. + ipldGetBase int64 + + // Gas cost (Base + len*PerByte) for any Put operation to the IPLD store + // in the runtime VM context. + // + // Note: these costs should be significantly higher than the costs for Get + // operations, since they reflect not only serialization/deserialization + // but also persistent storage of chain data. + ipldPutBase int64 + ipldPutPerByte int64 + + // Gas cost for creating a new actor (via InitActor's Exec method). + // + // Note: this costs assume that the extra will be partially or totally refunded while + // the base is covering for the put. + createActorCompute int64 + createActorStorage int64 + + // Gas cost for deleting an actor. 
+ // + // Note: this partially refunds the create cost to incentivise the deletion of the actors. + deleteActor int64 + + verifySignature map[crypto.SigType]int64 + + hashingBase int64 + + computeUnsealedSectorCidBase int64 + verifySealBase int64 + verifyPostLookup map[abi.RegisteredPoStProof]scalingCost + verifyConsensusFault int64 +} + +var _ Pricelist = (*pricelistV0)(nil) + +// OnChainMessage returns the gas used for storing a message of a given size in the chain. +func (pl *pricelistV0) OnChainMessage(msgSize int) GasCharge { + return newGasCharge("OnChainMessage", pl.onChainMessageComputeBase, + pl.onChainMessageStorageBase+pl.onChainMessageStoragePerByte*int64(msgSize)) +} + +// OnChainReturnValue returns the gas used for storing the response of a message in the chain. +func (pl *pricelistV0) OnChainReturnValue(dataSize int) GasCharge { + return newGasCharge("OnChainReturnValue", 0, int64(dataSize)*pl.onChainReturnValuePerByte) +} + +// OnMethodInvocation returns the gas used when invoking a method. +func (pl *pricelistV0) OnMethodInvocation(value abi.TokenAmount, methodNum abi.MethodNum) GasCharge { + ret := pl.sendBase + extra := "" + + if big.Cmp(value, abi.NewTokenAmount(0)) != 0 { + ret += pl.sendTransferFunds + if methodNum == builtin.MethodSend { + // transfer only + ret += pl.sendTransferOnlyPremium + } + extra += "t" + } + + if methodNum != builtin.MethodSend { + extra += "i" + // running actors is cheaper becase we hand over to actors + ret += pl.sendInvokeMethod + } + return newGasCharge("OnMethodInvocation", ret, 0).WithExtra(extra) +} + +// OnIpldGet returns the gas used for storing an object +func (pl *pricelistV0) OnIpldGet() GasCharge { + return newGasCharge("OnIpldGet", pl.ipldGetBase, 0) +} + +// OnIpldPut returns the gas used for storing an object +func (pl *pricelistV0) OnIpldPut(dataSize int) GasCharge { + return newGasCharge("OnIpldPut", pl.ipldPutBase, int64(dataSize)*pl.ipldPutPerByte). 
+ WithExtra(dataSize) +} + +// OnCreateActor returns the gas used for creating an actor +func (pl *pricelistV0) OnCreateActor() GasCharge { + return newGasCharge("OnCreateActor", pl.createActorCompute, pl.createActorStorage) +} + +// OnDeleteActor returns the gas used for deleting an actor +func (pl *pricelistV0) OnDeleteActor() GasCharge { + return newGasCharge("OnDeleteActor", 0, pl.deleteActor) +} + +// OnVerifySignature + +func (pl *pricelistV0) OnVerifySignature(sigType crypto.SigType, planTextSize int) (GasCharge, error) { + cost, ok := pl.verifySignature[sigType] + if !ok { + return GasCharge{}, fmt.Errorf("cost function for signature type %d not supported", sigType) + } + + sigName, _ := sigType.Name() + return newGasCharge("OnVerifySignature", cost, 0). + WithExtra(map[string]interface{}{ + "type": sigName, + "size": planTextSize, + }), nil +} + +// OnHashing +func (pl *pricelistV0) OnHashing(dataSize int) GasCharge { + return newGasCharge("OnHashing", pl.hashingBase, 0).WithExtra(dataSize) +} + +// OnComputeUnsealedSectorCid +func (pl *pricelistV0) OnComputeUnsealedSectorCid(proofType abi.RegisteredSealProof, pieces []abi.PieceInfo) GasCharge { + return newGasCharge("OnComputeUnsealedSectorCid", pl.computeUnsealedSectorCidBase, 0) +} + +// OnVerifySeal +func (pl *pricelistV0) OnVerifySeal(info proof.SealVerifyInfo) GasCharge { + // TODO: this needs more cost tunning, check with @lotus + // this is not used + return newGasCharge("OnVerifySeal", pl.verifySealBase, 0) +} + +// OnVerifyPost +func (pl *pricelistV0) OnVerifyPost(info proof.WindowPoStVerifyInfo) GasCharge { + sectorSize := "unknown" + var proofType abi.RegisteredPoStProof + + if len(info.Proofs) != 0 { + proofType = info.Proofs[0].PoStProof + ss, err := info.Proofs[0].PoStProof.SectorSize() + if err == nil { + sectorSize = ss.ShortString() + } + } + + cost, ok := pl.verifyPostLookup[proofType] + if !ok { + cost = pl.verifyPostLookup[abi.RegisteredPoStProof_StackedDrgWindow512MiBV1] + } + + 
gasUsed := cost.flat + int64(len(info.ChallengedSectors))*cost.scale + gasUsed /= 2 // XXX: this is an artificial discount + + return newGasCharge("OnVerifyPost", gasUsed, 0). + WithExtra(map[string]interface{}{ + "type": sectorSize, + "size": len(info.ChallengedSectors), + }) +} + +// OnVerifyConsensusFault +func (pl *pricelistV0) OnVerifyConsensusFault() GasCharge { + return newGasCharge("OnVerifyConsensusFault", pl.verifyConsensusFault, 0) +} diff --git a/chain/vm/invoker.go b/chain/vm/invoker.go index 0a792dbd8..0a83e273d 100644 --- a/chain/vm/invoker.go +++ b/chain/vm/invoker.go @@ -10,59 +10,75 @@ import ( cbg "github.com/whyrusleeping/cbor-gen" "golang.org/x/xerrors" - "github.com/filecoin-project/lotus/chain/actors" + builtin0 "github.com/filecoin-project/specs-actors/actors/builtin" + account0 "github.com/filecoin-project/specs-actors/actors/builtin/account" + cron0 "github.com/filecoin-project/specs-actors/actors/builtin/cron" + init0 "github.com/filecoin-project/specs-actors/actors/builtin/init" + market0 "github.com/filecoin-project/specs-actors/actors/builtin/market" + miner0 "github.com/filecoin-project/specs-actors/actors/builtin/miner" + msig0 "github.com/filecoin-project/specs-actors/actors/builtin/multisig" + paych0 "github.com/filecoin-project/specs-actors/actors/builtin/paych" + power0 "github.com/filecoin-project/specs-actors/actors/builtin/power" + reward0 "github.com/filecoin-project/specs-actors/actors/builtin/reward" + system0 "github.com/filecoin-project/specs-actors/actors/builtin/system" + verifreg0 "github.com/filecoin-project/specs-actors/actors/builtin/verifreg" + vmr "github.com/filecoin-project/specs-actors/actors/runtime" + + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/exitcode" + "github.com/filecoin-project/lotus/chain/actors/aerrors" - "github.com/filecoin-project/lotus/chain/types" ) -type invoker struct { +type Invoker struct { builtInCode map[cid.Cid]nativeCode builtInState 
map[cid.Cid]reflect.Type } -type invokeFunc func(act *types.Actor, vmctx types.VMContext, params []byte) ([]byte, aerrors.ActorError) +type invokeFunc func(rt vmr.Runtime, params []byte) ([]byte, aerrors.ActorError) type nativeCode []invokeFunc -func newInvoker() *invoker { - inv := &invoker{ +func NewInvoker() *Invoker { + inv := &Invoker{ builtInCode: make(map[cid.Cid]nativeCode), builtInState: make(map[cid.Cid]reflect.Type), } // add builtInCode using: register(cid, singleton) - inv.register(actors.InitCodeCid, actors.InitActor{}, actors.InitActorState{}) - inv.register(actors.CronCodeCid, actors.CronActor{}, actors.CronActorState{}) - inv.register(actors.StoragePowerCodeCid, actors.StoragePowerActor{}, actors.StoragePowerState{}) - inv.register(actors.StorageMarketCodeCid, actors.StorageMarketActor{}, actors.StorageMarketState{}) - inv.register(actors.StorageMinerCodeCid, actors.StorageMinerActor{}, actors.StorageMinerActorState{}) - inv.register(actors.MultisigCodeCid, actors.MultiSigActor{}, actors.MultiSigActorState{}) - inv.register(actors.PaymentChannelCodeCid, actors.PaymentChannelActor{}, actors.PaymentChannelActorState{}) + // NETUPGRADE: register code IDs for v2, etc. 
+ inv.Register(builtin0.SystemActorCodeID, system0.Actor{}, abi.EmptyValue{}) + inv.Register(builtin0.InitActorCodeID, init0.Actor{}, init0.State{}) + inv.Register(builtin0.RewardActorCodeID, reward0.Actor{}, reward0.State{}) + inv.Register(builtin0.CronActorCodeID, cron0.Actor{}, cron0.State{}) + inv.Register(builtin0.StoragePowerActorCodeID, power0.Actor{}, power0.State{}) + inv.Register(builtin0.StorageMarketActorCodeID, market0.Actor{}, market0.State{}) + inv.Register(builtin0.StorageMinerActorCodeID, miner0.Actor{}, miner0.State{}) + inv.Register(builtin0.MultisigActorCodeID, msig0.Actor{}, msig0.State{}) + inv.Register(builtin0.PaymentChannelActorCodeID, paych0.Actor{}, paych0.State{}) + inv.Register(builtin0.VerifiedRegistryActorCodeID, verifreg0.Actor{}, verifreg0.State{}) + inv.Register(builtin0.AccountActorCodeID, account0.Actor{}, account0.State{}) return inv } -func (inv *invoker) Invoke(act *types.Actor, vmctx types.VMContext, method uint64, params []byte) ([]byte, aerrors.ActorError) { +func (inv *Invoker) Invoke(codeCid cid.Cid, rt vmr.Runtime, method abi.MethodNum, params []byte) ([]byte, aerrors.ActorError) { - if act.Code == actors.AccountCodeCid { - return nil, aerrors.Newf(254, "cannot invoke methods on account actors") - } - - code, ok := inv.builtInCode[act.Code] + code, ok := inv.builtInCode[codeCid] if !ok { - log.Errorf("no code for actor %s (Addr: %s)", act.Code, vmctx.Message().To) - return nil, aerrors.Newf(255, "no code for actor %s(%d)(%s)", act.Code, method, hex.EncodeToString(params)) + log.Errorf("no code for actor %s (Addr: %s)", codeCid, rt.Receiver()) + return nil, aerrors.Newf(exitcode.SysErrorIllegalActor, "no code for actor %s(%d)(%s)", codeCid, method, hex.EncodeToString(params)) } - if method >= uint64(len(code)) || code[method] == nil { - return nil, aerrors.Newf(255, "no method %d on actor", method) + if method >= abi.MethodNum(len(code)) || code[method] == nil { + return nil, aerrors.Newf(exitcode.SysErrInvalidMethod, "no 
method %d on actor", method) } - return code[method](act, vmctx, params) + return code[method](rt, params) } -func (inv *invoker) register(c cid.Cid, instance Invokee, state interface{}) { +func (inv *Invoker) Register(c cid.Cid, instance Invokee, state interface{}) { code, err := inv.transform(instance) if err != nil { - panic(err) + panic(xerrors.Errorf("%s: %w", string(c.Hash()), err)) } inv.builtInCode[c] = code inv.builtInState[c] = reflect.TypeOf(state) @@ -72,10 +88,7 @@ type Invokee interface { Exports() []interface{} } -var tVMContext = reflect.TypeOf((*types.VMContext)(nil)).Elem() -var tAError = reflect.TypeOf((*aerrors.ActorError)(nil)).Elem() - -func (*invoker) transform(instance Invokee) (nativeCode, error) { +func (*Invoker) transform(instance Invokee) (nativeCode, error) { itype := reflect.TypeOf(instance) exports := instance.Exports() for i, m := range exports { @@ -84,66 +97,70 @@ func (*invoker) transform(instance Invokee) (nativeCode, error) { str := fmt.Sprintf(format, args...) 
return fmt.Errorf("transform(%s) export(%d): %s", itype.Name(), i, str) } + if m == nil { continue } + meth := reflect.ValueOf(m) t := meth.Type() if t.Kind() != reflect.Func { return nil, newErr("is not a function") } - if t.NumIn() != 3 { + if t.NumIn() != 2 { return nil, newErr("wrong number of inputs should be: " + - "*types.Actor, *VMContext, ") + "vmr.Runtime, ") } - if t.In(0) != reflect.TypeOf(&types.Actor{}) { - return nil, newErr("first arguemnt should be *types.Actor") + if t.In(0) != reflect.TypeOf((*vmr.Runtime)(nil)).Elem() { + return nil, newErr("first arguemnt should be vmr.Runtime") } - if t.In(1) != tVMContext { - return nil, newErr("second argument should be types.VMContext") + if t.In(1).Kind() != reflect.Ptr { + return nil, newErr("second argument should be of kind reflect.Ptr") } - if t.In(2).Kind() != reflect.Ptr { - return nil, newErr("parameter has to be a pointer to parameter, is: %s", - t.In(2).Kind()) - } - - if t.NumOut() != 2 { + if t.NumOut() != 1 { return nil, newErr("wrong number of outputs should be: " + - "(InvokeRet, error)") + "cbg.CBORMarshaler") } - if t.Out(0) != reflect.TypeOf([]byte{}) { - return nil, newErr("first output should be slice of bytes") + o0 := t.Out(0) + if !o0.Implements(reflect.TypeOf((*cbg.CBORMarshaler)(nil)).Elem()) { + return nil, newErr("output needs to implement cgb.CBORMarshaler") } - if !t.Out(1).Implements(tAError) { - return nil, newErr("second output should be ActorError type") - } - } code := make(nativeCode, len(exports)) for id, m := range exports { + if m == nil { + continue + } meth := reflect.ValueOf(m) code[id] = reflect.MakeFunc(reflect.TypeOf((invokeFunc)(nil)), func(in []reflect.Value) []reflect.Value { - paramT := meth.Type().In(2).Elem() + paramT := meth.Type().In(1).Elem() param := reflect.New(paramT) - inBytes := in[2].Interface().([]byte) - if len(inBytes) > 0 { - if err := DecodeParams(inBytes, param.Interface()); err != nil { - aerr := aerrors.Absorb(err, 1, "failed to decode 
parameters") - return []reflect.Value{ - reflect.ValueOf([]byte{}), - // Below is a hack, fixed in Go 1.13 - // https://git.io/fjXU6 - reflect.ValueOf(&aerr).Elem(), - } + inBytes := in[1].Interface().([]byte) + if err := DecodeParams(inBytes, param.Interface()); err != nil { + aerr := aerrors.Absorb(err, 1, "failed to decode parameters") + return []reflect.Value{ + reflect.ValueOf([]byte{}), + // Below is a hack, fixed in Go 1.13 + // https://git.io/fjXU6 + reflect.ValueOf(&aerr).Elem(), } } - - return meth.Call([]reflect.Value{ - in[0], in[1], param, + rt := in[0].Interface().(*Runtime) + rval, aerror := rt.shimCall(func() interface{} { + ret := meth.Call([]reflect.Value{ + reflect.ValueOf(rt), + param, + }) + return ret[0].Interface() }) + + return []reflect.Value{ + reflect.ValueOf(&rval).Elem(), + reflect.ValueOf(&aerror).Elem(), + } }).Interface().(invokeFunc) } @@ -160,7 +177,11 @@ func DecodeParams(b []byte, out interface{}) error { } func DumpActorState(code cid.Cid, b []byte) (interface{}, error) { - i := newInvoker() // TODO: register builtins in init block + if code == builtin0.AccountActorCodeID { // Account code special case + return nil, nil + } + + i := NewInvoker() // TODO: register builtins in init block typ, ok := i.builtInState[code] if !ok { @@ -174,7 +195,7 @@ func DumpActorState(code cid.Cid, b []byte) (interface{}, error) { } if err := um.UnmarshalCBOR(bytes.NewReader(b)); err != nil { - return nil, err + return nil, xerrors.Errorf("unmarshaling actor state: %w", err) } return rv.Elem().Interface(), nil diff --git a/chain/vm/invoker_test.go b/chain/vm/invoker_test.go index e326d0e70..3744aa8d2 100644 --- a/chain/vm/invoker_test.go +++ b/chain/vm/invoker_test.go @@ -5,13 +5,16 @@ import ( "io" "testing" + "github.com/filecoin-project/go-state-types/abi" + cbor "github.com/ipfs/go-ipld-cbor" "github.com/stretchr/testify/assert" cbg "github.com/whyrusleeping/cbor-gen" + "github.com/filecoin-project/go-state-types/exitcode" 
"github.com/filecoin-project/lotus/chain/actors" "github.com/filecoin-project/lotus/chain/actors/aerrors" - "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/specs-actors/actors/runtime" ) type basicContract struct{} @@ -58,23 +61,23 @@ func (b basicContract) Exports() []interface{} { } } -func (basicContract) InvokeSomething0(act *types.Actor, vmctx types.VMContext, - params *basicParams) ([]byte, aerrors.ActorError) { - return nil, aerrors.New(params.B, "params.B") +func (basicContract) InvokeSomething0(rt runtime.Runtime, params *basicParams) *abi.EmptyValue { + rt.Abortf(exitcode.ExitCode(params.B), "params.B") + return nil } -func (basicContract) BadParam(act *types.Actor, vmctx types.VMContext, - params *basicParams) ([]byte, aerrors.ActorError) { - return nil, aerrors.New(255, "bad params") +func (basicContract) BadParam(rt runtime.Runtime, params *basicParams) *abi.EmptyValue { + rt.Abortf(255, "bad params") + return nil } -func (basicContract) InvokeSomething10(act *types.Actor, vmctx types.VMContext, - params *basicParams) ([]byte, aerrors.ActorError) { - return nil, aerrors.New(params.B+10, "params.B") +func (basicContract) InvokeSomething10(rt runtime.Runtime, params *basicParams) *abi.EmptyValue { + rt.Abortf(exitcode.ExitCode(params.B+10), "params.B") + return nil } func TestInvokerBasic(t *testing.T) { - inv := invoker{} + inv := Invoker{} code, err := inv.transform(basicContract{}) assert.NoError(t, err) @@ -82,9 +85,9 @@ func TestInvokerBasic(t *testing.T) { bParam, err := actors.SerializeParams(&basicParams{B: 1}) assert.NoError(t, err) - _, aerr := code[0](nil, &VMContext{}, bParam) + _, aerr := code[0](&Runtime{}, bParam) - assert.Equal(t, byte(1), aerrors.RetCode(aerr), "return code should be 1") + assert.Equal(t, exitcode.ExitCode(1), aerrors.RetCode(aerr), "return code should be 1") if aerrors.IsFatal(aerr) { t.Fatal("err should not be fatal") } @@ -94,17 +97,17 @@ func TestInvokerBasic(t *testing.T) { bParam, err 
:= actors.SerializeParams(&basicParams{B: 2}) assert.NoError(t, err) - _, aerr := code[10](nil, &VMContext{}, bParam) - assert.Equal(t, byte(12), aerrors.RetCode(aerr), "return code should be 12") + _, aerr := code[10](&Runtime{}, bParam) + assert.Equal(t, exitcode.ExitCode(12), aerrors.RetCode(aerr), "return code should be 12") if aerrors.IsFatal(aerr) { t.Fatal("err should not be fatal") } } - _, aerr := code[1](nil, &VMContext{}, []byte{99}) + _, aerr := code[1](&Runtime{}, []byte{99}) if aerrors.IsFatal(aerr) { t.Fatal("err should not be fatal") } - assert.Equal(t, byte(1), aerrors.RetCode(aerr), "return code should be 1") + assert.Equal(t, exitcode.ExitCode(1), aerrors.RetCode(aerr), "return code should be 1") } diff --git a/chain/vm/mkactor.go b/chain/vm/mkactor.go index a84387321..43d2f9431 100644 --- a/chain/vm/mkactor.go +++ b/chain/vm/mkactor.go @@ -3,22 +3,22 @@ package vm import ( "context" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/go-state-types/exitcode" + "github.com/filecoin-project/lotus/chain/actors" + "github.com/filecoin-project/specs-actors/actors/builtin" + "github.com/ipfs/go-cid" - dstore "github.com/ipfs/go-datastore" - hamt "github.com/ipfs/go-hamt-ipld" - bstore "github.com/ipfs/go-ipfs-blockstore" + cbor "github.com/ipfs/go-ipld-cbor" "github.com/filecoin-project/go-address" - "github.com/filecoin-project/lotus/chain/actors" "github.com/filecoin-project/lotus/chain/actors/aerrors" - "github.com/filecoin-project/lotus/chain/state" "github.com/filecoin-project/lotus/chain/types" ) func init() { - bs := bstore.NewBlockstore(dstore.NewMapDatastore()) - cst := hamt.CSTFromBstore(bs) - emptyobject, err := cst.Put(context.TODO(), map[string]string{}) + cst := cbor.NewMemCborStore() + emptyobject, err := cst.Put(context.TODO(), []struct{}{}) if err != nil { panic(err) } @@ -28,58 +28,75 @@ func init() { var EmptyObjectCid cid.Cid -func TryCreateAccountActor(st *state.StateTree, addr address.Address) 
(*types.Actor, aerrors.ActorError) { - act, err := makeActor(st, addr) - if err != nil { +// TryCreateAccountActor creates account actors from only BLS/SECP256K1 addresses. +func TryCreateAccountActor(rt *Runtime, addr address.Address) (*types.Actor, aerrors.ActorError) { + if err := rt.chargeGasSafe(PricelistByEpoch(rt.height).OnCreateActor()); err != nil { return nil, err } - if _, err := st.RegisterNewAddress(addr, act); err != nil { + addrID, err := rt.state.RegisterNewAddress(addr) + if err != nil { return nil, aerrors.Escalate(err, "registering actor address") } + act, aerr := makeActor(addr) + if aerr != nil { + return nil, aerr + } + + if err := rt.state.SetActor(addrID, act); err != nil { + return nil, aerrors.Escalate(err, "creating new actor failed") + } + + p, err := actors.SerializeParams(&addr) + if err != nil { + return nil, aerrors.Escalate(err, "couldn't serialize params for actor construction") + } + // call constructor on account + + _, aerr = rt.internalSend(builtin.SystemActorAddr, addrID, builtin.MethodsAccount.Constructor, big.Zero(), p) + if aerr != nil { + return nil, aerrors.Wrap(aerr, "failed to invoke account constructor") + } + + act, err = rt.state.GetActor(addrID) + if err != nil { + return nil, aerrors.Escalate(err, "loading newly created actor failed") + } return act, nil } -func makeActor(st *state.StateTree, addr address.Address) (*types.Actor, aerrors.ActorError) { +func makeActor(addr address.Address) (*types.Actor, aerrors.ActorError) { switch addr.Protocol() { case address.BLS: - return NewBLSAccountActor(st, addr) + return NewBLSAccountActor(), nil case address.SECP256K1: - return NewSecp256k1AccountActor(st, addr) + return NewSecp256k1AccountActor(), nil case address.ID: - return nil, aerrors.Newf(1, "no actor with given ID: %s", addr) + return nil, aerrors.Newf(exitcode.SysErrInvalidReceiver, "no actor with given ID: %s", addr) case address.Actor: - return nil, aerrors.Newf(1, "no such actor: %s", addr) + return nil, 
aerrors.Newf(exitcode.SysErrInvalidReceiver, "no such actor: %s", addr) default: - return nil, aerrors.Newf(1, "address has unsupported protocol: %d", addr.Protocol()) + return nil, aerrors.Newf(exitcode.SysErrInvalidReceiver, "address has unsupported protocol: %d", addr.Protocol()) } } -func NewBLSAccountActor(st *state.StateTree, addr address.Address) (*types.Actor, aerrors.ActorError) { - var acstate actors.AccountActorState - acstate.Address = addr - - c, err := st.Store.Put(context.TODO(), &acstate) - if err != nil { - return nil, aerrors.Escalate(err, "serializing account actor state") - } - +func NewBLSAccountActor() *types.Actor { nact := &types.Actor{ - Code: actors.AccountCodeCid, - Balance: types.NewInt(0), - Head: c, - } - - return nact, nil -} - -func NewSecp256k1AccountActor(st *state.StateTree, addr address.Address) (*types.Actor, aerrors.ActorError) { - nact := &types.Actor{ - Code: actors.AccountCodeCid, + Code: builtin.AccountActorCodeID, Balance: types.NewInt(0), Head: EmptyObjectCid, } - return nact, nil + return nact +} + +func NewSecp256k1AccountActor() *types.Actor { + nact := &types.Actor{ + Code: builtin.AccountActorCodeID, + Balance: types.NewInt(0), + Head: EmptyObjectCid, + } + + return nact } diff --git a/chain/vm/runtime.go b/chain/vm/runtime.go new file mode 100644 index 000000000..156d57282 --- /dev/null +++ b/chain/vm/runtime.go @@ -0,0 +1,561 @@ +package vm + +import ( + "bytes" + "context" + "encoding/binary" + "fmt" + gruntime "runtime" + "time" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/go-state-types/cbor" + "github.com/filecoin-project/go-state-types/crypto" + "github.com/filecoin-project/go-state-types/exitcode" + "github.com/filecoin-project/go-state-types/network" + rtt "github.com/filecoin-project/go-state-types/rt" + "github.com/filecoin-project/specs-actors/actors/builtin" + rt0 
"github.com/filecoin-project/specs-actors/actors/runtime" + "github.com/ipfs/go-cid" + ipldcbor "github.com/ipfs/go-ipld-cbor" + "go.opencensus.io/trace" + "golang.org/x/xerrors" + + "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/chain/actors/aerrors" + "github.com/filecoin-project/lotus/chain/state" + "github.com/filecoin-project/lotus/chain/types" +) + +type Runtime struct { + types.Message + rt0.Syscalls + + ctx context.Context + + vm *VM + state *state.StateTree + height abi.ChainEpoch + cst ipldcbor.IpldStore + pricelist Pricelist + + gasAvailable int64 + gasUsed int64 + + // address that started invoke chain + origin address.Address + originNonce uint64 + + executionTrace types.ExecutionTrace + numActorsCreated uint64 + allowInternal bool + callerValidated bool + lastGasChargeTime time.Time + lastGasCharge *types.GasTrace +} + +func (rt *Runtime) NetworkVersion() network.Version { + return rt.vm.GetNtwkVersion(rt.ctx, rt.CurrEpoch()) +} + +func (rt *Runtime) TotalFilCircSupply() abi.TokenAmount { + cs, err := rt.vm.GetCircSupply(rt.ctx) + if err != nil { + rt.Abortf(exitcode.ErrIllegalState, "failed to get total circ supply: %s", err) + } + + return cs +} + +func (rt *Runtime) ResolveAddress(addr address.Address) (ret address.Address, ok bool) { + r, err := rt.state.LookupID(addr) + if err != nil { + if xerrors.Is(err, types.ErrActorNotFound) { + return address.Undef, false + } + panic(aerrors.Fatalf("failed to resolve address %s: %s", addr, err)) + } + return r, true +} + +type notFoundErr interface { + IsNotFound() bool +} + +func (rt *Runtime) StoreGet(c cid.Cid, o cbor.Unmarshaler) bool { + if err := rt.cst.Get(context.TODO(), c, o); err != nil { + var nfe notFoundErr + if xerrors.As(err, &nfe) && nfe.IsNotFound() { + if xerrors.As(err, new(ipldcbor.SerializationError)) { + panic(aerrors.Newf(exitcode.ErrSerialization, "failed to unmarshal cbor object %s", err)) + } + return false + } + + panic(aerrors.Fatalf("failed to get 
cbor object %s: %s", c, err)) + } + return true +} + +func (rt *Runtime) StorePut(x cbor.Marshaler) cid.Cid { + c, err := rt.cst.Put(context.TODO(), x) + if err != nil { + if xerrors.As(err, new(ipldcbor.SerializationError)) { + panic(aerrors.Newf(exitcode.ErrSerialization, "failed to marshal cbor object %s", err)) + } + panic(aerrors.Fatalf("failed to put cbor object: %s", err)) + } + return c +} + +var _ rt0.Runtime = (*Runtime)(nil) + +func (rt *Runtime) shimCall(f func() interface{}) (rval []byte, aerr aerrors.ActorError) { + defer func() { + if r := recover(); r != nil { + if ar, ok := r.(aerrors.ActorError); ok { + log.Warnf("VM.Call failure: %+v", ar) + aerr = ar + return + } + //log.Desugar().WithOptions(zap.AddStacktrace(zapcore.ErrorLevel)). + //Sugar().Errorf("spec actors failure: %s", r) + log.Errorf("spec actors failure: %s", r) + aerr = aerrors.Newf(1, "spec actors failure: %s", r) + } + }() + + ret := f() + + if !rt.callerValidated { + rt.Abortf(exitcode.SysErrorIllegalActor, "Caller MUST be validated during method execution") + } + + switch ret := ret.(type) { + case []byte: + return ret, nil + case *abi.EmptyValue: + return nil, nil + case cbor.Marshaler: + buf := new(bytes.Buffer) + if err := ret.MarshalCBOR(buf); err != nil { + return nil, aerrors.Absorb(err, 2, "failed to marshal response to cbor") + } + return buf.Bytes(), nil + case nil: + return nil, nil + default: + return nil, aerrors.New(3, "could not determine type for response from call") + } +} + +func (rt *Runtime) ValidateImmediateCallerAcceptAny() { + rt.abortIfAlreadyValidated() + return +} + +func (rt *Runtime) CurrentBalance() abi.TokenAmount { + b, err := rt.GetBalance(rt.Receiver()) + if err != nil { + rt.Abortf(err.RetCode(), "get current balance: %v", err) + } + return b +} + +func (rt *Runtime) GetActorCodeCID(addr address.Address) (ret cid.Cid, ok bool) { + act, err := rt.state.GetActor(addr) + if err != nil { + if xerrors.Is(err, types.ErrActorNotFound) { + return 
cid.Undef, false + } + + panic(aerrors.Fatalf("failed to get actor: %s", err)) + } + + return act.Code, true +} + +func (rt *Runtime) GetRandomnessFromTickets(personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) abi.Randomness { + res, err := rt.vm.rand.GetChainRandomness(rt.ctx, personalization, randEpoch, entropy) + if err != nil { + panic(aerrors.Fatalf("could not get randomness: %s", err)) + } + return res +} + +func (rt *Runtime) GetRandomnessFromBeacon(personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) abi.Randomness { + res, err := rt.vm.rand.GetBeaconRandomness(rt.ctx, personalization, randEpoch, entropy) + if err != nil { + panic(aerrors.Fatalf("could not get randomness: %s", err)) + } + return res +} + +func (rt *Runtime) NewActorAddress() address.Address { + var b bytes.Buffer + oa, _ := ResolveToKeyAddr(rt.vm.cstate, rt.vm.cst, rt.origin) + if err := oa.MarshalCBOR(&b); err != nil { // todo: spec says cbor; why not just bytes? 
+ panic(aerrors.Fatalf("writing caller address into a buffer: %v", err)) + } + + if err := binary.Write(&b, binary.BigEndian, rt.originNonce); err != nil { + panic(aerrors.Fatalf("writing nonce address into a buffer: %v", err)) + } + if err := binary.Write(&b, binary.BigEndian, rt.numActorsCreated); err != nil { // TODO: expose on vm + panic(aerrors.Fatalf("writing callSeqNum address into a buffer: %v", err)) + } + addr, err := address.NewActorAddress(b.Bytes()) + if err != nil { + panic(aerrors.Fatalf("create actor address: %v", err)) + } + + rt.incrementNumActorsCreated() + return addr +} + +func (rt *Runtime) CreateActor(codeID cid.Cid, address address.Address) { + if !builtin.IsBuiltinActor(codeID) { + rt.Abortf(exitcode.SysErrorIllegalArgument, "Can only create built-in actors.") + } + + if builtin.IsSingletonActor(codeID) { + rt.Abortf(exitcode.SysErrorIllegalArgument, "Can only have one instance of singleton actors.") + } + + _, err := rt.state.GetActor(address) + if err == nil { + rt.Abortf(exitcode.SysErrorIllegalArgument, "Actor address already exists") + } + + rt.chargeGas(rt.Pricelist().OnCreateActor()) + + err = rt.state.SetActor(address, &types.Actor{ + Code: codeID, + Head: EmptyObjectCid, + Nonce: 0, + Balance: big.Zero(), + }) + if err != nil { + panic(aerrors.Fatalf("creating actor entry: %v", err)) + } + _ = rt.chargeGasSafe(gasOnActorExec) +} + +// DeleteActor deletes the executing actor from the state tree, transferring +// any balance to beneficiary. +// Aborts if the beneficiary does not exist. +// May only be called by the actor itself. 
+func (rt *Runtime) DeleteActor(beneficiary address.Address) { + rt.chargeGas(rt.Pricelist().OnDeleteActor()) + act, err := rt.state.GetActor(rt.Receiver()) + if err != nil { + if xerrors.Is(err, types.ErrActorNotFound) { + rt.Abortf(exitcode.SysErrorIllegalActor, "failed to load actor in delete actor: %s", err) + } + panic(aerrors.Fatalf("failed to get actor: %s", err)) + } + if !act.Balance.IsZero() { + // Transfer the executing actor's balance to the beneficiary + if err := rt.vm.transfer(rt.Receiver(), beneficiary, act.Balance); err != nil { + panic(aerrors.Fatalf("failed to transfer balance to beneficiary actor: %s", err)) + } + } + + // Delete the executing actor + if err := rt.state.DeleteActor(rt.Receiver()); err != nil { + panic(aerrors.Fatalf("failed to delete actor: %s", err)) + } + _ = rt.chargeGasSafe(gasOnActorExec) +} + +func (rt *Runtime) StartSpan(name string) func() { + panic("implement me") +} + +func (rt *Runtime) ValidateImmediateCallerIs(as ...address.Address) { + rt.abortIfAlreadyValidated() + imm := rt.Caller() + + for _, a := range as { + if imm == a { + return + } + } + rt.Abortf(exitcode.SysErrForbidden, "caller %s is not one of %s", rt.Caller(), as) +} + +func (rt *Runtime) Context() context.Context { + return rt.ctx +} + +func (rt *Runtime) Abortf(code exitcode.ExitCode, msg string, args ...interface{}) { + log.Warnf("Abortf: " + fmt.Sprintf(msg, args...)) + panic(aerrors.NewfSkip(2, code, msg, args...)) +} + +func (rt *Runtime) AbortStateMsg(msg string) { + panic(aerrors.NewfSkip(3, 101, msg)) +} + +func (rt *Runtime) ValidateImmediateCallerType(ts ...cid.Cid) { + rt.abortIfAlreadyValidated() + callerCid, ok := rt.GetActorCodeCID(rt.Caller()) + if !ok { + panic(aerrors.Fatalf("failed to lookup code cid for caller")) + } + for _, t := range ts { + if t == callerCid { + return + } + } + rt.Abortf(exitcode.SysErrForbidden, "caller cid type %q was not one of %v", callerCid, ts) +} + +func (rt *Runtime) CurrEpoch() abi.ChainEpoch { + return 
rt.height +} + +func (rt *Runtime) Send(to address.Address, method abi.MethodNum, m cbor.Marshaler, value abi.TokenAmount, out cbor.Er) exitcode.ExitCode { + if !rt.allowInternal { + rt.Abortf(exitcode.SysErrorIllegalActor, "runtime.Send() is currently disallowed") + } + var params []byte + if m != nil { + buf := new(bytes.Buffer) + if err := m.MarshalCBOR(buf); err != nil { + rt.Abortf(exitcode.ErrSerialization, "failed to marshal input parameters: %s", err) + } + params = buf.Bytes() + } + + ret, err := rt.internalSend(rt.Receiver(), to, method, value, params) + if err != nil { + if err.IsFatal() { + panic(err) + } + log.Warnf("vmctx send failed: to: %s, method: %d: ret: %d, err: %s", to, method, ret, err) + return err.RetCode() + } + _ = rt.chargeGasSafe(gasOnActorExec) + + if err := out.UnmarshalCBOR(bytes.NewReader(ret)); err != nil { + rt.Abortf(exitcode.ErrSerialization, "failed to unmarshal return value: %s", err) + } + return 0 +} + +func (rt *Runtime) internalSend(from, to address.Address, method abi.MethodNum, value types.BigInt, params []byte) ([]byte, aerrors.ActorError) { + start := build.Clock.Now() + ctx, span := trace.StartSpan(rt.ctx, "vmc.Send") + defer span.End() + if span.IsRecordingEvents() { + span.AddAttributes( + trace.StringAttribute("to", to.String()), + trace.Int64Attribute("method", int64(method)), + trace.StringAttribute("value", value.String()), + ) + } + + msg := &types.Message{ + From: from, + To: to, + Method: method, + Value: value, + Params: params, + GasLimit: rt.gasAvailable, + } + + st := rt.state + if err := st.Snapshot(ctx); err != nil { + return nil, aerrors.Fatalf("snapshot failed: %s", err) + } + defer st.ClearSnapshot() + + ret, errSend, subrt := rt.vm.send(ctx, msg, rt, nil, start) + if errSend != nil { + if errRevert := st.Revert(); errRevert != nil { + return nil, aerrors.Escalate(errRevert, "failed to revert state tree after failed subcall") + } + } + + if subrt != nil { + rt.numActorsCreated = subrt.numActorsCreated 
+ rt.executionTrace.Subcalls = append(rt.executionTrace.Subcalls, subrt.executionTrace) + } + return ret, errSend +} + +func (rt *Runtime) StateCreate(obj cbor.Marshaler) { + c := rt.StorePut(obj) + err := rt.stateCommit(EmptyObjectCid, c) + if err != nil { + panic(fmt.Errorf("failed to commit state after creating object: %w", err)) + } +} + +func (rt *Runtime) StateReadonly(obj cbor.Unmarshaler) { + act, err := rt.state.GetActor(rt.Receiver()) + if err != nil { + rt.Abortf(exitcode.SysErrorIllegalArgument, "failed to get actor for Readonly state: %s", err) + } + rt.StoreGet(act.Head, obj) +} + +func (rt *Runtime) StateTransaction(obj cbor.Er, f func()) { + if obj == nil { + rt.Abortf(exitcode.SysErrorIllegalActor, "Must not pass nil to Transaction()") + } + + act, err := rt.state.GetActor(rt.Receiver()) + if err != nil { + rt.Abortf(exitcode.SysErrorIllegalActor, "failed to get actor for Transaction: %s", err) + } + baseState := act.Head + rt.StoreGet(baseState, obj) + + rt.allowInternal = false + f() + rt.allowInternal = true + + c := rt.StorePut(obj) + + err = rt.stateCommit(baseState, c) + if err != nil { + panic(fmt.Errorf("failed to commit state after transaction: %w", err)) + } +} + +func (rt *Runtime) GetBalance(a address.Address) (types.BigInt, aerrors.ActorError) { + act, err := rt.state.GetActor(a) + switch err { + default: + return types.EmptyInt, aerrors.Escalate(err, "failed to look up actor balance") + case types.ErrActorNotFound: + return types.NewInt(0), nil + case nil: + return act.Balance, nil + } +} + +func (rt *Runtime) stateCommit(oldh, newh cid.Cid) aerrors.ActorError { + // TODO: we can make this more efficient in the future... 
+ act, err := rt.state.GetActor(rt.Receiver()) + if err != nil { + return aerrors.Escalate(err, "failed to get actor to commit state") + } + + if act.Head != oldh { + return aerrors.Fatal("failed to update, inconsistent base reference") + } + + act.Head = newh + + if err := rt.state.SetActor(rt.Receiver(), act); err != nil { + return aerrors.Fatalf("failed to set actor in commit state: %s", err) + } + + return nil +} + +func (rt *Runtime) finilizeGasTracing() { + if rt.lastGasCharge != nil { + rt.lastGasCharge.TimeTaken = time.Since(rt.lastGasChargeTime) + } +} + +// ChargeGas is spec actors function +func (rt *Runtime) ChargeGas(name string, compute int64, virtual int64) { + err := rt.chargeGasInternal(newGasCharge(name, compute, 0).WithVirtual(virtual, 0), 1) + if err != nil { + panic(err) + } +} + +func (rt *Runtime) chargeGas(gas GasCharge) { + err := rt.chargeGasInternal(gas, 1) + if err != nil { + panic(err) + } +} + +func (rt *Runtime) chargeGasFunc(skip int) func(GasCharge) { + return func(gas GasCharge) { + err := rt.chargeGasInternal(gas, 1+skip) + if err != nil { + panic(err) + } + } + +} + +func (rt *Runtime) chargeGasInternal(gas GasCharge, skip int) aerrors.ActorError { + toUse := gas.Total() + var callers [10]uintptr + cout := gruntime.Callers(2+skip, callers[:]) + + now := build.Clock.Now() + if rt.lastGasCharge != nil { + rt.lastGasCharge.TimeTaken = now.Sub(rt.lastGasChargeTime) + } + + gasTrace := types.GasTrace{ + Name: gas.Name, + Extra: gas.Extra, + + TotalGas: toUse, + ComputeGas: gas.ComputeGas, + StorageGas: gas.StorageGas, + + TotalVirtualGas: gas.VirtualCompute*GasComputeMulti + gas.VirtualStorage*GasStorageMulti, + VirtualComputeGas: gas.VirtualCompute, + VirtualStorageGas: gas.VirtualStorage, + + Callers: callers[:cout], + } + rt.executionTrace.GasCharges = append(rt.executionTrace.GasCharges, &gasTrace) + rt.lastGasChargeTime = now + rt.lastGasCharge = &gasTrace + + // overflow safe + if rt.gasUsed > rt.gasAvailable-toUse { + 
rt.gasUsed = rt.gasAvailable + return aerrors.Newf(exitcode.SysErrOutOfGas, "not enough gas: used=%d, available=%d", + rt.gasUsed, rt.gasAvailable) + } + rt.gasUsed += toUse + return nil +} + +func (rt *Runtime) chargeGasSafe(gas GasCharge) aerrors.ActorError { + return rt.chargeGasInternal(gas, 1) +} + +func (rt *Runtime) Pricelist() Pricelist { + return rt.pricelist +} + +func (rt *Runtime) incrementNumActorsCreated() { + rt.numActorsCreated++ +} + +func (rt *Runtime) abortIfAlreadyValidated() { + if rt.callerValidated { + rt.Abortf(exitcode.SysErrorIllegalActor, "Method must validate caller identity exactly once") + } + rt.callerValidated = true +} + +func (rt *Runtime) Log(level rtt.LogLevel, msg string, args ...interface{}) { + switch level { + case rtt.DEBUG: + actorLog.Debugf(msg, args...) + case rtt.INFO: + actorLog.Infof(msg, args...) + case rtt.WARN: + actorLog.Warnf(msg, args...) + case rtt.ERROR: + actorLog.Errorf(msg, args...) + } +} diff --git a/chain/vm/runtime_test.go b/chain/vm/runtime_test.go new file mode 100644 index 000000000..c22a8b615 --- /dev/null +++ b/chain/vm/runtime_test.go @@ -0,0 +1,47 @@ +package vm + +import ( + "io" + "testing" + + cbor "github.com/ipfs/go-ipld-cbor" + cbg "github.com/whyrusleeping/cbor-gen" + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-state-types/exitcode" + + "github.com/filecoin-project/lotus/chain/actors/aerrors" +) + +type NotAVeryGoodMarshaler struct{} + +func (*NotAVeryGoodMarshaler) MarshalCBOR(writer io.Writer) error { + return xerrors.Errorf("no") +} + +var _ cbg.CBORMarshaler = &NotAVeryGoodMarshaler{} + +func TestRuntimePutErrors(t *testing.T) { + defer func() { + err := recover() + if err == nil { + t.Fatal("expected non-nil recovery") + } + + aerr := err.(aerrors.ActorError) + if aerr.IsFatal() { + t.Fatal("expected non-fatal actor error") + } + + if aerr.RetCode() != exitcode.ErrSerialization { + t.Fatal("expected serialization error") + } + }() + + rt := Runtime{ + cst: 
cbor.NewCborStore(nil), + } + + rt.StorePut(&NotAVeryGoodMarshaler{}) + t.Error("expected panic") +} diff --git a/chain/vm/syscalls.go b/chain/vm/syscalls.go index 92f9ea269..a7f5dab0c 100644 --- a/chain/vm/syscalls.go +++ b/chain/vm/syscalls.go @@ -1,14 +1,299 @@ package vm import ( - "github.com/filecoin-project/lotus/chain/actors" + "bytes" + "context" + "fmt" + goruntime "runtime" + "sync" + + "github.com/filecoin-project/specs-actors/actors/runtime/proof" + + "github.com/filecoin-project/go-address" + "github.com/ipfs/go-cid" + cbor "github.com/ipfs/go-ipld-cbor" + "github.com/minio/blake2b-simd" + mh "github.com/multiformats/go-multihash" + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/crypto" + "github.com/filecoin-project/lotus/chain/actors/adt" + "github.com/filecoin-project/lotus/chain/actors/builtin/miner" + "github.com/filecoin-project/lotus/chain/state" "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/lib/sigs" + "github.com/filecoin-project/specs-actors/actors/runtime" + + "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper" ) +func init() { + mh.Codes[0xf104] = "filecoin" +} + // Actual type is defined in chain/types/vmcontext.go because the VMContext interface is there -func DefaultSyscalls() *types.VMSyscalls { - return &types.VMSyscalls{ - ValidatePoRep: actors.ValidatePoRep, +type SyscallBuilder func(ctx context.Context, cstate *state.StateTree, cst cbor.IpldStore) runtime.Syscalls + +func Syscalls(verifier ffiwrapper.Verifier) SyscallBuilder { + return func(ctx context.Context, cstate *state.StateTree, cst cbor.IpldStore) runtime.Syscalls { + return &syscallShim{ + ctx: ctx, + + cstate: cstate, + cst: cst, + + verifier: verifier, + } } } + +type syscallShim struct { + ctx context.Context + + cstate *state.StateTree + cst cbor.IpldStore + verifier ffiwrapper.Verifier +} + +func (ss *syscallShim) 
ComputeUnsealedSectorCID(st abi.RegisteredSealProof, pieces []abi.PieceInfo) (cid.Cid, error) { + var sum abi.PaddedPieceSize + for _, p := range pieces { + sum += p.Size + } + + commd, err := ffiwrapper.GenerateUnsealedCID(st, pieces) + if err != nil { + log.Errorf("generate data commitment failed: %s", err) + return cid.Undef, err + } + + return commd, nil +} + +func (ss *syscallShim) HashBlake2b(data []byte) [32]byte { + return blake2b.Sum256(data) +} + +// Checks validity of the submitted consensus fault with the two block headers needed to prove the fault +// and an optional extra one to check common ancestry (as needed). +// Note that the blocks are ordered: the method requires a.Epoch() <= b.Epoch(). +func (ss *syscallShim) VerifyConsensusFault(a, b, extra []byte) (*runtime.ConsensusFault, error) { + // Note that block syntax is not validated. Any validly signed block will be accepted pursuant to the below conditions. + // Whether or not it could ever have been accepted in a chain is not checked/does not matter here. + // for that reason when checking block parent relationships, rather than instantiating a Tipset to do so + // (which runs a syntactic check), we do it directly on the CIDs. + + // (0) cheap preliminary checks + + // can blocks be decoded properly? + var blockA, blockB types.BlockHeader + if decodeErr := blockA.UnmarshalCBOR(bytes.NewReader(a)); decodeErr != nil { + return nil, xerrors.Errorf("cannot decode first block header: %w", decodeErr) + } + + if decodeErr := blockB.UnmarshalCBOR(bytes.NewReader(b)); decodeErr != nil { + return nil, xerrors.Errorf("cannot decode second block header: %w", decodeErr) + } + + // are blocks the same? + if blockA.Cid().Equals(blockB.Cid()) { + return nil, fmt.Errorf("no consensus fault: submitted blocks are the same") + } + + // (1) check conditions necessary to any consensus fault + + // were blocks mined by same miner? 
+ if blockA.Miner != blockB.Miner { + return nil, fmt.Errorf("no consensus fault: blocks not mined by same miner") + } + + // block a must be earlier or equal to block b, epoch wise (ie at least as early in the chain). + if blockB.Height < blockA.Height { + return nil, fmt.Errorf("first block must not be of higher height than second") + } + + // (2) check for the consensus faults themselves + var consensusFault *runtime.ConsensusFault + + // (a) double-fork mining fault + if blockA.Height == blockB.Height { + consensusFault = &runtime.ConsensusFault{ + Target: blockA.Miner, + Epoch: blockB.Height, + Type: runtime.ConsensusFaultDoubleForkMining, + } + } + + // (b) time-offset mining fault + // strictly speaking no need to compare heights based on double fork mining check above, + // but at same height this would be a different fault. + if types.CidArrsEqual(blockA.Parents, blockB.Parents) && blockA.Height != blockB.Height { + consensusFault = &runtime.ConsensusFault{ + Target: blockA.Miner, + Epoch: blockB.Height, + Type: runtime.ConsensusFaultTimeOffsetMining, + } + } + + // (c) parent-grinding fault + // Here extra is the "witness", a third block that shows the connection between A and B as + // A's sibling and B's parent. 
+ // Specifically, since A is of lower height, it must be that B was mined omitting A from its tipset + // + // B + // | + // [A, C] + var blockC types.BlockHeader + if len(extra) > 0 { + if decodeErr := blockC.UnmarshalCBOR(bytes.NewReader(extra)); decodeErr != nil { + return nil, xerrors.Errorf("cannot decode extra: %w", decodeErr) + } + + if types.CidArrsEqual(blockA.Parents, blockC.Parents) && blockA.Height == blockC.Height && + types.CidArrsContains(blockB.Parents, blockC.Cid()) && !types.CidArrsContains(blockB.Parents, blockA.Cid()) { + consensusFault = &runtime.ConsensusFault{ + Target: blockA.Miner, + Epoch: blockB.Height, + Type: runtime.ConsensusFaultParentGrinding, + } + } + } + + // (3) return if no consensus fault by now + if consensusFault == nil { + return nil, xerrors.Errorf("no consensus fault detected") + } + + // else + // (4) expensive final checks + + // check blocks are properly signed by their respective miner + // note we do not need to check extra's: it is a parent to block b + // which itself is signed, so it was willingly included by the miner + if sigErr := ss.VerifyBlockSig(&blockA); sigErr != nil { + return nil, xerrors.Errorf("cannot verify first block sig: %w", sigErr) + } + + if sigErr := ss.VerifyBlockSig(&blockB); sigErr != nil { + return nil, xerrors.Errorf("cannot verify second block sig: %w", sigErr) + } + + return consensusFault, nil +} + +func (ss *syscallShim) VerifyBlockSig(blk *types.BlockHeader) error { + + // get appropriate miner actor + act, err := ss.cstate.GetActor(blk.Miner) + if err != nil { + return err + } + + // use that to get the miner state + mas, err := miner.Load(adt.WrapStore(ss.ctx, ss.cst), act) + if err != nil { + return err + } + + info, err := mas.Info() + if err != nil { + return err + } + + // and use to get resolved workerKey + waddr, err := ResolveToKeyAddr(ss.cstate, ss.cst, info.Worker) + if err != nil { + return err + } + + if err := sigs.CheckBlockSignature(ss.ctx, blk, waddr); err != nil { + 
return err + } + + return nil +} + +func (ss *syscallShim) VerifyPoSt(proof proof.WindowPoStVerifyInfo) error { + ok, err := ss.verifier.VerifyWindowPoSt(context.TODO(), proof) + if err != nil { + return err + } + if !ok { + return fmt.Errorf("proof was invalid") + } + return nil +} + +func (ss *syscallShim) VerifySeal(info proof.SealVerifyInfo) error { + //_, span := trace.StartSpan(ctx, "ValidatePoRep") + //defer span.End() + + miner, err := address.NewIDAddress(uint64(info.Miner)) + if err != nil { + return xerrors.Errorf("weirdly failed to construct address: %w", err) + } + + ticket := []byte(info.Randomness) + proof := info.Proof + seed := []byte(info.InteractiveRandomness) + + log.Debugf("Verif r:%x; d:%x; m:%s; t:%x; s:%x; N:%d; p:%x", info.SealedCID, info.UnsealedCID, miner, ticket, seed, info.SectorID.Number, proof) + + //func(ctx context.Context, maddr address.Address, ssize abi.SectorSize, commD, commR, ticket, proof, seed []byte, sectorID abi.SectorNumber) + ok, err := ss.verifier.VerifySeal(info) + if err != nil { + return xerrors.Errorf("failed to validate PoRep: %w", err) + } + if !ok { + return fmt.Errorf("invalid proof") + } + + return nil +} + +func (ss *syscallShim) VerifySignature(sig crypto.Signature, addr address.Address, input []byte) error { + // TODO: in genesis setup, we are currently faking signatures + + kaddr, err := ResolveToKeyAddr(ss.cstate, ss.cst, addr) + if err != nil { + return err + } + + return sigs.Verify(&sig, kaddr, input) +} + +var BatchSealVerifyParallelism = goruntime.NumCPU() + +func (ss *syscallShim) BatchVerifySeals(inp map[address.Address][]proof.SealVerifyInfo) (map[address.Address][]bool, error) { + out := make(map[address.Address][]bool) + + sema := make(chan struct{}, BatchSealVerifyParallelism) + + var wg sync.WaitGroup + for addr, seals := range inp { + results := make([]bool, len(seals)) + out[addr] = results + + for i, s := range seals { + wg.Add(1) + go func(ma address.Address, ix int, svi 
proof.SealVerifyInfo, res []bool) { + defer wg.Done() + sema <- struct{}{} + + if err := ss.VerifySeal(svi); err != nil { + log.Warnw("seal verify in batch failed", "miner", ma, "index", ix, "err", err) + res[ix] = false + } else { + res[ix] = true + } + + <-sema + }(addr, i, s, results) + } + } + wg.Wait() + + return out, nil +} diff --git a/chain/vm/validation_test.go b/chain/vm/validation_test.go deleted file mode 100644 index 5e5a46a63..000000000 --- a/chain/vm/validation_test.go +++ /dev/null @@ -1,35 +0,0 @@ -package vm_test - -import ( - "testing" - - "github.com/filecoin-project/chain-validation/pkg/suites" - - "github.com/filecoin-project/lotus/chain/validation" -) - -func TestStorageMinerValidation(t *testing.T) { - t.SkipNow() - factory := validation.NewFactories() - suites.CreateStorageMinerAndUpdatePeerID(t, factory) - -} - -func TestValueTransfer(t *testing.T) { - factory := validation.NewFactories() - suites.AccountValueTransferSuccess(t, factory, 126) - suites.AccountValueTransferZeroFunds(t, factory, 112) - suites.AccountValueTransferOverBalanceNonZero(t, factory, 0) - suites.AccountValueTransferOverBalanceZero(t, factory, 0) - suites.AccountValueTransferToSelf(t, factory, 0) - suites.AccountValueTransferFromKnownToUnknownAccount(t, factory, 0) - suites.AccountValueTransferFromUnknownToKnownAccount(t, factory, 0) - suites.AccountValueTransferFromUnknownToUnknownAccount(t, factory, 0) -} - -func TestMultiSig(t *testing.T) { - factory := validation.NewFactories() - suites.MultiSigActorConstructor(t, factory) - suites.MultiSigActorProposeApprove(t, factory) - suites.MultiSigActorProposeCancel(t, factory) -} diff --git a/chain/vm/vm.go b/chain/vm/vm.go index 6fa6a432c..54ea47698 100644 --- a/chain/vm/vm.go +++ b/chain/vm/vm.go @@ -4,404 +4,312 @@ import ( "bytes" "context" "fmt" - "math/big" + "reflect" + "sync/atomic" + "time" block "github.com/ipfs/go-block-format" cid "github.com/ipfs/go-cid" - hamt "github.com/ipfs/go-hamt-ipld" - blockstore 
"github.com/ipfs/go-ipfs-blockstore" - logging "github.com/ipfs/go-log" + cbor "github.com/ipfs/go-ipld-cbor" + logging "github.com/ipfs/go-log/v2" + mh "github.com/multiformats/go-multihash" cbg "github.com/whyrusleeping/cbor-gen" "go.opencensus.io/trace" "golang.org/x/xerrors" "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/go-state-types/crypto" + "github.com/filecoin-project/go-state-types/exitcode" + "github.com/filecoin-project/go-state-types/network" + "github.com/filecoin-project/specs-actors/actors/builtin" + "github.com/filecoin-project/lotus/build" - "github.com/filecoin-project/lotus/chain/actors" + "github.com/filecoin-project/lotus/chain/actors/adt" "github.com/filecoin-project/lotus/chain/actors/aerrors" + "github.com/filecoin-project/lotus/chain/actors/builtin/account" + "github.com/filecoin-project/lotus/chain/actors/builtin/reward" "github.com/filecoin-project/lotus/chain/state" "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/lib/blockstore" + bstore "github.com/filecoin-project/lotus/lib/blockstore" "github.com/filecoin-project/lotus/lib/bufbstore" ) var log = logging.Logger("vm") +var actorLog = logging.Logger("actors") +var gasOnActorExec = newGasCharge("OnActorExec", 0, 0) -const ( - gasFundTransfer = 10 - gasInvoke = 5 - - gasGetObj = 10 - gasGetPerByte = 1 - gasPutObj = 20 - gasPutPerByte = 2 - gasCommit = 50 - gasPerMessageByte = 2 +// stat counters +var ( + StatSends uint64 + StatApplied uint64 ) -const ( - outOfGasErrCode = 200 -) - -type VMContext struct { - ctx context.Context - - vm *VM - state *state.StateTree - msg *types.Message - height uint64 - cst *hamt.CborIpldStore - - gasAvailable types.BigInt - gasUsed types.BigInt - - sys *types.VMSyscalls - - // root cid of the state of the actor this invocation will be on - sroot cid.Cid - - // address that started invoke chain - 
origin address.Address -} - -// Message is the message that kicked off the current invocation -func (vmc *VMContext) Message() *types.Message { - return vmc.msg -} - -func (vmc *VMContext) GetRandomness(height uint64) ([]byte, aerrors.ActorError) { - - res, err := vmc.vm.rand.GetRandomness(vmc.ctx, int64(height)) - if err != nil { - return nil, aerrors.Escalate(err, "could not get randomness") - } - return res, nil -} - -func (vmc *VMContext) Sys() *types.VMSyscalls { - return vmc.sys -} - -// Storage interface - -func (vmc *VMContext) Put(i cbg.CBORMarshaler) (cid.Cid, aerrors.ActorError) { - c, err := vmc.cst.Put(context.TODO(), i) - if err != nil { - return cid.Undef, aerrors.HandleExternalError(err, fmt.Sprintf("putting object %T", i)) - } - return c, nil -} - -func (vmc *VMContext) Get(c cid.Cid, out cbg.CBORUnmarshaler) aerrors.ActorError { - err := vmc.cst.Get(context.TODO(), c, out) - if err != nil { - return aerrors.HandleExternalError(err, "getting cid") - } - return nil -} - -func (vmc *VMContext) GetHead() cid.Cid { - return vmc.sroot -} - -func (vmc *VMContext) Commit(oldh, newh cid.Cid) aerrors.ActorError { - if err := vmc.ChargeGas(gasCommit); err != nil { - return aerrors.Wrap(err, "out of gas") - } - if vmc.sroot != oldh { - return aerrors.New(1, "failed to update, inconsistent base reference") - } - - vmc.sroot = newh - return nil -} - -// End of storage interface - -// Storage provides access to the VM storage layer -func (vmc *VMContext) Storage() types.Storage { - return vmc -} - -func (vmc *VMContext) Ipld() *hamt.CborIpldStore { - return vmc.cst -} - -func (vmc *VMContext) Origin() address.Address { - return vmc.origin -} - -// Send allows the current execution context to invoke methods on other actors in the system -func (vmc *VMContext) Send(to address.Address, method uint64, value types.BigInt, params []byte) ([]byte, aerrors.ActorError) { - ctx, span := trace.StartSpan(vmc.ctx, "vmc.Send") - defer span.End() - if span.IsRecordingEvents() 
{ - span.AddAttributes( - trace.StringAttribute("to", to.String()), - trace.Int64Attribute("method", int64(method)), - trace.StringAttribute("value", value.String()), - ) - } - - msg := &types.Message{ - From: vmc.msg.To, - To: to, - Method: method, - Value: value, - Params: params, - GasLimit: vmc.gasAvailable, - } - - ret, err, _ := vmc.vm.send(ctx, msg, vmc, 0) - return ret, err -} - -// BlockHeight returns the height of the block this message was added to the chain in -func (vmc *VMContext) BlockHeight() uint64 { - return vmc.height -} - -func (vmc *VMContext) GasUsed() types.BigInt { - return vmc.gasUsed -} - -func (vmc *VMContext) ChargeGas(amount uint64) aerrors.ActorError { - toUse := types.NewInt(amount) - vmc.gasUsed = types.BigAdd(vmc.gasUsed, toUse) - if vmc.gasUsed.GreaterThan(vmc.gasAvailable) { - return aerrors.Newf(outOfGasErrCode, "not enough gas: used=%s, available=%s", vmc.gasUsed, vmc.gasAvailable) - } - return nil -} - -func (vmc *VMContext) StateTree() (types.StateTree, aerrors.ActorError) { - if vmc.msg.To != actors.InitAddress { - return nil, aerrors.Escalate(fmt.Errorf("only init actor can access state tree directly"), "invalid use of StateTree") - } - - return vmc.state, nil -} - -const GasVerifySignature = 50 - -func (vmctx *VMContext) VerifySignature(sig *types.Signature, act address.Address, data []byte) aerrors.ActorError { - if err := vmctx.ChargeGas(GasVerifySignature); err != nil { - return err - } - - if act.Protocol() == address.ID { - kaddr, err := ResolveToKeyAddr(vmctx.state, vmctx.cst, act) - if err != nil { - return aerrors.Wrap(err, "failed to resolve address to key address") - } - act = kaddr - } - - if err := sig.Verify(act, data); err != nil { - return aerrors.New(2, "signature verification failed") - } - - return nil -} - -func ResolveToKeyAddr(state types.StateTree, cst *hamt.CborIpldStore, addr address.Address) (address.Address, aerrors.ActorError) { +// ResolveToKeyAddr returns the public key type of address 
(`BLS`/`SECP256K1`) of an account actor identified by `addr`. +func ResolveToKeyAddr(state types.StateTree, cst cbor.IpldStore, addr address.Address) (address.Address, error) { if addr.Protocol() == address.BLS || addr.Protocol() == address.SECP256K1 { return addr, nil } act, err := state.GetActor(addr) if err != nil { - return address.Undef, aerrors.Newf(1, "failed to find actor: %s", addr) + return address.Undef, xerrors.Errorf("failed to find actor: %s", addr) } - if act.Code != actors.AccountCodeCid { - return address.Undef, aerrors.New(1, "address was not for an account actor") + aast, err := account.Load(adt.WrapStore(context.TODO(), cst), act) + if err != nil { + return address.Undef, xerrors.Errorf("failed to get account actor state for %s: %w", addr, err) } - var aast actors.AccountActorState - if err := cst.Get(context.TODO(), act.Head, &aast); err != nil { - return address.Undef, aerrors.Escalate(err, fmt.Sprintf("failed to get account actor state for %s", addr)) - } - - return aast.Address, nil + return aast.PubkeyAddress() } -func (vmctx *VMContext) GetBalance(a address.Address) (types.BigInt, aerrors.ActorError) { - act, err := vmctx.state.GetActor(a) - switch err { - default: - return types.EmptyInt, aerrors.Escalate(err, "failed to look up actor balance") - case hamt.ErrNotFound: - return types.NewInt(0), nil - case nil: - return act.Balance, nil - } -} - -func (vmctx *VMContext) Context() context.Context { - return vmctx.ctx -} - -type hBlocks interface { - GetBlock(context.Context, cid.Cid) (block.Block, error) - AddBlock(block.Block) error -} - -var _ hBlocks = (*gasChargingBlocks)(nil) +var _ cbor.IpldBlockstore = (*gasChargingBlocks)(nil) type gasChargingBlocks struct { - chargeGas func(uint64) aerrors.ActorError - under hBlocks + chargeGas func(GasCharge) + pricelist Pricelist + under cbor.IpldBlockstore } -func (bs *gasChargingBlocks) GetBlock(ctx context.Context, c cid.Cid) (block.Block, error) { - if err := bs.chargeGas(gasGetObj); err != 
nil { - return nil, err - } - blk, err := bs.under.GetBlock(ctx, c) +func (bs *gasChargingBlocks) Get(c cid.Cid) (block.Block, error) { + bs.chargeGas(bs.pricelist.OnIpldGet()) + blk, err := bs.under.Get(c) if err != nil { - return nil, err - } - if err := bs.chargeGas(uint64(len(blk.RawData())) * gasGetPerByte); err != nil { - return nil, err + return nil, aerrors.Escalate(err, "failed to get block from blockstore") } + bs.chargeGas(newGasCharge("OnIpldGetEnd", 0, 0).WithExtra(len(blk.RawData()))) + bs.chargeGas(gasOnActorExec) return blk, nil } -func (bs *gasChargingBlocks) AddBlock(blk block.Block) error { - if err := bs.chargeGas(gasPutObj + uint64(len(blk.RawData()))*gasPutPerByte); err != nil { - return err +func (bs *gasChargingBlocks) Put(blk block.Block) error { + bs.chargeGas(bs.pricelist.OnIpldPut(len(blk.RawData()))) + + if err := bs.under.Put(blk); err != nil { + return aerrors.Escalate(err, "failed to write data to disk") } - return bs.under.AddBlock(blk) + bs.chargeGas(gasOnActorExec) + return nil } -func (vm *VM) makeVMContext(ctx context.Context, sroot cid.Cid, msg *types.Message, origin address.Address, usedGas types.BigInt) *VMContext { - vmc := &VMContext{ - ctx: ctx, - vm: vm, - state: vm.cstate, - sroot: sroot, - msg: msg, - origin: origin, - height: vm.blockHeight, - sys: vm.Syscalls, +func (vm *VM) makeRuntime(ctx context.Context, msg *types.Message, origin address.Address, originNonce uint64, usedGas int64, nac uint64) *Runtime { + rt := &Runtime{ + ctx: ctx, + vm: vm, + state: vm.cstate, + origin: origin, + originNonce: originNonce, + height: vm.blockHeight, - gasUsed: usedGas, - gasAvailable: msg.GasLimit, + gasUsed: usedGas, + gasAvailable: msg.GasLimit, + numActorsCreated: nac, + pricelist: PricelistByEpoch(vm.blockHeight), + allowInternal: true, + callerValidated: false, + executionTrace: types.ExecutionTrace{Msg: msg}, } - vmc.cst = &hamt.CborIpldStore{ - Blocks: &gasChargingBlocks{vmc.ChargeGas, vm.cst.Blocks}, + + rt.cst = 
&cbor.BasicIpldStore{ + Blocks: &gasChargingBlocks{rt.chargeGasFunc(2), rt.pricelist, vm.cst.Blocks}, Atlas: vm.cst.Atlas, } - return vmc + rt.Syscalls = pricedSyscalls{ + under: vm.Syscalls(ctx, vm.cstate, rt.cst), + chargeGas: rt.chargeGasFunc(1), + pl: rt.pricelist, + } + + vmm := *msg + resF, ok := rt.ResolveAddress(msg.From) + if !ok { + rt.Abortf(exitcode.SysErrInvalidReceiver, "resolve msg.From address failed") + } + vmm.From = resF + rt.Message = vmm + + return rt } +type UnsafeVM struct { + VM *VM +} + +func (vm *UnsafeVM) MakeRuntime(ctx context.Context, msg *types.Message, origin address.Address, originNonce uint64, usedGas int64, nac uint64) *Runtime { + return vm.VM.makeRuntime(ctx, msg, origin, originNonce, usedGas, nac) +} + +type CircSupplyCalculator func(context.Context, abi.ChainEpoch, *state.StateTree) (abi.TokenAmount, error) +type NtwkVersionGetter func(context.Context, abi.ChainEpoch) network.Version + type VM struct { - cstate *state.StateTree - base cid.Cid - cst *hamt.CborIpldStore - buf *bufbstore.BufferedBS - blockHeight uint64 - blockMiner address.Address - inv *invoker - rand Rand + cstate *state.StateTree + base cid.Cid + cst *cbor.BasicIpldStore + buf *bufbstore.BufferedBS + blockHeight abi.ChainEpoch + inv *Invoker + rand Rand + circSupplyCalc CircSupplyCalculator + ntwkVersion NtwkVersionGetter + baseFee abi.TokenAmount - Syscalls *types.VMSyscalls + Syscalls SyscallBuilder } -func NewVM(base cid.Cid, height uint64, r Rand, maddr address.Address, cbs blockstore.Blockstore) (*VM, error) { - buf := bufbstore.NewBufferedBstore(cbs) - cst := hamt.CSTFromBstore(buf) - state, err := state.LoadStateTree(cst, base) +type VMOpts struct { + StateBase cid.Cid + Epoch abi.ChainEpoch + Rand Rand + Bstore bstore.Blockstore + Syscalls SyscallBuilder + CircSupplyCalc CircSupplyCalculator + NtwkVersion NtwkVersionGetter // TODO: stebalien: In what cases do we actually need this? 
It seems like even when creating new networks we want to use the 'global'/build-default version getter + BaseFee abi.TokenAmount +} + +func NewVM(ctx context.Context, opts *VMOpts) (*VM, error) { + buf := bufbstore.NewBufferedBstore(opts.Bstore) + cst := cbor.NewCborStore(buf) + state, err := state.LoadStateTree(cst, opts.StateBase) if err != nil { return nil, err } return &VM{ - cstate: state, - base: base, - cst: cst, - buf: buf, - blockHeight: height, - blockMiner: maddr, - inv: newInvoker(), - rand: r, - Syscalls: DefaultSyscalls(), + cstate: state, + base: opts.StateBase, + cst: cst, + buf: buf, + blockHeight: opts.Epoch, + inv: NewInvoker(), + rand: opts.Rand, // TODO: Probably should be a syscall + circSupplyCalc: opts.CircSupplyCalc, + ntwkVersion: opts.NtwkVersion, + Syscalls: opts.Syscalls, + baseFee: opts.BaseFee, }, nil } type Rand interface { - GetRandomness(ctx context.Context, h int64) ([]byte, error) + GetChainRandomness(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) + GetBeaconRandomness(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) } type ApplyRet struct { types.MessageReceipt - ActorErr aerrors.ActorError + ActorErr aerrors.ActorError + ExecutionTrace types.ExecutionTrace + Duration time.Duration + GasCosts GasOutputs } -func (vm *VM) send(ctx context.Context, msg *types.Message, parent *VMContext, - gasCharge uint64) ([]byte, aerrors.ActorError, *VMContext) { +func (vm *VM) send(ctx context.Context, msg *types.Message, parent *Runtime, + gasCharge *GasCharge, start time.Time) ([]byte, aerrors.ActorError, *Runtime) { + + defer atomic.AddUint64(&StatSends, 1) st := vm.cstate - fromActor, err := st.GetActor(msg.From) - if err != nil { - return nil, aerrors.Absorb(err, 1, "could not find source actor"), nil - } - toActor, err := st.GetActor(msg.To) - if err != nil { - if xerrors.Is(err, types.ErrActorNotFound) { - a, err := 
TryCreateAccountActor(st, msg.To) - if err != nil { - return nil, aerrors.Absorb(err, 1, "could not create account"), nil - } - toActor = a - } else { - return nil, aerrors.Escalate(err, "getting actor"), nil - } - } - - gasUsed := types.NewInt(gasCharge) origin := msg.From + on := msg.Nonce + var nac uint64 = 0 + var gasUsed int64 if parent != nil { - gasUsed = types.BigAdd(parent.gasUsed, gasUsed) + gasUsed = parent.gasUsed origin = parent.origin + on = parent.originNonce + nac = parent.numActorsCreated } - vmctx := vm.makeVMContext(ctx, toActor.Head, msg, origin, gasUsed) + + rt := vm.makeRuntime(ctx, msg, origin, on, gasUsed, nac) + rt.lastGasChargeTime = start if parent != nil { + rt.lastGasChargeTime = parent.lastGasChargeTime + rt.lastGasCharge = parent.lastGasCharge defer func() { - parent.gasUsed = vmctx.gasUsed + parent.gasUsed = rt.gasUsed + parent.lastGasChargeTime = rt.lastGasChargeTime + parent.lastGasCharge = rt.lastGasCharge }() } - - if types.BigCmp(msg.Value, types.NewInt(0)) != 0 { - if aerr := vmctx.ChargeGas(gasFundTransfer); aerr != nil { - return nil, aerrors.Wrap(aerr, "sending funds"), nil - } - - if err := Transfer(fromActor, toActor, msg.Value); err != nil { - return nil, aerrors.Absorb(err, 1, "failed to transfer funds"), nil + if gasCharge != nil { + if err := rt.chargeGasSafe(*gasCharge); err != nil { + // this should never happen + return nil, aerrors.Wrap(err, "not enough gas for initial message charge, this should not happen"), rt } } - if msg.Method != 0 { - ret, err := vm.Invoke(toActor, vmctx, msg.Method, msg.Params) - if !aerrors.IsFatal(err) { - toActor.Head = vmctx.Storage().GetHead() + ret, err := func() ([]byte, aerrors.ActorError) { + _ = rt.chargeGasSafe(newGasCharge("OnGetActor", 0, 0)) + toActor, err := st.GetActor(msg.To) + if err != nil { + if xerrors.Is(err, types.ErrActorNotFound) { + a, err := TryCreateAccountActor(rt, msg.To) + if err != nil { + return nil, aerrors.Wrapf(err, "could not create account") + } + 
toActor = a + } else { + return nil, aerrors.Escalate(err, "getting actor") + } } - return ret, err, vmctx + + if aerr := rt.chargeGasSafe(rt.Pricelist().OnMethodInvocation(msg.Value, msg.Method)); aerr != nil { + return nil, aerrors.Wrap(aerr, "not enough gas for method invocation") + } + + // not charging any gas, just logging + //nolint:errcheck + defer rt.chargeGasSafe(newGasCharge("OnMethodInvocationDone", 0, 0)) + + if types.BigCmp(msg.Value, types.NewInt(0)) != 0 { + if err := vm.transfer(msg.From, msg.To, msg.Value); err != nil { + return nil, aerrors.Wrap(err, "failed to transfer funds") + } + } + + if msg.Method != 0 { + var ret []byte + _ = rt.chargeGasSafe(gasOnActorExec) + ret, err := vm.Invoke(toActor, rt, msg.Method, msg.Params) + return ret, err + } + return nil, nil + }() + + mr := types.MessageReceipt{ + ExitCode: aerrors.RetCode(err), + Return: ret, + GasUsed: rt.gasUsed, + } + rt.executionTrace.MsgRct = &mr + rt.executionTrace.Duration = time.Since(start) + if err != nil { + rt.executionTrace.Error = err.Error() } - return nil, nil, vmctx + return ret, err, rt } func checkMessage(msg *types.Message) error { - if msg.GasLimit == types.EmptyInt { - return xerrors.Errorf("message gas no gas limit set") + if msg.GasLimit == 0 { + return xerrors.Errorf("message has no gas limit set") + } + if msg.GasLimit < 0 { + return xerrors.Errorf("message has negative gas limit") } - if msg.GasPrice == types.EmptyInt { - return xerrors.Errorf("message gas no gas price set") + if msg.GasFeeCap == types.EmptyInt { + return xerrors.Errorf("message fee cap not set") + } + + if msg.GasPremium == types.EmptyInt { + return xerrors.Errorf("message gas premium not set") } if msg.Value == types.EmptyInt { @@ -411,9 +319,30 @@ func checkMessage(msg *types.Message) error { return nil } -func (vm *VM) ApplyMessage(ctx context.Context, msg *types.Message) (*ApplyRet, error) { +func (vm *VM) ApplyImplicitMessage(ctx context.Context, msg *types.Message) (*ApplyRet, error) { + 
start := build.Clock.Now() + defer atomic.AddUint64(&StatApplied, 1) + ret, actorErr, rt := vm.send(ctx, msg, nil, nil, start) + rt.finilizeGasTracing() + return &ApplyRet{ + MessageReceipt: types.MessageReceipt{ + ExitCode: aerrors.RetCode(actorErr), + Return: ret, + GasUsed: 0, + }, + ActorErr: actorErr, + ExecutionTrace: rt.executionTrace, + GasCosts: GasOutputs{}, + Duration: time.Since(start), + }, actorErr +} + +func (vm *VM) ApplyMessage(ctx context.Context, cmsg types.ChainMsg) (*ApplyRet, error) { + start := build.Clock.Now() ctx, span := trace.StartSpan(ctx, "vm.ApplyMessage") defer span.End() + defer atomic.AddUint64(&StatApplied, 1) + msg := cmsg.VMMessage() if span.IsRecordingEvents() { span.AddAttributes( trace.StringAttribute("to", msg.To.String()), @@ -426,77 +355,170 @@ func (vm *VM) ApplyMessage(ctx context.Context, msg *types.Message) (*ApplyRet, return nil, err } + pl := PricelistByEpoch(vm.blockHeight) + + msgGas := pl.OnChainMessage(cmsg.ChainLength()) + msgGasCost := msgGas.Total() + // this should never happen, but is currently still exercised by some tests + if msgGasCost > msg.GasLimit { + gasOutputs := ZeroGasOutputs() + gasOutputs.MinerPenalty = types.BigMul(vm.baseFee, abi.NewTokenAmount(msgGasCost)) + return &ApplyRet{ + MessageReceipt: types.MessageReceipt{ + ExitCode: exitcode.SysErrOutOfGas, + GasUsed: 0, + }, + GasCosts: gasOutputs, + Duration: time.Since(start), + }, nil + } + st := vm.cstate - if err := st.Snapshot(); err != nil { - return nil, xerrors.Errorf("snapshot failed: %w", err) - } + minerPenaltyAmount := types.BigMul(vm.baseFee, abi.NewTokenAmount(msg.GasLimit)) fromActor, err := st.GetActor(msg.From) + // this should never happen, but is currently still exercised by some tests if err != nil { - return nil, xerrors.Errorf("from actor not found: %w", err) + if xerrors.Is(err, types.ErrActorNotFound) { + gasOutputs := ZeroGasOutputs() + gasOutputs.MinerPenalty = minerPenaltyAmount + return &ApplyRet{ + MessageReceipt: 
types.MessageReceipt{ + ExitCode: exitcode.SysErrSenderInvalid, + GasUsed: 0, + }, + ActorErr: aerrors.Newf(exitcode.SysErrSenderInvalid, "actor not found: %s", msg.From), + GasCosts: gasOutputs, + Duration: time.Since(start), + }, nil + } + return nil, xerrors.Errorf("failed to look up from actor: %w", err) } - serMsg, err := msg.Serialize() - if err != nil { - return nil, xerrors.Errorf("could not serialize message: %w", err) - } - msgGasCost := uint64(len(serMsg)) * gasPerMessageByte - - gascost := types.BigMul(msg.GasLimit, msg.GasPrice) - totalCost := types.BigAdd(gascost, msg.Value) - if fromActor.Balance.LessThan(totalCost) { - return nil, xerrors.Errorf("not enough funds (%s < %s)", fromActor.Balance, totalCost) - } - - gasHolder := &types.Actor{Balance: types.NewInt(0)} - if err := Transfer(fromActor, gasHolder, gascost); err != nil { - return nil, xerrors.Errorf("failed to withdraw gas funds: %w", err) + // this should never happen, but is currently still exercised by some tests + if !fromActor.IsAccountActor() { + gasOutputs := ZeroGasOutputs() + gasOutputs.MinerPenalty = minerPenaltyAmount + return &ApplyRet{ + MessageReceipt: types.MessageReceipt{ + ExitCode: exitcode.SysErrSenderInvalid, + GasUsed: 0, + }, + ActorErr: aerrors.Newf(exitcode.SysErrSenderInvalid, "send from not account actor: %s", fromActor.Code), + GasCosts: gasOutputs, + Duration: time.Since(start), + }, nil } if msg.Nonce != fromActor.Nonce { - return nil, xerrors.Errorf("invalid nonce (got %d, expected %d)", msg.Nonce, fromActor.Nonce) + gasOutputs := ZeroGasOutputs() + gasOutputs.MinerPenalty = minerPenaltyAmount + return &ApplyRet{ + MessageReceipt: types.MessageReceipt{ + ExitCode: exitcode.SysErrSenderStateInvalid, + GasUsed: 0, + }, + ActorErr: aerrors.Newf(exitcode.SysErrSenderStateInvalid, + "actor nonce invalid: msg:%d != state:%d", msg.Nonce, fromActor.Nonce), + + GasCosts: gasOutputs, + Duration: time.Since(start), + }, nil } - fromActor.Nonce++ - ret, actorErr, vmctx := 
vm.send(ctx, msg, nil, msgGasCost) + gascost := types.BigMul(types.NewInt(uint64(msg.GasLimit)), msg.GasFeeCap) + if fromActor.Balance.LessThan(gascost) { + gasOutputs := ZeroGasOutputs() + gasOutputs.MinerPenalty = minerPenaltyAmount + return &ApplyRet{ + MessageReceipt: types.MessageReceipt{ + ExitCode: exitcode.SysErrSenderStateInvalid, + GasUsed: 0, + }, + ActorErr: aerrors.Newf(exitcode.SysErrSenderStateInvalid, + "actor balance less than needed: %s < %s", types.FIL(fromActor.Balance), types.FIL(gascost)), + GasCosts: gasOutputs, + Duration: time.Since(start), + }, nil + } + gasHolder := &types.Actor{Balance: types.NewInt(0)} + if err := vm.transferToGasHolder(msg.From, gasHolder, gascost); err != nil { + return nil, xerrors.Errorf("failed to withdraw gas funds: %w", err) + } + + if err := vm.incrementNonce(msg.From); err != nil { + return nil, err + } + + if err := st.Snapshot(ctx); err != nil { + return nil, xerrors.Errorf("snapshot failed: %w", err) + } + defer st.ClearSnapshot() + + ret, actorErr, rt := vm.send(ctx, msg, nil, &msgGas, start) if aerrors.IsFatal(actorErr) { return nil, xerrors.Errorf("[from=%s,to=%s,n=%d,m=%d,h=%d] fatal error: %w", msg.From, msg.To, msg.Nonce, msg.Method, vm.blockHeight, actorErr) } + if actorErr != nil { - log.Warnf("[from=%s,to=%s,n=%d,m=%d,h=%d] Send actor error: %+v", msg.From, msg.To, msg.Nonce, msg.Method, vm.blockHeight, actorErr) + log.Warnw("Send actor error", "from", msg.From, "to", msg.To, "nonce", msg.Nonce, "method", msg.Method, "height", vm.blockHeight, "error", fmt.Sprintf("%+v", actorErr)) } - var errcode uint8 - var gasUsed types.BigInt + if actorErr != nil && len(ret) != 0 { + // This should not happen, something is wonky + return nil, xerrors.Errorf("message invocation errored, but had a return value anyway: %w", actorErr) + } + + if rt == nil { + return nil, xerrors.Errorf("send returned nil runtime, send error was: %s", actorErr) + } + + if len(ret) != 0 { + // safely override actorErr since it must be 
nil + actorErr = rt.chargeGasSafe(rt.Pricelist().OnChainReturnValue(len(ret))) + if actorErr != nil { + ret = nil + } + } + + var errcode exitcode.ExitCode + var gasUsed int64 if errcode = aerrors.RetCode(actorErr); errcode != 0 { - gasUsed = msg.GasLimit // revert all state changes since snapshot if err := st.Revert(); err != nil { return nil, xerrors.Errorf("revert state failed: %w", err) } - } else { - // refund unused gas - gasUsed = vmctx.GasUsed() - refund := types.BigMul(types.BigSub(msg.GasLimit, gasUsed), msg.GasPrice) - if err := Transfer(gasHolder, fromActor, refund); err != nil { - return nil, xerrors.Errorf("failed to refund gas") - } } - miner, err := st.GetActor(vm.blockMiner) - if err != nil { - return nil, xerrors.Errorf("getting block miner actor (%s) failed: %w", vm.blockMiner, err) + rt.finilizeGasTracing() + + gasUsed = rt.gasUsed + if gasUsed < 0 { + gasUsed = 0 + } + gasOutputs := ComputeGasOutputs(gasUsed, msg.GasLimit, vm.baseFee, msg.GasFeeCap, msg.GasPremium) + + if err := vm.transferFromGasHolder(builtin.BurntFundsActorAddr, gasHolder, + gasOutputs.BaseFeeBurn); err != nil { + return nil, xerrors.Errorf("failed to burn base fee: %w", err) } - // TODO: support multiple blocks in a tipset - // TODO: actually wire this up (miner is undef for now) - gasReward := types.BigMul(msg.GasPrice, gasUsed) - if err := Transfer(gasHolder, miner, gasReward); err != nil { + if err := vm.transferFromGasHolder(reward.Address, gasHolder, gasOutputs.MinerTip); err != nil { return nil, xerrors.Errorf("failed to give miner gas reward: %w", err) } + if err := vm.transferFromGasHolder(builtin.BurntFundsActorAddr, gasHolder, + gasOutputs.OverEstimationBurn); err != nil { + return nil, xerrors.Errorf("failed to burn overestimation fee: %w", err) + } + + // refund unused gas + if err := vm.transferFromGasHolder(msg.From, gasHolder, gasOutputs.Refund); err != nil { + return nil, xerrors.Errorf("failed to refund gas: %w", err) + } + if types.BigCmp(types.NewInt(0), 
gasHolder.Balance) != 0 { return nil, xerrors.Errorf("gas handling math is wrong") } @@ -507,14 +529,13 @@ func (vm *VM) ApplyMessage(ctx context.Context, msg *types.Message) (*ApplyRet, Return: ret, GasUsed: gasUsed, }, - ActorErr: actorErr, + ActorErr: actorErr, + ExecutionTrace: rt.executionTrace, + GasCosts: gasOutputs, + Duration: time.Since(start), }, nil } -func (vm *VM) SetBlockMiner(m address.Address) { - vm.blockMiner = m -} - func (vm *VM) ActorBalance(addr address.Address) (types.BigInt, aerrors.ActorError) { act, err := vm.cstate.GetActor(addr) if err != nil { @@ -531,30 +552,77 @@ func (vm *VM) Flush(ctx context.Context) (cid.Cid, error) { from := vm.buf to := vm.buf.Read() - root, err := vm.cstate.Flush() + root, err := vm.cstate.Flush(ctx) if err != nil { return cid.Undef, xerrors.Errorf("flushing vm: %w", err) } - if err := Copy(from, to, root); err != nil { + if err := Copy(ctx, from, to, root); err != nil { return cid.Undef, xerrors.Errorf("copying tree: %w", err) } return root, nil } -func linksForObj(blk block.Block) ([]cid.Cid, error) { +// MutateState usage: MutateState(ctx, idAddr, func(cst cbor.IpldStore, st *ActorStateType) error {...}) +func (vm *VM) MutateState(ctx context.Context, addr address.Address, fn interface{}) error { + act, err := vm.cstate.GetActor(addr) + if err != nil { + return xerrors.Errorf("actor not found: %w", err) + } + + st := reflect.New(reflect.TypeOf(fn).In(1).Elem()) + if err := vm.cst.Get(ctx, act.Head, st.Interface()); err != nil { + return xerrors.Errorf("read actor head: %w", err) + } + + out := reflect.ValueOf(fn).Call([]reflect.Value{reflect.ValueOf(vm.cst), st}) + if !out[0].IsNil() && out[0].Interface().(error) != nil { + return out[0].Interface().(error) + } + + head, err := vm.cst.Put(ctx, st.Interface()) + if err != nil { + return xerrors.Errorf("put new actor head: %w", err) + } + + act.Head = head + + if err := vm.cstate.SetActor(addr, act); err != nil { + return xerrors.Errorf("set actor: %w", err) 
+ } + + return nil +} + +func linksForObj(blk block.Block, cb func(cid.Cid)) error { switch blk.Cid().Prefix().Codec { case cid.DagCBOR: - return cbg.ScanForLinks(bytes.NewReader(blk.RawData())) + err := cbg.ScanForLinks(bytes.NewReader(blk.RawData()), cb) + if err != nil { + return xerrors.Errorf("cbg.ScanForLinks: %w", err) + } + return nil + case cid.Raw: + // We implicitly have all children of raw blocks. + return nil default: - return nil, xerrors.Errorf("vm flush copy method only supports dag cbor") + return xerrors.Errorf("vm flush copy method only supports dag cbor") } } -func Copy(from, to blockstore.Blockstore, root cid.Cid) error { +func Copy(ctx context.Context, from, to blockstore.Blockstore, root cid.Cid) error { + ctx, span := trace.StartSpan(ctx, "vm.Copy") // nolint + defer span.End() + + var numBlocks int + var totalCopySize int + var batch []block.Block batchCp := func(blk block.Block) error { + numBlocks++ + totalCopySize += len(blk.RawData()) + batch = append(batch, blk) if len(batch) > 100 { if err := to.PutMany(batch); err != nil { @@ -566,7 +634,7 @@ func Copy(from, to blockstore.Blockstore, root cid.Cid) error { } if err := copyRec(from, to, root, batchCp); err != nil { - return err + return xerrors.Errorf("copyRec: %w", err) } if len(batch) > 0 { @@ -575,6 +643,11 @@ func Copy(from, to blockstore.Blockstore, root cid.Cid) error { } } + span.AddAttributes( + trace.Int64Attribute("numBlocks", int64(numBlocks)), + trace.Int64Attribute("copySize", int64(totalCopySize)), + ) + return nil } @@ -589,31 +662,50 @@ func copyRec(from, to blockstore.Blockstore, root cid.Cid, cp func(block.Block) return xerrors.Errorf("get %s failed: %w", root, err) } - links, err := linksForObj(blk) - if err != nil { - return err - } - - for _, link := range links { - if link.Prefix().MhType == 0 { - continue + var lerr error + err = linksForObj(blk, func(link cid.Cid) { + if lerr != nil { + // Theres no erorr return on linksForObj callback :( + return } - has, err 
:= to.Has(link) - if err != nil { - return err + prefix := link.Prefix() + if prefix.Codec == cid.FilCommitmentSealed || prefix.Codec == cid.FilCommitmentUnsealed { + return } - if has { - continue + + // We always have blocks inlined into CIDs, but we may not have their children. + if prefix.MhType == mh.IDENTITY { + // Unless the inlined block has no children. + if prefix.Codec == cid.Raw { + return + } + } else { + // If we have an object, we already have its children, skip the object. + has, err := to.Has(link) + if err != nil { + lerr = xerrors.Errorf("has: %w", err) + return + } + if has { + return + } } if err := copyRec(from, to, link, cp); err != nil { - return err + lerr = err + return } + }) + if err != nil { + return xerrors.Errorf("linksForObj (%x): %w", blk.RawData(), err) + } + if lerr != nil { + return lerr } if err := cp(blk); err != nil { - return err + return xerrors.Errorf("copy: %w", err) } return nil } @@ -622,52 +714,133 @@ func (vm *VM) StateTree() types.StateTree { return vm.cstate } -func (vm *VM) SetBlockHeight(h uint64) { +func (vm *VM) SetBlockHeight(h abi.ChainEpoch) { vm.blockHeight = h } -func (vm *VM) Invoke(act *types.Actor, vmctx *VMContext, method uint64, params []byte) ([]byte, aerrors.ActorError) { - ctx, span := trace.StartSpan(vmctx.ctx, "vm.Invoke") +func (vm *VM) Invoke(act *types.Actor, rt *Runtime, method abi.MethodNum, params []byte) ([]byte, aerrors.ActorError) { + ctx, span := trace.StartSpan(rt.ctx, "vm.Invoke") defer span.End() if span.IsRecordingEvents() { span.AddAttributes( - trace.StringAttribute("to", vmctx.Message().To.String()), + trace.StringAttribute("to", rt.Receiver().String()), trace.Int64Attribute("method", int64(method)), - trace.StringAttribute("value", vmctx.Message().Value.String()), + trace.StringAttribute("value", rt.ValueReceived().String()), ) } var oldCtx context.Context - oldCtx, vmctx.ctx = vmctx.ctx, ctx + oldCtx, rt.ctx = rt.ctx, ctx defer func() { - vmctx.ctx = oldCtx + rt.ctx = oldCtx }() 
- if err := vmctx.ChargeGas(gasInvoke); err != nil { - return nil, aerrors.Wrap(err, "invokeing") - } - ret, err := vm.inv.Invoke(act, vmctx, method, params) + ret, err := vm.inv.Invoke(act.Code, rt, method, params) if err != nil { return nil, err } return ret, nil } -func Transfer(from, to *types.Actor, amt types.BigInt) error { +func (vm *VM) SetInvoker(i *Invoker) { + vm.inv = i +} + +func (vm *VM) GetNtwkVersion(ctx context.Context, ce abi.ChainEpoch) network.Version { + return vm.ntwkVersion(ctx, ce) +} + +func (vm *VM) GetCircSupply(ctx context.Context) (abi.TokenAmount, error) { + return vm.circSupplyCalc(ctx, vm.blockHeight, vm.cstate) +} + +func (vm *VM) incrementNonce(addr address.Address) error { + return vm.cstate.MutateActor(addr, func(a *types.Actor) error { + a.Nonce++ + return nil + }) +} + +func (vm *VM) transfer(from, to address.Address, amt types.BigInt) aerrors.ActorError { if from == to { return nil } - if amt.LessThan(types.NewInt(0)) { - return xerrors.Errorf("attempted to transfer negative value") + fromID, err := vm.cstate.LookupID(from) + if err != nil { + return aerrors.Fatalf("transfer failed when resolving sender address: %s", err) } - if err := deductFunds(from, amt); err != nil { - return err + toID, err := vm.cstate.LookupID(to) + if err != nil { + return aerrors.Fatalf("transfer failed when resolving receiver address: %s", err) } - depositFunds(to, amt) + + if fromID == toID { + return nil + } + + if amt.LessThan(types.NewInt(0)) { + return aerrors.Newf(exitcode.SysErrForbidden, "attempted to transfer negative value: %s", amt) + } + + f, err := vm.cstate.GetActor(fromID) + if err != nil { + return aerrors.Fatalf("transfer failed when retrieving sender actor: %s", err) + } + + t, err := vm.cstate.GetActor(toID) + if err != nil { + return aerrors.Fatalf("transfer failed when retrieving receiver actor: %s", err) + } + + if err := deductFunds(f, amt); err != nil { + return aerrors.Newf(exitcode.SysErrInsufficientFunds, "transfer failed 
when deducting funds (%s): %s", types.FIL(amt), err) + } + depositFunds(t, amt) + + if err := vm.cstate.SetActor(fromID, f); err != nil { + return aerrors.Fatalf("transfer failed when setting receiver actor: %s", err) + } + + if err := vm.cstate.SetActor(toID, t); err != nil { + return aerrors.Fatalf("transfer failed when setting sender actor: %s", err) + } + return nil } +func (vm *VM) transferToGasHolder(addr address.Address, gasHolder *types.Actor, amt types.BigInt) error { + if amt.LessThan(types.NewInt(0)) { + return xerrors.Errorf("attempted to transfer negative value to gas holder") + } + + return vm.cstate.MutateActor(addr, func(a *types.Actor) error { + if err := deductFunds(a, amt); err != nil { + return err + } + depositFunds(gasHolder, amt) + return nil + }) +} + +func (vm *VM) transferFromGasHolder(addr address.Address, gasHolder *types.Actor, amt types.BigInt) error { + if amt.LessThan(types.NewInt(0)) { + return xerrors.Errorf("attempted to transfer negative value from gas holder") + } + + if amt.Equals(big.NewInt(0)) { + return nil + } + + return vm.cstate.MutateActor(addr, func(a *types.Actor) error { + if err := deductFunds(gasHolder, amt); err != nil { + return err + } + depositFunds(a, amt) + return nil + }) +} + func deductFunds(act *types.Actor, amt types.BigInt) error { if act.Balance.LessThan(amt) { return fmt.Errorf("not enough funds") @@ -680,16 +853,3 @@ func deductFunds(act *types.Actor, amt types.BigInt) error { func depositFunds(act *types.Actor, amt types.BigInt) { act.Balance = types.BigAdd(act.Balance, amt) } - -var miningRewardTotal = types.FromFil(build.MiningRewardTotal) -var blocksPerEpoch = types.NewInt(build.BlocksPerEpoch) - -// MiningReward returns correct mining reward -// coffer is amount of FIL in NetworkAddress -func MiningReward(remainingReward types.BigInt) types.BigInt { - ci := big.NewInt(0).Set(remainingReward.Int) - res := ci.Mul(ci, build.InitialReward) - res = res.Div(res, miningRewardTotal.Int) - res = 
res.Div(res, blocksPerEpoch.Int) - return types.BigInt{res} -} diff --git a/chain/vm/vm_test.go b/chain/vm/vm_test.go deleted file mode 100644 index 6ec007576..000000000 --- a/chain/vm/vm_test.go +++ /dev/null @@ -1,30 +0,0 @@ -package vm - -import ( - "fmt" - "math/big" - "testing" - - "github.com/filecoin-project/lotus/build" - "github.com/filecoin-project/lotus/chain/types" -) - -const HalvingPeriodEpochs = 6 * 365 * 24 * 60 * 2 - -func TestBlockReward(t *testing.T) { - coffer := types.FromFil(build.MiningRewardTotal).Int - sum := new(big.Int) - N := HalvingPeriodEpochs - for i := 0; i < N; i++ { - a := MiningReward(types.BigInt{coffer}) - sum = sum.Add(sum, a.Int) - coffer = coffer.Sub(coffer, a.Int) - } - - //sum = types.BigMul(sum, types.NewInt(60)) - - fmt.Println("After a halving period") - fmt.Printf("Total reward: %d\n", build.MiningRewardTotal) - fmt.Printf("Remaining: %s\n", types.BigDiv(types.BigInt{coffer}, types.NewInt(build.FilecoinPrecision))) - fmt.Printf("Given out: %s\n", types.BigDiv(types.BigInt{sum}, types.NewInt(build.FilecoinPrecision))) -} diff --git a/chain/wallet/wallet.go b/chain/wallet/wallet.go index 9472d36e0..7cdb1929e 100644 --- a/chain/wallet/wallet.go +++ b/chain/wallet/wallet.go @@ -2,27 +2,31 @@ package wallet import ( "context" - "fmt" "sort" "strings" "sync" - bls "github.com/filecoin-project/filecoin-ffi" - - logging "github.com/ipfs/go-log" - "github.com/minio/blake2b-simd" + "github.com/filecoin-project/go-state-types/crypto" + logging "github.com/ipfs/go-log/v2" "golang.org/x/xerrors" "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-crypto" + + _ "github.com/filecoin-project/lotus/lib/sigs/bls" // enable bls signatures + _ "github.com/filecoin-project/lotus/lib/sigs/secp" // enable secp signatures + "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/lib/sigs" ) var log = logging.Logger("wallet") const ( - KNamePrefix = "wallet-" - KDefault = "default" + 
KNamePrefix = "wallet-" + KTrashPrefix = "trash-" + KDefault = "default" + KTBLS = "bls" + KTSecp256k1 = "secp256k1" ) type Wallet struct { @@ -52,7 +56,7 @@ func KeyWallet(keys ...*Key) *Wallet { } } -func (w *Wallet) Sign(ctx context.Context, addr address.Address, msg []byte) (*types.Signature, error) { +func (w *Wallet) Sign(ctx context.Context, addr address.Address, msg []byte) (*crypto.Signature, error) { ki, err := w.findKey(addr) if err != nil { return nil, err @@ -61,31 +65,7 @@ func (w *Wallet) Sign(ctx context.Context, addr address.Address, msg []byte) (*t return nil, xerrors.Errorf("signing using key '%s': %w", addr.String(), types.ErrKeyInfoNotFound) } - switch ki.Type { - case types.KTSecp256k1: - b2sum := blake2b.Sum256(msg) - sig, err := crypto.Sign(ki.PrivateKey, b2sum[:]) - if err != nil { - return nil, err - } - - return &types.Signature{ - Type: types.KTSecp256k1, - Data: sig, - }, nil - case types.KTBLS: - var pk bls.PrivateKey - copy(pk[:], ki.PrivateKey) - sig := bls.PrivateKeySign(pk, msg) - - return &types.Signature{ - Type: types.KTBLS, - Data: sig[:], - }, nil - - default: - return nil, fmt.Errorf("cannot sign with unsupported key type: %q", ki.Type) - } + return sigs.Sign(ActSigType(ki.Type), ki.PrivateKey, msg) } func (w *Wallet) findKey(addr address.Address) (*Key, error) { @@ -203,33 +183,19 @@ func (w *Wallet) SetDefault(a address.Address) error { return nil } -func GenerateKey(typ string) (*Key, error) { - switch typ { - case types.KTSecp256k1: - priv, err := crypto.GenerateKey() - if err != nil { - return nil, err - } - ki := types.KeyInfo{ - Type: typ, - PrivateKey: priv, - } - - return NewKey(ki) - case types.KTBLS: - priv := bls.PrivateKeyGenerate() - ki := types.KeyInfo{ - Type: typ, - PrivateKey: priv[:], - } - - return NewKey(ki) - default: - return nil, xerrors.Errorf("invalid key type: %s", typ) +func GenerateKey(typ crypto.SigType) (*Key, error) { + pk, err := sigs.Generate(typ) + if err != nil { + return nil, err } + ki := 
types.KeyInfo{ + Type: kstoreSigType(typ), + PrivateKey: pk, + } + return NewKey(ki) } -func (w *Wallet) GenerateKey(typ string) (address.Address, error) { +func (w *Wallet) GenerateKey(typ crypto.SigType) (address.Address, error) { w.lk.Lock() defer w.lk.Unlock() @@ -265,6 +231,23 @@ func (w *Wallet) HasKey(addr address.Address) (bool, error) { return k != nil, nil } +func (w *Wallet) DeleteKey(addr address.Address) error { + k, err := w.findKey(addr) + if err != nil { + return xerrors.Errorf("failed to delete key %s : %w", addr, err) + } + + if err := w.keystore.Put(KTrashPrefix+k.Address.String(), k.KeyInfo); err != nil { + return xerrors.Errorf("failed to mark key %s as trashed: %w", addr, err) + } + + if err := w.keystore.Delete(KNamePrefix + k.Address.String()); err != nil { + return xerrors.Errorf("failed to delete key %s: %w", addr, err) + } + + return nil +} + type Key struct { types.KeyInfo @@ -277,31 +260,48 @@ func NewKey(keyinfo types.KeyInfo) (*Key, error) { KeyInfo: keyinfo, } - switch k.Type { - case types.KTSecp256k1: - k.PublicKey = crypto.PublicKey(k.PrivateKey) + var err error + k.PublicKey, err = sigs.ToPublic(ActSigType(k.Type), k.PrivateKey) + if err != nil { + return nil, err + } - var err error + switch k.Type { + case KTSecp256k1: k.Address, err = address.NewSecp256k1Address(k.PublicKey) if err != nil { return nil, xerrors.Errorf("converting Secp256k1 to address: %w", err) } - - case types.KTBLS: - var pk bls.PrivateKey - copy(pk[:], k.PrivateKey) - pub := bls.PrivateKeyPublicKey(pk) - k.PublicKey = pub[:] - - var err error + case KTBLS: k.Address, err = address.NewBLSAddress(k.PublicKey) if err != nil { return nil, xerrors.Errorf("converting BLS to address: %w", err) } - default: return nil, xerrors.Errorf("unknown key type") } return k, nil } + +func kstoreSigType(typ crypto.SigType) string { + switch typ { + case crypto.SigTypeBLS: + return KTBLS + case crypto.SigTypeSecp256k1: + return KTSecp256k1 + default: + return "" + } +} + +func 
ActSigType(typ string) crypto.SigType { + switch typ { + case KTBLS: + return crypto.SigTypeBLS + case KTSecp256k1: + return crypto.SigTypeSecp256k1 + default: + return 0 + } +} diff --git a/cli/auth.go b/cli/auth.go index b912dda4b..ba20b2bcc 100644 --- a/cli/auth.go +++ b/cli/auth.go @@ -1,12 +1,15 @@ package cli import ( - "errors" "fmt" - "gopkg.in/urfave/cli.v2" + "github.com/urfave/cli/v2" + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-jsonrpc/auth" "github.com/filecoin-project/lotus/api/apistruct" + "github.com/filecoin-project/lotus/node/repo" ) var authCmd = &cli.Command{ @@ -14,6 +17,7 @@ var authCmd = &cli.Command{ Usage: "Manage RPC permissions", Subcommands: []*cli.Command{ authCreateAdminToken, + authApiInfoToken, }, } @@ -37,13 +41,13 @@ var authCreateAdminToken = &cli.Command{ ctx := ReqContext(cctx) if !cctx.IsSet("perm") { - return errors.New("--perm flag not set") + return xerrors.New("--perm flag not set") } perm := cctx.String("perm") idx := 0 for i, p := range apistruct.AllPermissions { - if perm == p { + if auth.Permission(perm) == p { idx = i + 1 } } @@ -64,3 +68,66 @@ var authCreateAdminToken = &cli.Command{ return nil }, } + +var authApiInfoToken = &cli.Command{ + Name: "api-info", + Usage: "Get token with API info required to connect to this node", + Flags: []cli.Flag{ + &cli.StringFlag{ + Name: "perm", + Usage: "permission to assign to the token, one of: read, write, sign, admin", + }, + }, + + Action: func(cctx *cli.Context) error { + napi, closer, err := GetAPI(cctx) + if err != nil { + return err + } + defer closer() + + ctx := ReqContext(cctx) + + if !cctx.IsSet("perm") { + return xerrors.New("--perm flag not set") + } + + perm := cctx.String("perm") + idx := 0 + for i, p := range apistruct.AllPermissions { + if auth.Permission(perm) == p { + idx = i + 1 + } + } + + if idx == 0 { + return fmt.Errorf("--perm flag has to be one of: %s", apistruct.AllPermissions) + } + + // slice on [:idx] so for example: 'sign' gives you 
[read, write, sign] + token, err := napi.AuthNew(ctx, apistruct.AllPermissions[:idx]) + if err != nil { + return err + } + + ti, ok := cctx.App.Metadata["repoType"] + if !ok { + log.Errorf("unknown repo type, are you sure you want to use GetAPI?") + ti = repo.FullNode + } + t, ok := ti.(repo.RepoType) + if !ok { + log.Errorf("repoType type does not match the type of repo.RepoType") + } + + ainfo, err := GetAPIInfo(cctx, t) + if err != nil { + return xerrors.Errorf("could not get API info: %w", err) + } + + // TODO: Log in audit log when it is implemented + + fmt.Printf("%s=%s:%s\n", envForRepo(t), string(token), ainfo.Addr) + return nil + }, +} diff --git a/cli/chain.go b/cli/chain.go index cb4133d49..73b25324b 100644 --- a/cli/chain.go +++ b/cli/chain.go @@ -1,17 +1,35 @@ package cli import ( + "bytes" "context" "encoding/json" "fmt" + "os" + "os/exec" + "path" + "strconv" "strings" "time" + "github.com/filecoin-project/go-address" + cborutil "github.com/filecoin-project/go-cbor-util" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/specs-actors/actors/builtin" + "github.com/filecoin-project/specs-actors/actors/builtin/account" + "github.com/filecoin-project/specs-actors/actors/builtin/market" + "github.com/filecoin-project/specs-actors/actors/builtin/miner" + "github.com/filecoin-project/specs-actors/actors/builtin/power" + "github.com/filecoin-project/specs-actors/actors/util/adt" cid "github.com/ipfs/go-cid" + "github.com/urfave/cli/v2" + cbg "github.com/whyrusleeping/cbor-gen" "golang.org/x/xerrors" - "gopkg.in/urfave/cli.v2" "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/chain/actors" types "github.com/filecoin-project/lotus/chain/types" ) @@ -22,10 +40,16 @@ var chainCmd = &cli.Command{ chainHeadCmd, chainGetBlock, chainReadObjCmd, + chainDeleteObjCmd, + chainStatObjCmd, chainGetMsgCmd, chainSetHeadCmd, 
chainListCmd, chainGetCmd, + chainBisectCmd, + chainExportCmd, + slashConsensusFault, + chainGasPriceCmd, }, } @@ -53,8 +77,9 @@ var chainHeadCmd = &cli.Command{ } var chainGetBlock = &cli.Command{ - Name: "getblock", - Usage: "Get a block and print its details", + Name: "getblock", + Usage: "Get a block and print its details", + ArgsUsage: "[blockCid]", Flags: []cli.Flag{ &cli.BoolFlag{ Name: "raw", @@ -143,8 +168,9 @@ func apiMsgCids(in []api.Message) []cid.Cid { } var chainReadObjCmd = &cli.Command{ - Name: "read-obj", - Usage: "Read the raw bytes of an object", + Name: "read-obj", + Usage: "Read the raw bytes of an object", + ArgsUsage: "[objectCid]", Action: func(cctx *cli.Context) error { api, closer, err := GetFullNodeAPI(cctx) if err != nil { @@ -168,9 +194,16 @@ var chainReadObjCmd = &cli.Command{ }, } -var chainGetMsgCmd = &cli.Command{ - Name: "getmessage", - Usage: "Get and print a message by its cid", +var chainDeleteObjCmd = &cli.Command{ + Name: "delete-obj", + Usage: "Delete an object from the chain blockstore", + Description: "WARNING: Removing wrong objects from the chain blockstore may lead to sync issues", + ArgsUsage: "[objectCid]", + Flags: []cli.Flag{ + &cli.BoolFlag{ + Name: "really-do-it", + }, + }, Action: func(cctx *cli.Context) error { api, closer, err := GetFullNodeAPI(cctx) if err != nil { @@ -179,6 +212,88 @@ var chainGetMsgCmd = &cli.Command{ defer closer() ctx := ReqContext(cctx) + c, err := cid.Decode(cctx.Args().First()) + if err != nil { + return fmt.Errorf("failed to parse cid input: %s", err) + } + + if !cctx.Bool("really-do-it") { + return xerrors.Errorf("pass the --really-do-it flag to proceed") + } + + err = api.ChainDeleteObj(ctx, c) + if err != nil { + return err + } + + fmt.Printf("Obj %s deleted\n", c.String()) + return nil + }, +} + +var chainStatObjCmd = &cli.Command{ + Name: "stat-obj", + Usage: "Collect size and ipld link counts for objs", + ArgsUsage: "[cid]", + Description: `Collect object size and ipld link count 
for an object. + + When a base is provided it will be walked first, and all links visisted + will be ignored when the passed in object is walked. +`, + Flags: []cli.Flag{ + &cli.StringFlag{ + Name: "base", + Usage: "ignore links found in this obj", + }, + }, + Action: func(cctx *cli.Context) error { + api, closer, err := GetFullNodeAPI(cctx) + if err != nil { + return err + } + defer closer() + ctx := ReqContext(cctx) + + obj, err := cid.Decode(cctx.Args().First()) + if err != nil { + return fmt.Errorf("failed to parse cid input: %s", err) + } + + base := cid.Undef + if cctx.IsSet("base") { + base, err = cid.Decode(cctx.String("base")) + if err != nil { + return err + } + } + + stats, err := api.ChainStatObj(ctx, obj, base) + if err != nil { + return err + } + + fmt.Printf("Links: %d\n", stats.Links) + fmt.Printf("Size: %s (%d)\n", types.SizeStr(types.NewInt(stats.Size)), stats.Size) + return nil + }, +} + +var chainGetMsgCmd = &cli.Command{ + Name: "getmessage", + Usage: "Get and print a message by its cid", + ArgsUsage: "[messageCid]", + Action: func(cctx *cli.Context) error { + if !cctx.Args().Present() { + return fmt.Errorf("must pass a cid of a message to get") + } + + api, closer, err := GetFullNodeAPI(cctx) + if err != nil { + return err + } + defer closer() + ctx := ReqContext(cctx) + c, err := cid.Decode(cctx.Args().First()) if err != nil { return xerrors.Errorf("failed to parse cid input: %w", err) @@ -212,8 +327,9 @@ var chainGetMsgCmd = &cli.Command{ } var chainSetHeadCmd = &cli.Command{ - Name: "sethead", - Usage: "manually set the local nodes head tipset (Caution: normally only used for recovery)", + Name: "sethead", + Usage: "manually set the local nodes head tipset (Caution: normally only used for recovery)", + ArgsUsage: "[tipsetkey]", Flags: []cli.Flag{ &cli.BoolFlag{ Name: "genesis", @@ -238,10 +354,10 @@ var chainSetHeadCmd = &cli.Command{ ts, err = api.ChainGetGenesis(ctx) } if ts == nil && cctx.IsSet("epoch") { - ts, err = 
api.ChainGetTipSetByHeight(ctx, cctx.Uint64("epoch"), nil) + ts, err = api.ChainGetTipSetByHeight(ctx, abi.ChainEpoch(cctx.Uint64("epoch")), types.EmptyTSK) } if ts == nil { - ts, err = parseTipSet(api, ctx, cctx.Args().Slice()) + ts, err = parseTipSet(ctx, api, cctx.Args().Slice()) } if err != nil { return err @@ -251,7 +367,7 @@ var chainSetHeadCmd = &cli.Command{ return fmt.Errorf("must pass cids for tipset to set as head") } - if err := api.ChainSetHead(ctx, ts); err != nil { + if err := api.ChainSetHead(ctx, ts.Key()); err != nil { return err } @@ -259,25 +375,6 @@ var chainSetHeadCmd = &cli.Command{ }, } -func parseTipSet(api api.FullNode, ctx context.Context, vals []string) (*types.TipSet, error) { - var headers []*types.BlockHeader - for _, c := range vals { - blkc, err := cid.Decode(c) - if err != nil { - return nil, err - } - - bh, err := api.ChainGetBlock(ctx, blkc) - if err != nil { - return nil, err - } - - headers = append(headers, bh) - } - - return types.NewTipSet(headers) -} - var chainListCmd = &cli.Command{ Name: "list", Usage: "View a segment of the chain", @@ -289,6 +386,10 @@ var chainListCmd = &cli.Command{ Usage: "specify the format to print out tipsets", Value: ": (