diff --git a/.circleci/config.yml b/.circleci/config.yml index 880ddb667..4a69a4a49 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -7,7 +7,7 @@ orbs: executors: golang: docker: - - image: circleci/golang:1.15.5 + - image: circleci/golang:1.16.4 resource_class: 2xlarge ubuntu: docker: @@ -112,7 +112,7 @@ jobs: - run: command: make debug - test: &test + test: description: | Run tests with gotestsum. parameters: &test-params @@ -123,23 +123,20 @@ jobs: type: string default: "-timeout 30m" description: Flags passed to go test. - packages: + target: type: string default: "./..." description: Import paths of packages to be tested. - winpost-test: + proofs-log-test: type: string default: "0" - deadline-test: - type: string - default: "0" - test-suite-name: + suite: type: string default: unit description: Test suite name to report to CircleCI. gotestsum-format: type: string - default: pkgname-and-test-fails + default: standard-verbose description: gotestsum format. https://github.com/gotestyourself/gotestsum#format coverage: type: string @@ -147,7 +144,7 @@ jobs: description: Coverage flag. Set to the empty string to disable. codecov-upload: type: boolean - default: false + default: true description: | Upload coverage report to https://codecov.io/. Requires the codecov API token to be set as an environment variable for private projects. 
@@ -165,25 +162,24 @@ jobs: - run: name: go test environment: - LOTUS_TEST_WINDOW_POST: << parameters.winpost-test >> - LOTUS_TEST_DEADLINE_TOGGLING: << parameters.deadline-test >> + TEST_RUSTPROOFS_LOGS: << parameters.proofs-log-test >> SKIP_CONFORMANCE: "1" command: | - mkdir -p /tmp/test-reports/<< parameters.test-suite-name >> + mkdir -p /tmp/test-reports/<< parameters.suite >> mkdir -p /tmp/test-artifacts gotestsum \ --format << parameters.gotestsum-format >> \ - --junitfile /tmp/test-reports/<< parameters.test-suite-name >>/junit.xml \ - --jsonfile /tmp/test-artifacts/<< parameters.test-suite-name >>.json \ + --junitfile /tmp/test-reports/<< parameters.suite >>/junit.xml \ + --jsonfile /tmp/test-artifacts/<< parameters.suite >>.json \ -- \ << parameters.coverage >> \ << parameters.go-test-flags >> \ - << parameters.packages >> + << parameters.target >> no_output_timeout: 30m - store_test_results: path: /tmp/test-reports - store_artifacts: - path: /tmp/test-artifacts/<< parameters.test-suite-name >>.json + path: /tmp/test-artifacts/<< parameters.suite >>.json - when: condition: << parameters.codecov-upload >> steps: @@ -194,24 +190,6 @@ jobs: command: | bash <(curl -s https://codecov.io/bash) - test-chain: - <<: *test - test-node: - <<: *test - test-storage: - <<: *test - test-cli: - <<: *test - test-short: - <<: *test - test-window-post: - <<: *test - test-window-post-dispute: - <<: *test - test-deadline-toggling: - <<: *test - test-terminate: - <<: *test test-conformance: description: | Run tests using a corpus of interoperable test vectors for Filecoin @@ -331,7 +309,7 @@ jobs: - run: cd extern/filecoin-ffi && make - run: name: "go get lotus@master" - command: cd testplans/lotus-soup && go mod edit -replace=github.com/filecoin-project/lotus=../.. + command: cd testplans/lotus-soup && go mod edit -replace=github.com/filecoin-project/lotus=../.. 
&& go mod tidy - run: name: "build lotus-soup testplan" command: pushd testplans/lotus-soup && go build -tags=testground . @@ -379,8 +357,8 @@ jobs: - run: name: Install go command: | - curl -O https://dl.google.com/go/go1.15.5.darwin-amd64.pkg && \ - sudo installer -pkg go1.15.5.darwin-amd64.pkg -target / + curl -O https://dl.google.com/go/go1.16.4.darwin-amd64.pkg && \ + sudo installer -pkg go1.16.4.darwin-amd64.pkg -target / - run: name: Install pkg-config command: HOMEBREW_NO_AUTO_UPDATE=1 brew install pkg-config @@ -456,12 +434,13 @@ jobs: name: prepare workspace command: | mkdir appimage - mv Lotus-latest-x86_64.AppImage appimage + mv Lotus-*.AppImage appimage - persist_to_workspace: root: "." paths: - appimage + gofmt: executor: golang steps: @@ -470,7 +449,7 @@ jobs: - run: command: "! go fmt ./... 2>&1 | read" - cbor-gen-check: + gen-check: executor: golang steps: - install-deps @@ -478,7 +457,10 @@ jobs: - run: make deps - run: go install golang.org/x/tools/cmd/goimports - run: go install github.com/hannahhoward/cbor-gen-for - - run: make type-gen + - run: make gen + - run: git --no-pager diff + - run: git --no-pager diff --quiet + - run: make docsgen-cli - run: git --no-pager diff - run: git --no-pager diff --quiet @@ -561,6 +543,33 @@ jobs: name: Publish release command: ./scripts/publish-release.sh + publish-snapcraft: + description: build and push snapcraft + machine: + image: ubuntu-2004:202104-01 + resource_class: 2xlarge + parameters: + channel: + type: string + default: "edge" + description: snapcraft channel + steps: + - checkout + - run: + name: install snapcraft + command: sudo snap install snapcraft --classic + - run: + name: create snapcraft config file + command: | + mkdir -p ~/.config/snapcraft + echo "$SNAPCRAFT_LOGIN_FILE" | base64 -d > ~/.config/snapcraft/snapcraft.cfg + - run: + name: build snap + command: snapcraft --use-lxd + - run: + name: publish snap + command: snapcraft push *.snap --release << parameters.channel >> + 
build-and-push-image: description: build and push docker images to public AWS ECR registry executor: aws-cli/default @@ -735,61 +744,168 @@ workflows: concurrency: "16" # expend all docker 2xlarge CPUs. - mod-tidy-check - gofmt - - cbor-gen-check + - gen-check - docs-check - test: - codecov-upload: true - test-suite-name: full - - test-chain: - codecov-upload: true - test-suite-name: chain - packages: "./chain/..." - - test-node: - codecov-upload: true - test-suite-name: node - packages: "./node/..." - - test-storage: - codecov-upload: true - test-suite-name: storage - packages: "./storage/... ./extern/..." - - test-cli: - codecov-upload: true - test-suite-name: cli - packages: "./cli/... ./cmd/... ./api/..." - - test-window-post: - codecov-upload: true - go-test-flags: "-run=TestWindowedPost" - winpost-test: "1" - test-suite-name: window-post - - test-window-post-dispute: - codecov-upload: true - go-test-flags: "-run=TestWindowPostDispute" - winpost-test: "1" - test-suite-name: window-post-dispute - - test-terminate: - codecov-upload: true - go-test-flags: "-run=TestTerminate" - winpost-test: "1" - test-suite-name: terminate - - test-deadline-toggling: - codecov-upload: true - go-test-flags: "-run=TestDeadlineToggling" - deadline-test: "1" - test-suite-name: deadline-toggling - - test-short: - go-test-flags: "--timeout 10m --short" - test-suite-name: short - filters: - tags: - only: - - /^v\d+\.\d+\.\d+(-rc\d+)?$/ + name: test-itest-api + suite: itest-api + target: "./itests/api_test.go" + + - test: + name: test-itest-batch_deal + suite: itest-batch_deal + target: "./itests/batch_deal_test.go" + + - test: + name: test-itest-ccupgrade + suite: itest-ccupgrade + target: "./itests/ccupgrade_test.go" + + - test: + name: test-itest-cli + suite: itest-cli + target: "./itests/cli_test.go" + + - test: + name: test-itest-deadlines + suite: itest-deadlines + target: "./itests/deadlines_test.go" + + - test: + name: test-itest-deals_concurrent + suite: itest-deals_concurrent 
+ target: "./itests/deals_concurrent_test.go" + + - test: + name: test-itest-deals_offline + suite: itest-deals_offline + target: "./itests/deals_offline_test.go" + + - test: + name: test-itest-deals_power + suite: itest-deals_power + target: "./itests/deals_power_test.go" + + - test: + name: test-itest-deals_pricing + suite: itest-deals_pricing + target: "./itests/deals_pricing_test.go" + + - test: + name: test-itest-deals_publish + suite: itest-deals_publish + target: "./itests/deals_publish_test.go" + + - test: + name: test-itest-deals + suite: itest-deals + target: "./itests/deals_test.go" + + - test: + name: test-itest-gateway + suite: itest-gateway + target: "./itests/gateway_test.go" + + - test: + name: test-itest-get_messages_in_ts + suite: itest-get_messages_in_ts + target: "./itests/get_messages_in_ts_test.go" + + - test: + name: test-itest-multisig + suite: itest-multisig + target: "./itests/multisig_test.go" + + - test: + name: test-itest-nonce + suite: itest-nonce + target: "./itests/nonce_test.go" + + - test: + name: test-itest-paych_api + suite: itest-paych_api + target: "./itests/paych_api_test.go" + + - test: + name: test-itest-paych_cli + suite: itest-paych_cli + target: "./itests/paych_cli_test.go" + + - test: + name: test-itest-sdr_upgrade + suite: itest-sdr_upgrade + target: "./itests/sdr_upgrade_test.go" + + - test: + name: test-itest-sector_finalize_early + suite: itest-sector_finalize_early + target: "./itests/sector_finalize_early_test.go" + + - test: + name: test-itest-sector_miner_collateral + suite: itest-sector_miner_collateral + target: "./itests/sector_miner_collateral_test.go" + + - test: + name: test-itest-sector_pledge + suite: itest-sector_pledge + target: "./itests/sector_pledge_test.go" + + - test: + name: test-itest-sector_terminate + suite: itest-sector_terminate + target: "./itests/sector_terminate_test.go" + + - test: + name: test-itest-tape + suite: itest-tape + target: "./itests/tape_test.go" + + - test: + name: 
test-itest-verifreg + suite: itest-verifreg + target: "./itests/verifreg_test.go" + + - test: + name: test-itest-wdpost_dispute + suite: itest-wdpost_dispute + target: "./itests/wdpost_dispute_test.go" + + - test: + name: test-itest-wdpost + suite: itest-wdpost + target: "./itests/wdpost_test.go" + + - test: + name: test-unit-cli + suite: utest-unit-cli + target: "./cli/... ./cmd/... ./api/..." + - test: + name: test-unit-node + suite: utest-unit-node + target: "./node/..." + - test: + name: test-unit-rest + suite: utest-unit-rest + target: "./api/... ./blockstore/... ./build/... ./chain/... ./cli/... ./cmd/... ./conformance/... ./extern/... ./gateway/... ./journal/... ./lib/... ./markets/... ./node/... ./paychmgr/... ./storage/... ./tools/..." + - test: + name: test-unit-storage + suite: utest-unit-storage + target: "./storage/... ./extern/..." + - test: + go-test-flags: "-run=TestMulticoreSDR" + suite: multicore-sdr-check + target: "./extern/sector-storage/ffiwrapper" + proofs-log-test: "1" - test-conformance: - test-suite-name: conformance - packages: "./conformance" + suite: conformance + codecov-upload: false + target: "./conformance" - test-conformance: name: test-conformance-bleeding-edge - test-suite-name: conformance-bleeding-edge - packages: "./conformance" + codecov-upload: false + suite: conformance-bleeding-edge + target: "./conformance" vectors-branch: master - trigger-testplans: filters: @@ -798,37 +914,27 @@ workflows: - master - build-debug - build-all: - requires: - - test-short filters: tags: only: - /^v\d+\.\d+\.\d+(-rc\d+)?$/ - build-ntwk-calibration: - requires: - - test-short filters: tags: only: - /^v\d+\.\d+\.\d+(-rc\d+)?$/ - build-ntwk-butterfly: - requires: - - test-short filters: tags: only: - /^v\d+\.\d+\.\d+(-rc\d+)?$/ - build-ntwk-nerpa: - requires: - - test-short filters: tags: only: - /^v\d+\.\d+\.\d+(-rc\d+)?$/ - build-lotus-soup - build-macos: - requires: - - test-short filters: branches: ignore: @@ -837,8 +943,6 @@ workflows: 
only: - /^v\d+\.\d+\.\d+(-rc\d+)?$/ - build-appimage: - requires: - - test-short filters: branches: ignore: @@ -858,7 +962,6 @@ workflows: tags: only: - /^v\d+\.\d+\.\d+(-rc\d+)?$/ - - build-and-push-image: dockerfile: Dockerfile.lotus path: . @@ -904,3 +1007,26 @@ workflows: tags: only: - /^v\d+\.\d+\.\d+(-rc\d+)?$/ + - publish-snapcraft: + name: publish-snapcraft-stable + channel: stable + filters: + branches: + ignore: + - /.*/ + tags: + only: + - /^v\d+\.\d+\.\d+(-rc\d+)?$/ + + nightly: + triggers: + - schedule: + cron: "0 0 * * *" + filters: + branches: + only: + - master + jobs: + - publish-snapcraft: + name: publish-snapcraft-nightly + channel: edge diff --git a/.circleci/gen.go b/.circleci/gen.go new file mode 100644 index 000000000..844348e29 --- /dev/null +++ b/.circleci/gen.go @@ -0,0 +1,136 @@ +package main + +import ( + "embed" + "fmt" + "os" + "path/filepath" + "sort" + "strings" + "text/template" +) + +//go:generate go run ./gen.go .. + +//go:embed template.yml +var templateFile embed.FS + +type ( + dirs = []string + suite = string +) + +// groupedUnitTests maps suite names to top-level directories that should be +// included in that suite. The program adds an implicit group "rest" that +// includes all other top-level directories. +var groupedUnitTests = map[suite]dirs{ + "unit-node": {"node"}, + "unit-storage": {"storage", "extern"}, + "unit-cli": {"cli", "cmd", "api"}, +} + +func main() { + if len(os.Args) != 2 { + panic("expected path to repo as argument") + } + + repo := os.Args[1] + + tmpl := template.New("template.yml") + tmpl.Delims("[[", "]]") + tmpl.Funcs(template.FuncMap{ + "stripSuffix": func(in string) string { + return strings.TrimSuffix(in, "_test.go") + }, + }) + tmpl = template.Must(tmpl.ParseFS(templateFile, "*")) + + // list all itests. + itests, err := filepath.Glob(filepath.Join(repo, "./itests/*_test.go")) + if err != nil { + panic(err) + } + + // strip the dir from all entries. 
+ for i, f := range itests { + itests[i] = filepath.Base(f) + } + + // calculate the exclusion set of unit test directories to exclude because + // they are already included in a grouped suite. + var excluded = map[string]struct{}{} + for _, ss := range groupedUnitTests { + for _, s := range ss { + e, err := filepath.Abs(filepath.Join(repo, s)) + if err != nil { + panic(err) + } + excluded[e] = struct{}{} + } + } + + // all unit tests top-level dirs that are not itests, nor included in other suites. + var rest = map[string]struct{}{} + err = filepath.Walk(repo, func(path string, f os.FileInfo, err error) error { + // include all tests that aren't in the itests directory. + if strings.Contains(path, "itests") { + return filepath.SkipDir + } + // exclude all tests included in other suites + if f.IsDir() { + if _, ok := excluded[path]; ok { + return filepath.SkipDir + } + } + if strings.HasSuffix(path, "_test.go") { + rel, err := filepath.Rel(repo, path) + if err != nil { + panic(err) + } + // take the first directory + rest[strings.Split(rel, string(os.PathSeparator))[0]] = struct{}{} + } + return err + }) + if err != nil { + panic(err) + } + + // add other directories to a 'rest' suite. + for k := range rest { + groupedUnitTests["unit-rest"] = append(groupedUnitTests["unit-rest"], k) + } + + // map iteration guarantees no order, so sort the array in-place. + sort.Strings(groupedUnitTests["unit-rest"]) + + // form the input data. + type data struct { + ItestFiles []string + UnitSuites map[string]string + } + in := data{ + ItestFiles: itests, + UnitSuites: func() map[string]string { + ret := make(map[string]string) + for name, dirs := range groupedUnitTests { + for i, d := range dirs { + dirs[i] = fmt.Sprintf("./%s/...", d) // turn into package + } + ret[name] = strings.Join(dirs, " ") + } + return ret + }(), + } + + out, err := os.Create("./config.yml") + if err != nil { + panic(err) + } + defer out.Close() + + // execute the template. 
+ if err := tmpl.Execute(out, in); err != nil { + panic(err) + } +} diff --git a/.circleci/template.yml b/.circleci/template.yml new file mode 100644 index 000000000..fb59f23ea --- /dev/null +++ b/.circleci/template.yml @@ -0,0 +1,902 @@ +version: 2.1 +orbs: + go: gotest/tools@0.0.13 + aws-cli: circleci/aws-cli@1.3.2 + packer: salaxander/packer@0.0.3 + +executors: + golang: + docker: + - image: circleci/golang:1.16.4 + resource_class: 2xlarge + ubuntu: + docker: + - image: ubuntu:20.04 + +commands: + install-deps: + steps: + - go/install-ssh + - go/install: {package: git} + prepare: + parameters: + linux: + default: true + description: is a linux build environment? + type: boolean + darwin: + default: false + description: is a darwin build environment? + type: boolean + steps: + - checkout + - git_fetch_all_tags + - checkout + - when: + condition: << parameters.linux >> + steps: + - run: sudo apt-get update + - run: sudo apt-get install ocl-icd-opencl-dev libhwloc-dev + - run: git submodule sync + - run: git submodule update --init + download-params: + steps: + - restore_cache: + name: Restore parameters cache + keys: + - 'v25-2k-lotus-params' + paths: + - /var/tmp/filecoin-proof-parameters/ + - run: ./lotus fetch-params 2048 + - save_cache: + name: Save parameters cache + key: 'v25-2k-lotus-params' + paths: + - /var/tmp/filecoin-proof-parameters/ + install_ipfs: + steps: + - run: | + apt update + apt install -y wget + wget https://github.com/ipfs/go-ipfs/releases/download/v0.4.22/go-ipfs_v0.4.22_linux-amd64.tar.gz + wget https://github.com/ipfs/go-ipfs/releases/download/v0.4.22/go-ipfs_v0.4.22_linux-amd64.tar.gz.sha512 + if [ "$(sha512sum go-ipfs_v0.4.22_linux-amd64.tar.gz)" != "$(cat go-ipfs_v0.4.22_linux-amd64.tar.gz.sha512)" ] + then + echo "ipfs failed checksum check" + exit 1 + fi + tar -xf go-ipfs_v0.4.22_linux-amd64.tar.gz + mv go-ipfs/ipfs /usr/local/bin/ipfs + chmod +x /usr/local/bin/ipfs + git_fetch_all_tags: + steps: + - run: + name: fetch all tags + 
command: | + git fetch --all + +jobs: + mod-tidy-check: + executor: golang + steps: + - install-deps + - prepare + - go/mod-tidy-check + + build-all: + executor: golang + steps: + - install-deps + - prepare + - run: sudo apt-get update + - run: sudo apt-get install npm + - run: + command: make buildall + - store_artifacts: + path: lotus + - store_artifacts: + path: lotus-miner + - store_artifacts: + path: lotus-worker + - run: mkdir linux && mv lotus lotus-miner lotus-worker linux/ + - persist_to_workspace: + root: "." + paths: + - linux + + build-debug: + executor: golang + steps: + - install-deps + - prepare + - run: + command: make debug + + test: + description: | + Run tests with gotestsum. + parameters: &test-params + executor: + type: executor + default: golang + go-test-flags: + type: string + default: "-timeout 30m" + description: Flags passed to go test. + target: + type: string + default: "./..." + description: Import paths of packages to be tested. + proofs-log-test: + type: string + default: "0" + suite: + type: string + default: unit + description: Test suite name to report to CircleCI. + gotestsum-format: + type: string + default: standard-verbose + description: gotestsum format. https://github.com/gotestyourself/gotestsum#format + coverage: + type: string + default: -coverprofile=coverage.txt -coverpkg=github.com/filecoin-project/lotus/... + description: Coverage flag. Set to the empty string to disable. + codecov-upload: + type: boolean + default: true + description: | + Upload coverage report to https://codecov.io/. Requires the codecov API token to be + set as an environment variable for private projects. 
+ executor: << parameters.executor >> + steps: + - install-deps + - prepare + - run: + command: make deps lotus + no_output_timeout: 30m + - download-params + - go/install-gotestsum: + gobin: $HOME/.local/bin + version: 0.5.2 + - run: + name: go test + environment: + TEST_RUSTPROOFS_LOGS: << parameters.proofs-log-test >> + SKIP_CONFORMANCE: "1" + command: | + mkdir -p /tmp/test-reports/<< parameters.suite >> + mkdir -p /tmp/test-artifacts + gotestsum \ + --format << parameters.gotestsum-format >> \ + --junitfile /tmp/test-reports/<< parameters.suite >>/junit.xml \ + --jsonfile /tmp/test-artifacts/<< parameters.suite >>.json \ + -- \ + << parameters.coverage >> \ + << parameters.go-test-flags >> \ + << parameters.target >> + no_output_timeout: 30m + - store_test_results: + path: /tmp/test-reports + - store_artifacts: + path: /tmp/test-artifacts/<< parameters.suite >>.json + - when: + condition: << parameters.codecov-upload >> + steps: + - go/install: {package: bash} + - go/install: {package: curl} + - run: + shell: /bin/bash -eo pipefail + command: | + bash <(curl -s https://codecov.io/bash) + + test-conformance: + description: | + Run tests using a corpus of interoperable test vectors for Filecoin + implementations to test their correctness and compliance with the Filecoin + specifications. + parameters: + <<: *test-params + vectors-branch: + type: string + default: "" + description: | + Branch on github.com/filecoin-project/test-vectors to checkout and + test with. If empty (the default) the commit defined by the git + submodule is used. 
+ executor: << parameters.executor >> + steps: + - install-deps + - prepare + - run: + command: make deps lotus + no_output_timeout: 30m + - download-params + - when: + condition: + not: + equal: [ "", << parameters.vectors-branch >> ] + steps: + - run: + name: checkout vectors branch + command: | + cd extern/test-vectors + git fetch + git checkout origin/<< parameters.vectors-branch >> + - go/install-gotestsum: + gobin: $HOME/.local/bin + version: 0.5.2 + - run: + name: install statediff globally + command: | + ## statediff is optional; we succeed even if compilation fails. + mkdir -p /tmp/statediff + git clone https://github.com/filecoin-project/statediff.git /tmp/statediff + cd /tmp/statediff + go install ./cmd/statediff || exit 0 + - run: + name: go test + environment: + SKIP_CONFORMANCE: "0" + command: | + mkdir -p /tmp/test-reports + mkdir -p /tmp/test-artifacts + gotestsum \ + --format pkgname-and-test-fails \ + --junitfile /tmp/test-reports/junit.xml \ + -- \ + -v -coverpkg ./chain/vm/,github.com/filecoin-project/specs-actors/... -coverprofile=/tmp/conformance.out ./conformance/ + go tool cover -html=/tmp/conformance.out -o /tmp/test-artifacts/conformance-coverage.html + no_output_timeout: 30m + - store_test_results: + path: /tmp/test-reports + - store_artifacts: + path: /tmp/test-artifacts/conformance-coverage.html + build-ntwk-calibration: + description: | + Compile lotus binaries for the calibration network + parameters: + <<: *test-params + executor: << parameters.executor >> + steps: + - install-deps + - prepare + - run: make calibnet + - run: mkdir linux-calibrationnet && mv lotus lotus-miner lotus-worker linux-calibrationnet + - persist_to_workspace: + root: "." 
+ paths: + - linux-calibrationnet + build-ntwk-butterfly: + description: | + Compile lotus binaries for the butterfly network + parameters: + <<: *test-params + executor: << parameters.executor >> + steps: + - install-deps + - prepare + - run: make butterflynet + - run: mkdir linux-butterflynet && mv lotus lotus-miner lotus-worker linux-butterflynet + - persist_to_workspace: + root: "." + paths: + - linux-butterflynet + build-ntwk-nerpa: + description: | + Compile lotus binaries for the nerpa network + parameters: + <<: *test-params + executor: << parameters.executor >> + steps: + - install-deps + - prepare + - run: make nerpanet + - run: mkdir linux-nerpanet && mv lotus lotus-miner lotus-worker linux-nerpanet + - persist_to_workspace: + root: "." + paths: + - linux-nerpanet + build-lotus-soup: + description: | + Compile `lotus-soup` Testground test plan + parameters: + <<: *test-params + executor: << parameters.executor >> + steps: + - install-deps + - prepare + - run: cd extern/filecoin-ffi && make + - run: + name: "go get lotus@master" + command: cd testplans/lotus-soup && go mod edit -replace=github.com/filecoin-project/lotus=../.. && go mod tidy + - run: + name: "build lotus-soup testplan" + command: pushd testplans/lotus-soup && go build -tags=testground . 
+ trigger-testplans: + description: | + Trigger `lotus-soup` test cases on TaaS + parameters: + <<: *test-params + executor: << parameters.executor >> + steps: + - install-deps + - prepare + - run: + name: "download testground" + command: wget https://gist.github.com/nonsense/5fbf3167cac79945f658771aed32fc44/raw/2e17eb0debf7ec6bdf027c1bdafc2c92dd97273b/testground-d3e9603 -O ~/testground-cli && chmod +x ~/testground-cli + - run: + name: "prepare .env.toml" + command: pushd testplans/lotus-soup && mkdir -p $HOME/testground && cp env-ci.toml $HOME/testground/.env.toml && echo 'endpoint="https://ci.testground.ipfs.team"' >> $HOME/testground/.env.toml && echo 'user="circleci"' >> $HOME/testground/.env.toml + - run: + name: "prepare testground home dir and link test plans" + command: mkdir -p $HOME/testground/plans && ln -s $(pwd)/testplans/lotus-soup $HOME/testground/plans/lotus-soup && ln -s $(pwd)/testplans/graphsync $HOME/testground/plans/graphsync + - run: + name: "go get lotus@master" + command: cd testplans/lotus-soup && go get github.com/filecoin-project/lotus@master + - run: + name: "trigger deals baseline testplan on taas" + command: ~/testground-cli run composition -f $HOME/testground/plans/lotus-soup/_compositions/baseline-k8s-3-1.toml --metadata-commit=$CIRCLE_SHA1 --metadata-repo=filecoin-project/lotus --metadata-branch=$CIRCLE_BRANCH + - run: + name: "trigger payment channel stress testplan on taas" + command: ~/testground-cli run composition -f $HOME/testground/plans/lotus-soup/_compositions/paych-stress-k8s.toml --metadata-commit=$CIRCLE_SHA1 --metadata-repo=filecoin-project/lotus --metadata-branch=$CIRCLE_BRANCH + - run: + name: "trigger graphsync testplan on taas" + command: ~/testground-cli run composition -f $HOME/testground/plans/graphsync/_compositions/stress-k8s.toml --metadata-commit=$CIRCLE_SHA1 --metadata-repo=filecoin-project/lotus --metadata-branch=$CIRCLE_BRANCH + + + build-macos: + description: build darwin lotus binary + macos: + xcode: 
"10.0.0" + working_directory: ~/go/src/github.com/filecoin-project/lotus + steps: + - prepare: + linux: false + darwin: true + - run: + name: Install go + command: | + curl -O https://dl.google.com/go/go1.16.4.darwin-amd64.pkg && \ + sudo installer -pkg go1.16.4.darwin-amd64.pkg -target / + - run: + name: Install pkg-config + command: HOMEBREW_NO_AUTO_UPDATE=1 brew install pkg-config + - run: go version + - run: + name: Install Rust + command: | + curl https://sh.rustup.rs -sSf | sh -s -- -y + - run: + name: Install jq + command: | + curl --location https://github.com/stedolan/jq/releases/download/jq-1.6/jq-osx-amd64 --output /usr/local/bin/jq + chmod +x /usr/local/bin/jq + - run: + name: Install hwloc + command: | + mkdir ~/hwloc + curl --location https://download.open-mpi.org/release/hwloc/v2.4/hwloc-2.4.1.tar.gz --output ~/hwloc/hwloc-2.4.1.tar.gz + cd ~/hwloc + tar -xvzpf hwloc-2.4.1.tar.gz + cd hwloc-2.4.1 + ./configure && make && sudo make install + - restore_cache: + name: restore cargo cache + key: v3-go-deps-{{ arch }}-{{ checksum "~/go/src/github.com/filecoin-project/lotus/go.sum" }} + - install-deps + - run: + command: make build + no_output_timeout: 30m + - store_artifacts: + path: lotus + - store_artifacts: + path: lotus-miner + - store_artifacts: + path: lotus-worker + - run: mkdir darwin && mv lotus lotus-miner lotus-worker darwin/ + - persist_to_workspace: + root: "." + paths: + - darwin + - save_cache: + name: save cargo cache + key: v3-go-deps-{{ arch }}-{{ checksum "~/go/src/github.com/filecoin-project/lotus/go.sum" }} + paths: + - "~/.rustup" + - "~/.cargo" + + build-appimage: + machine: + image: ubuntu-2004:202104-01 + steps: + - checkout + - attach_workspace: + at: "." 
+ - run: + name: install appimage-builder + command: | + # docs: https://appimage-builder.readthedocs.io/en/latest/intro/install.html + sudo apt update + sudo apt install -y python3-pip python3-setuptools patchelf desktop-file-utils libgdk-pixbuf2.0-dev fakeroot strace + sudo curl -Lo /usr/local/bin/appimagetool https://github.com/AppImage/AppImageKit/releases/download/continuous/appimagetool-x86_64.AppImage + sudo chmod +x /usr/local/bin/appimagetool + sudo pip3 install appimage-builder + - run: + name: install lotus dependencies + command: sudo apt install ocl-icd-opencl-dev libhwloc-dev + - run: + name: build appimage + command: | + sed -i "s/version: latest/version: ${CIRCLE_TAG:-latest}/" AppImageBuilder.yml + make appimage + - run: + name: prepare workspace + command: | + mkdir appimage + mv Lotus-*.AppImage appimage + - persist_to_workspace: + root: "." + paths: + - appimage + + + gofmt: + executor: golang + steps: + - install-deps + - prepare + - run: + command: "! go fmt ./... 2>&1 | read" + + gen-check: + executor: golang + steps: + - install-deps + - prepare + - run: make deps + - run: go install golang.org/x/tools/cmd/goimports + - run: go install github.com/hannahhoward/cbor-gen-for + - run: make gen + - run: git --no-pager diff + - run: git --no-pager diff --quiet + - run: make docsgen-cli + - run: git --no-pager diff + - run: git --no-pager diff --quiet + + docs-check: + executor: golang + steps: + - install-deps + - prepare + - run: go install golang.org/x/tools/cmd/goimports + - run: zcat build/openrpc/full.json.gz | jq > ../pre-openrpc-full + - run: zcat build/openrpc/miner.json.gz | jq > ../pre-openrpc-miner + - run: zcat build/openrpc/worker.json.gz | jq > ../pre-openrpc-worker + - run: make deps + - run: make docsgen + - run: zcat build/openrpc/full.json.gz | jq > ../post-openrpc-full + - run: zcat build/openrpc/miner.json.gz | jq > ../post-openrpc-miner + - run: zcat build/openrpc/worker.json.gz | jq > ../post-openrpc-worker + - run: git 
--no-pager diff + - run: diff ../pre-openrpc-full ../post-openrpc-full + - run: diff ../pre-openrpc-miner ../post-openrpc-miner + - run: diff ../pre-openrpc-worker ../post-openrpc-worker + - run: git --no-pager diff --quiet + + lint: &lint + description: | + Run golangci-lint. + parameters: + executor: + type: executor + default: golang + golangci-lint-version: + type: string + default: 1.27.0 + concurrency: + type: string + default: '2' + description: | + Concurrency used to run linters. Defaults to 2 because NumCPU is not + aware of container CPU limits. + args: + type: string + default: '' + description: | + Arguments to pass to golangci-lint + executor: << parameters.executor >> + steps: + - install-deps + - prepare + - run: + command: make deps + no_output_timeout: 30m + - go/install-golangci-lint: + gobin: $HOME/.local/bin + version: << parameters.golangci-lint-version >> + - run: + name: Lint + command: | + $HOME/.local/bin/golangci-lint run -v --timeout 2m \ + --concurrency << parameters.concurrency >> << parameters.args >> + lint-all: + <<: *lint + + publish: + description: publish binary artifacts + executor: ubuntu + steps: + - run: + name: Install git jq curl + command: apt update && apt install -y git jq curl + - checkout + - git_fetch_all_tags + - checkout + - install_ipfs + - attach_workspace: + at: "." 
+ - run: + name: Create bundles + command: ./scripts/build-bundle.sh + - run: + name: Publish release + command: ./scripts/publish-release.sh + + publish-snapcraft: + description: build and push snapcraft + machine: + image: ubuntu-2004:202104-01 + resource_class: 2xlarge + parameters: + channel: + type: string + default: "edge" + description: snapcraft channel + steps: + - checkout + - run: + name: install snapcraft + command: sudo snap install snapcraft --classic + - run: + name: create snapcraft config file + command: | + mkdir -p ~/.config/snapcraft + echo "$SNAPCRAFT_LOGIN_FILE" | base64 -d > ~/.config/snapcraft/snapcraft.cfg + - run: + name: build snap + command: snapcraft --use-lxd + - run: + name: publish snap + command: snapcraft push *.snap --release << parameters.channel >> + + build-and-push-image: + description: build and push docker images to public AWS ECR registry + executor: aws-cli/default + parameters: + profile-name: + type: string + default: "default" + description: AWS profile name to be configured. + + aws-access-key-id: + type: env_var_name + default: AWS_ACCESS_KEY_ID + description: > + AWS access key id for IAM role. Set this to the name of + the environment variable you will set to hold this + value, i.e. AWS_ACCESS_KEY. + + aws-secret-access-key: + type: env_var_name + default: AWS_SECRET_ACCESS_KEY + description: > + AWS secret key for IAM role. Set this to the name of + the environment variable you will set to hold this + value, i.e. AWS_SECRET_ACCESS_KEY. + + region: + type: env_var_name + default: AWS_REGION + description: > + Name of env var storing your AWS region information, + defaults to AWS_REGION + + account-url: + type: env_var_name + default: AWS_ECR_ACCOUNT_URL + description: > + Env var storing Amazon ECR account URL that maps to an AWS account, + e.g. 
{awsAccountNum}.dkr.ecr.us-west-2.amazonaws.com + defaults to AWS_ECR_ACCOUNT_URL + + dockerfile: + type: string + default: Dockerfile + description: Name of dockerfile to use. Defaults to Dockerfile. + + path: + type: string + default: . + description: Path to the directory containing your Dockerfile and build context. Defaults to . (working directory). + + extra-build-args: + type: string + default: "" + description: > + Extra flags to pass to docker build. For examples, see + https://docs.docker.com/engine/reference/commandline/build + + repo: + type: string + description: Name of an Amazon ECR repository + + tag: + type: string + default: "latest" + description: A comma-separated string containing docker image tags to build and push (default = latest) + + steps: + - run: + name: Confirm that environment variables are set + command: | + if [ -z "$AWS_ACCESS_KEY_ID" ]; then + echo "No AWS_ACCESS_KEY_ID is set. Skipping build-and-push job ..." + circleci-agent step halt + fi + + - aws-cli/setup: + profile-name: <<parameters.profile-name>> + aws-access-key-id: <<parameters.aws-access-key-id>> + aws-secret-access-key: <<parameters.aws-secret-access-key>> + aws-region: <<parameters.region>> + + - run: + name: Log into Amazon ECR + command: | + aws ecr-public get-login-password --region $<<parameters.region>> --profile <<parameters.profile-name>> | docker login --username AWS --password-stdin $<<parameters.account-url>> + + - checkout + + - setup_remote_docker: + version: 19.03.13 + docker_layer_caching: false + + - run: + name: Build docker image + command: | + registry_id=$(echo $<<parameters.account-url>> | sed "s;\..*;;g") + + docker_tag_args="" + IFS="," read -ra DOCKER_TAGS \<<< "<< parameters.tag >>" + for tag in "${DOCKER_TAGS[@]}"; do + docker_tag_args="$docker_tag_args -t $<<parameters.account-url>>/<<parameters.repo>>:$tag" + done + + docker build \ + <<#parameters.extra-build-args>><<parameters.extra-build-args>><</parameters.extra-build-args>> \ + -f <<parameters.path>>/<<parameters.dockerfile>> \ + $docker_tag_args \ + <<parameters.path>> + + - run: + name: Push image to Amazon ECR + command: | + IFS="," read -ra DOCKER_TAGS \<<< "<< parameters.tag >>" + for tag in "${DOCKER_TAGS[@]}"; do + docker push $<<parameters.account-url>>/<<parameters.repo>>:${tag} + done + + publish-packer-mainnet: + description: build and push AWS IAM and DigitalOcean 
droplet. + executor: + name: packer/default + packer-version: 1.6.6 + steps: + - checkout + - attach_workspace: + at: "." + - packer/build: + template: tools/packer/lotus.pkr.hcl + args: "-var ci_workspace_bins=./linux -var lotus_network=mainnet -var git_tag=$CIRCLE_TAG" + publish-packer-calibrationnet: + description: build and push AWS IAM and DigitalOcean droplet. + executor: + name: packer/default + packer-version: 1.6.6 + steps: + - checkout + - attach_workspace: + at: "." + - packer/build: + template: tools/packer/lotus.pkr.hcl + args: "-var ci_workspace_bins=./linux-calibrationnet -var lotus_network=calibrationnet -var git_tag=$CIRCLE_TAG" + publish-packer-butterflynet: + description: build and push AWS IAM and DigitalOcean droplet. + executor: + name: packer/default + packer-version: 1.6.6 + steps: + - checkout + - attach_workspace: + at: "." + - packer/build: + template: tools/packer/lotus.pkr.hcl + args: "-var ci_workspace_bins=./linux-butterflynet -var lotus_network=butterflynet -var git_tag=$CIRCLE_TAG" + publish-packer-nerpanet: + description: build and push AWS IAM and DigitalOcean droplet. + executor: + name: packer/default + packer-version: 1.6.6 + steps: + - checkout + - attach_workspace: + at: "." + - packer/build: + template: tools/packer/lotus.pkr.hcl + args: "-var ci_workspace_bins=./linux-nerpanet -var lotus_network=nerpanet -var git_tag=$CIRCLE_TAG" + +workflows: + version: 2.1 + ci: + jobs: + - lint-all: + concurrency: "16" # expend all docker 2xlarge CPUs. 
+ - mod-tidy-check + - gofmt + - gen-check + - docs-check + + [[- range $file := .ItestFiles -]] + [[ with $name := $file | stripSuffix ]] + - test: + name: test-itest-[[ $name ]] + suite: itest-[[ $name ]] + target: "./itests/[[ $file ]]" + [[ end ]] + [[- end -]] + + [[range $suite, $pkgs := .UnitSuites]] + - test: + name: test-[[ $suite ]] + suite: utest-[[ $suite ]] + target: "[[ $pkgs ]]" + [[- end]] + - test: + go-test-flags: "-run=TestMulticoreSDR" + suite: multicore-sdr-check + target: "./extern/sector-storage/ffiwrapper" + proofs-log-test: "1" + - test-conformance: + suite: conformance + codecov-upload: false + target: "./conformance" + - test-conformance: + name: test-conformance-bleeding-edge + codecov-upload: false + suite: conformance-bleeding-edge + target: "./conformance" + vectors-branch: master + - trigger-testplans: + filters: + branches: + only: + - master + - build-debug + - build-all: + filters: + tags: + only: + - /^v\d+\.\d+\.\d+(-rc\d+)?$/ + - build-ntwk-calibration: + filters: + tags: + only: + - /^v\d+\.\d+\.\d+(-rc\d+)?$/ + - build-ntwk-butterfly: + filters: + tags: + only: + - /^v\d+\.\d+\.\d+(-rc\d+)?$/ + - build-ntwk-nerpa: + filters: + tags: + only: + - /^v\d+\.\d+\.\d+(-rc\d+)?$/ + - build-lotus-soup + - build-macos: + filters: + branches: + ignore: + - /.*/ + tags: + only: + - /^v\d+\.\d+\.\d+(-rc\d+)?$/ + - build-appimage: + filters: + branches: + ignore: + - /.*/ + tags: + only: + - /^v\d+\.\d+\.\d+(-rc\d+)?$/ + - publish: + requires: + - build-all + - build-macos + - build-appimage + filters: + branches: + ignore: + - /.*/ + tags: + only: + - /^v\d+\.\d+\.\d+(-rc\d+)?$/ + - build-and-push-image: + dockerfile: Dockerfile.lotus + path: . 
+ repo: lotus-dev + tag: '${CIRCLE_SHA1:0:8}' + - publish-packer-mainnet: + requires: + - build-all + filters: + branches: + ignore: + - /.*/ + tags: + only: + - /^v\d+\.\d+\.\d+(-rc\d+)?$/ + - publish-packer-calibrationnet: + requires: + - build-ntwk-calibration + filters: + branches: + ignore: + - /.*/ + tags: + only: + - /^v\d+\.\d+\.\d+(-rc\d+)?$/ + - publish-packer-butterflynet: + requires: + - build-ntwk-butterfly + filters: + branches: + ignore: + - /.*/ + tags: + only: + - /^v\d+\.\d+\.\d+(-rc\d+)?$/ + - publish-packer-nerpanet: + requires: + - build-ntwk-nerpa + filters: + branches: + ignore: + - /.*/ + tags: + only: + - /^v\d+\.\d+\.\d+(-rc\d+)?$/ + - publish-snapcraft: + name: publish-snapcraft-stable + channel: stable + filters: + branches: + ignore: + - /.*/ + tags: + only: + - /^v\d+\.\d+\.\d+(-rc\d+)?$/ + + nightly: + triggers: + - schedule: + cron: "0 0 * * *" + filters: + branches: + only: + - master + jobs: + - publish-snapcraft: + name: publish-snapcraft-nightly + channel: edge diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 9da543c97..b8ec66f00 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -1,16 +1,6 @@ -## filecoin-project/lotus CODEOWNERS -## Refer to https://docs.github.com/en/github/creating-cloning-and-archiving-repositories/about-code-owners. -## -## These users or groups will be automatically assigned as reviewers every time -## a PR is submitted that modifies code in the specified locations. -## -## The Lotus repo configuration requires that at least ONE codeowner approves -## the PR before merging. +# Reference +# https://docs.github.com/en/github/creating-cloning-and-archiving-repositories/creating-a-repository-on-github/about-code-owners -### Global owners. -* @magik6k @arajasek - -### Conformance testing. 
-conformance/ @ZenGround0 -extern/test-vectors @ZenGround0 -cmd/tvx @ZenGround0 +# Global owners +# Ensure maintainers team is a requested reviewer for non-draft PRs +* @filecoin-project/lotus-maintainers diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 2bf602a85..33725d70d 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -35,6 +35,10 @@ jobs: - name: Checkout repository uses: actions/checkout@v2 + - uses: actions/setup-go@v1 + with: + go-version: '1.16.4' + # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL uses: github/codeql-action/init@v1 diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml new file mode 100644 index 000000000..20b2feb8a --- /dev/null +++ b/.github/workflows/stale.yml @@ -0,0 +1,27 @@ +name: Close and mark stale issue + +on: + schedule: + - cron: '0 0 * * *' + +jobs: + stale: + + runs-on: ubuntu-latest + permissions: + issues: write + pull-requests: write + + steps: + - uses: actions/stale@v3 + with: + repo-token: ${{ secrets.GITHUB_TOKEN }} + stale-issue-message: 'Oops, seems like we needed more information for this issue, please comment with more details or this issue will be closed in 24 hours.' + close-issue-message: 'This issue was closed because it is missing author input.' 
+ stale-issue-label: 'kind/stale' + any-of-labels: 'hint/needs-author-input' + days-before-issue-stale: 5 + days-before-issue-close: 1 + enable-statistics: true + + diff --git a/.github/workflows/testground-on-push.yml b/.github/workflows/testground-on-push.yml new file mode 100644 index 000000000..2a3c8af1d --- /dev/null +++ b/.github/workflows/testground-on-push.yml @@ -0,0 +1,29 @@ +--- +name: Testground PR Checker + +on: [push] + +jobs: + testground: + runs-on: ubuntu-latest + name: ${{ matrix.composition_file }} + strategy: + matrix: + include: + - backend_addr: ci.testground.ipfs.team + backend_proto: https + plan_directory: testplans/lotus-soup + composition_file: testplans/lotus-soup/_compositions/baseline-k8s-3-1.toml + - backend_addr: ci.testground.ipfs.team + backend_proto: https + plan_directory: testplans/lotus-soup + composition_file: testplans/lotus-soup/_compositions/paych-stress-k8s.toml + steps: + - uses: actions/checkout@v2 + - name: testground run + uses: coryschwartz/testground-github-action@v1.1 + with: + backend_addr: ${{ matrix.backend_addr }} + backend_proto: ${{ matrix.backend_proto }} + plan_directory: ${{ matrix.plan_directory }} + composition_file: ${{ matrix.composition_file }} diff --git a/.gitignore b/.gitignore index e34ebb935..467f315b8 100644 --- a/.gitignore +++ b/.gitignore @@ -1,3 +1,6 @@ +/AppDir +/appimage-builder-cache +*.AppImage /lotus /lotus-miner /lotus-worker @@ -5,6 +8,7 @@ /lotus-health /lotus-chainwatch /lotus-shed +/lotus-sim /lotus-pond /lotus-townhall /lotus-fountain diff --git a/CHANGELOG.md b/CHANGELOG.md index fe516ec60..b45c6236d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -35,8 +35,8 @@ Contributors # 1.10.0 / 2021-06-23 -This is a mandatory release of Lotus that introduces Filecoin network v13, codenamed the HyperDrive upgrade. The -Filecoin mainnet will upgrade, which is epoch 892800, on 2021-06-30T22:00:00Z. 
The network upgrade introduces the +This is a mandatory release of Lotus that introduces Filecoin network v13, codenamed the HyperDrive upgrade. The +Filecoin mainnet will upgrade, which is epoch 892800, on 2021-06-30T22:00:00Z. The network upgrade introduces the following FIPs: - [FIP-0008](https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0008.md): Add miner batched sector pre-commit method @@ -53,10 +53,10 @@ FIPs [0008](https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0008.m **Check out the documentation [here](https://docs.filecoin.io/mine/lotus/miner-configuration/#precommitsectorsbatch) for details on the new Lotus miner sealing config options, [here](https://docs.filecoin.io/mine/lotus/miner-configuration/#fees-section) for fee config options, and explanations of the new features.** -Note: +Note: - We recommend to keep `PreCommitSectorsBatch` as 1. - We recommend miners to set `PreCommitBatchWait` lower than 30 hours. - - We recommend miners to set a longer `CommitBatchSlack` and `PreCommitBatchSlack` to prevent message failures + - We recommend miners to set a longer `CommitBatchSlack` and `PreCommitBatchSlack` to prevent message failures due to expirations. ### Projected state tree growth @@ -96,35 +96,35 @@ Included in the HyperDrive upgrade is [FIP-0015](https://github.com/filecoin-pro - Always flush when timer goes off ([filecoin-project/lotus#6563](https://github.com/filecoin-project/lotus/pull/6563)) - Update default fees for aggregates ([filecoin-project/lotus#6548](https://github.com/filecoin-project/lotus/pull/6548)) - sealing: Early finalization option ([filecoin-project/lotus#6452](https://github.com/filecoin-project/lotus/pull/6452)) - - `./lotus-miner/config.toml/[Sealing.FinalizeEarly]`: default to false. Enable if you want to FinalizeSector before commiting + - `./lotus-miner/config.toml/[Sealing.FinalizeEarly]`: default to false. 
Enable if you want to FinalizeSector before commiting - Add filplus utils to CLI ([filecoin-project/lotus#6351](https://github.com/filecoin-project/lotus/pull/6351)) - cli doc can be found [here](https://github.com/filecoin-project/lotus/blob/master/documentation/en/cli-lotus.md#lotus-filplus) - Add miner-side MaxDealStartDelay config ([filecoin-project/lotus#6576](https://github.com/filecoin-project/lotus/pull/6576)) - + ### Bug Fixes - chainstore: Don't take heaviestLk with backlogged reorgCh ([filecoin-project/lotus#6526](https://github.com/filecoin-project/lotus/pull/6526)) - Backport #6041 - storagefsm: Fix batch deal packing behavior ([filecoin-project/lotus#6519](https://github.com/filecoin-project/lotus/pull/6519)) -- backport: pick the correct partitions-per-post limit ([filecoin-project/lotus#6503](https://github.com/filecoin-project/lotus/pull/6503)) +- backport: pick the correct partitions-per-post limit ([filecoin-project/lotus#6503](https://github.com/filecoin-project/lotus/pull/6503)) - failed sectors should be added into res correctly ([filecoin-project/lotus#6472](https://github.com/filecoin-project/lotus/pull/6472)) - sealing: Fix restartSectors race ([filecoin-project/lotus#6491](https://github.com/filecoin-project/lotus/pull/6491)) -- Fund miners with the aggregate fee when ProveCommitting ([filecoin-project/lotus#6428](https://github.com/filecoin-project/lotus/pull/6428)) +- Fund miners with the aggregate fee when ProveCommitting ([filecoin-project/lotus#6428](https://github.com/filecoin-project/lotus/pull/6428)) - Commit and Precommit batcher cannot share a getSectorDeadline method ([filecoin-project/lotus#6416](https://github.com/filecoin-project/lotus/pull/6416)) - Fix supported proof type manipulations for v5 actors ([filecoin-project/lotus#6366](https://github.com/filecoin-project/lotus/pull/6366)) - events: Fix handling of multiple matched events per epoch 
([filecoin-project/lotus#6362](https://github.com/filecoin-project/lotus/pull/6362)) - Fix randomness fetching around null blocks ([filecoin-project/lotus#6240](https://github.com/filecoin-project/lotus/pull/6240)) - -### Improvements + +### Improvements - Appimage v1.10.0 rc3 ([filecoin-project/lotus#6492](https://github.com/filecoin-project/lotus/pull/6492)) - Expand on Drand change testing ([filecoin-project/lotus#6500](https://github.com/filecoin-project/lotus/pull/6500)) - Backport Fix logging around mineOne ([filecoin-project/lotus#6499](https://github.com/filecoin-project/lotus/pull/6499)) -- mpool: Add more metrics ([filecoin-project/lotus#6453](https://github.com/filecoin-project/lotus/pull/6453)) +- mpool: Add more metrics ([filecoin-project/lotus#6453](https://github.com/filecoin-project/lotus/pull/6453)) - Merge backported PRs into v1.10 release branch ([filecoin-project/lotus#6436](https://github.com/filecoin-project/lotus/pull/6436)) - Fix tests ([filecoin-project/lotus#6371](https://github.com/filecoin-project/lotus/pull/6371)) - Extend the default deal start epoch delay ([filecoin-project/lotus#6350](https://github.com/filecoin-project/lotus/pull/6350)) - sealing: Wire up context to batchers ([filecoin-project/lotus#6497](https://github.com/filecoin-project/lotus/pull/6497)) - Improve address resolution for messages ([filecoin-project/lotus#6364](https://github.com/filecoin-project/lotus/pull/6364)) - + ### Dependency Updates - Proofs v8.0.2 ([filecoin-project/lotus#6524](https://github.com/filecoin-project/lotus/pull/6524)) - Update to fixed Bellperson ([filecoin-project/lotus#6480](https://github.com/filecoin-project/lotus/pull/6480)) @@ -135,7 +135,7 @@ Included in the HyperDrive upgrade is [FIP-0015](https://github.com/filecoin-pro - github.com/filecoin-project/go-hamt-ipld/v3 (v3.0.1 -> v3.1.0) - github.com/ipfs/go-log/v2 (v2.1.2-0.20200626104915-0016c0b4b3e4 -> v2.1.3) - github.com/filecoin-project/go-amt-ipld/v3 (v3.0.0 -> v3.1.0) - + ### 
Network Version v13 HyperDrive Upgrade - Set HyperDrive upgrade epoch ([filecoin-project/lotus#6565](https://github.com/filecoin-project/lotus/pull/6565)) - version bump to lotus v1.10.0-rc6 ([filecoin-project/lotus#6529](https://github.com/filecoin-project/lotus/pull/6529)) @@ -241,143 +241,6 @@ This is an optional Lotus release that introduces various improvements to the se - fix health report (https://github.com/filecoin-project/lotus/pull/6011) - fix(ci): Use recent ubuntu LTS release; Update release params ((https://github.com/filecoin-project/lotus/pull/6011)) -# 1.9.0-rc4 / 2021-05-13 - -This is an optional Lotus release that introduces various improvements to the sealing, mining, and deal-making processes. - -## Highlights - -- OpenRPC Support (https://github.com/filecoin-project/lotus/pull/5843) -- Take latency into account when making interactive deals (https://github.com/filecoin-project/lotus/pull/5876) -- Update go-commp-utils for >10x faster client commp calculation (https://github.com/filecoin-project/lotus/pull/5892) -- add `lotus client cancel-retrieval` cmd to lotus CLI (https://github.com/filecoin-project/lotus/pull/5871) -- add `inspect-deal` command to `lotus client` (https://github.com/filecoin-project/lotus/pull/5833) -- Local retrieval support (https://github.com/filecoin-project/lotus/pull/5917) -- go-fil-markets v1.1.9 -> v1.2.5 - - For a detailed changelog see https://github.com/filecoin-project/go-fil-markets/blob/master/CHANGELOG.md -- rust-fil-proofs v5.4.1 -> v7.0.1 - - For a detailed changelog see https://github.com/filecoin-project/rust-fil-proofs/blob/master/CHANGELOG.md - -## Changes -- storagefsm: Apply global events even in broken states (https://github.com/filecoin-project/lotus/pull/5962) -- Default the AlwaysKeepUnsealedCopy flag to true (https://github.com/filecoin-project/lotus/pull/5743) -- splitstore: compact hotstore prior to garbage collection (https://github.com/filecoin-project/lotus/pull/5778) -- ipfs-force 
bootstrapper update (https://github.com/filecoin-project/lotus/pull/5799) -- better logging when unsealing fails (https://github.com/filecoin-project/lotus/pull/5851) -- perf: add cache for gas permium estimation (https://github.com/filecoin-project/lotus/pull/5709) -- backupds: Compact log on restart (https://github.com/filecoin-project/lotus/pull/5875) -- backupds: Improve truncated log handling (https://github.com/filecoin-project/lotus/pull/5891) -- State CLI improvements (State CLI improvements) -- API proxy struct codegen (https://github.com/filecoin-project/lotus/pull/5854) -- move DI stuff for paychmgr into modules (https://github.com/filecoin-project/lotus/pull/5791) -- Implement Event observer and Settings for 3rd party dep injection (https://github.com/filecoin-project/lotus/pull/5693) -- Export developer and network commands for consumption by derivatives of Lotus (https://github.com/filecoin-project/lotus/pull/5864) -- mock sealer: Simulate randomness sideeffects (https://github.com/filecoin-project/lotus/pull/5805) -- localstorage: Demote reservation stat error to debug (https://github.com/filecoin-project/lotus/pull/5976) -- shed command to unpack miner info dumps (https://github.com/filecoin-project/lotus/pull/5800) -- Add two utils to Lotus-shed (https://github.com/filecoin-project/lotus/pull/5867) -- add shed election estimate command (https://github.com/filecoin-project/lotus/pull/5092) -- Add --actor flag in lotus-shed sectors terminate (https://github.com/filecoin-project/lotus/pull/5819) -- Move lotus mpool clear to lotus-shed (https://github.com/filecoin-project/lotus/pull/5900) -- Centralize everything on ipfs/go-log/v2 (https://github.com/filecoin-project/lotus/pull/5974) -- expose NextID from nice market actor interface (https://github.com/filecoin-project/lotus/pull/5850) -- add available options for perm on error (https://github.com/filecoin-project/lotus/pull/5814) -- API docs clarification: Document StateSearchMsg replaced message 
behavior (https://github.com/filecoin-project/lotus/pull/5838) -- api: Document StateReplay replaced message behavior (https://github.com/filecoin-project/lotus/pull/5840) -- add godocs to miner objects (https://github.com/filecoin-project/lotus/pull/2184) -- Add description to the client deal CLI command (https://github.com/filecoin-project/lotus/pull/5999) -- lint: don't skip builtin (https://github.com/filecoin-project/lotus/pull/5881) -- use deal duration from actors (https://github.com/filecoin-project/lotus/pull/5270) -- remote calc winningpost proof (https://github.com/filecoin-project/lotus/pull/5884) -- packer: other network images (https://github.com/filecoin-project/lotus/pull/5930) -- Convert the chainstore lock to RW (https://github.com/filecoin-project/lotus/pull/5971) -- Remove CachedBlockstore (https://github.com/filecoin-project/lotus/pull/5972) -- remove messagepool CapGasFee duplicate code (https://github.com/filecoin-project/lotus/pull/5992) -- Add a mining-heartbeat INFO line at every epoch (https://github.com/filecoin-project/lotus/pull/6183) -- chore(ci): Enable build on RC tags (https://github.com/filecoin-project/lotus/pull/6245) -- Upgrade nerpa to actor v4 and bump the version to rc4 (https://github.com/filecoin-project/lotus/pull/6249) -## Fixes -- return buffers after canceling badger operation (https://github.com/filecoin-project/lotus/pull/5796) -- avoid holding a lock while calling the View callback (https://github.com/filecoin-project/lotus/pull/5792) -- storagefsm: Trigger input processing when below limits (https://github.com/filecoin-project/lotus/pull/5801) -- After importing a previously deleted key, be able to delete it again (https://github.com/filecoin-project/lotus/pull/4653) -- fix StateManager.Replay on reward actor (https://github.com/filecoin-project/lotus/pull/5804) -- make sure atomic 64bit fields are 64bit aligned (https://github.com/filecoin-project/lotus/pull/5794) -- Import secp sigs in paych tests 
(https://github.com/filecoin-project/lotus/pull/5879) -- fix ci build-macos (https://github.com/filecoin-project/lotus/pull/5934) -- Fix creation of remainder account when it's not a multisig (https://github.com/filecoin-project/lotus/pull/5807) -- Fix fallback chainstore (https://github.com/filecoin-project/lotus/pull/6003) -- fix 4857: show help for set-addrs (https://github.com/filecoin-project/lotus/pull/5943) -- fix health report (https://github.com/filecoin-project/lotus/pull/6011) - - -# 1.9.0-rc2 / 2021-04-30 - -This is an optional Lotus release that introduces various improvements to the sealing, mining, and deal-making processes. - -## Highlights - -- OpenRPC Support (https://github.com/filecoin-project/lotus/pull/5843) -- Take latency into account when making interactive deals (https://github.com/filecoin-project/lotus/pull/5876) -- Update go-commp-utils for >10x faster client commp calculation (https://github.com/filecoin-project/lotus/pull/5892) -- add `lotus client cancel-retrieval` cmd to lotus CLI (https://github.com/filecoin-project/lotus/pull/5871) -- add `inspect-deal` command to `lotus client` (https://github.com/filecoin-project/lotus/pull/5833) -- Local retrieval support (https://github.com/filecoin-project/lotus/pull/5917) -- go-fil-markets v1.1.9 -> v1.2.5 - - For a detailed changelog see https://github.com/filecoin-project/go-fil-markets/blob/master/CHANGELOG.md -- rust-fil-proofs v5.4.1 -> v7 - - For a detailed changelog see https://github.com/filecoin-project/rust-fil-proofs/blob/master/CHANGELOG.md - -## Changes -- storagefsm: Apply global events even in broken states (https://github.com/filecoin-project/lotus/pull/5962) -- Default the AlwaysKeepUnsealedCopy flag to true (https://github.com/filecoin-project/lotus/pull/5743) -- splitstore: compact hotstore prior to garbage collection (https://github.com/filecoin-project/lotus/pull/5778) -- ipfs-force bootstrapper update (https://github.com/filecoin-project/lotus/pull/5799) -- better 
logging when unsealing fails (https://github.com/filecoin-project/lotus/pull/5851) -- perf: add cache for gas permium estimation (https://github.com/filecoin-project/lotus/pull/5709) -- backupds: Compact log on restart (https://github.com/filecoin-project/lotus/pull/5875) -- backupds: Improve truncated log handling (https://github.com/filecoin-project/lotus/pull/5891) -- State CLI improvements (State CLI improvements) -- API proxy struct codegen (https://github.com/filecoin-project/lotus/pull/5854) -- move DI stuff for paychmgr into modules (https://github.com/filecoin-project/lotus/pull/5791) -- Implement Event observer and Settings for 3rd party dep injection (https://github.com/filecoin-project/lotus/pull/5693) -- Export developer and network commands for consumption by derivatives of Lotus (https://github.com/filecoin-project/lotus/pull/5864) -- mock sealer: Simulate randomness sideeffects (https://github.com/filecoin-project/lotus/pull/5805) -- localstorage: Demote reservation stat error to debug (https://github.com/filecoin-project/lotus/pull/5976) -- shed command to unpack miner info dumps (https://github.com/filecoin-project/lotus/pull/5800) -- Add two utils to Lotus-shed (https://github.com/filecoin-project/lotus/pull/5867) -- add shed election estimate command (https://github.com/filecoin-project/lotus/pull/5092) -- Add --actor flag in lotus-shed sectors terminate (https://github.com/filecoin-project/lotus/pull/5819) -- Move lotus mpool clear to lotus-shed (https://github.com/filecoin-project/lotus/pull/5900) -- Centralize everything on ipfs/go-log/v2 (https://github.com/filecoin-project/lotus/pull/5974) -- expose NextID from nice market actor interface (https://github.com/filecoin-project/lotus/pull/5850) -- add available options for perm on error (https://github.com/filecoin-project/lotus/pull/5814) -- API docs clarification: Document StateSearchMsg replaced message behavior (https://github.com/filecoin-project/lotus/pull/5838) -- api: Document 
StateReplay replaced message behavior (https://github.com/filecoin-project/lotus/pull/5840) -- add godocs to miner objects (https://github.com/filecoin-project/lotus/pull/2184) -- Add description to the client deal CLI command (https://github.com/filecoin-project/lotus/pull/5999) -- lint: don't skip builtin (https://github.com/filecoin-project/lotus/pull/5881) -- use deal duration from actors (https://github.com/filecoin-project/lotus/pull/5270) -- remote calc winningpost proof (https://github.com/filecoin-project/lotus/pull/5884) -- packer: other network images (https://github.com/filecoin-project/lotus/pull/5930) -- Convert the chainstore lock to RW (https://github.com/filecoin-project/lotus/pull/5971) -- Remove CachedBlockstore (https://github.com/filecoin-project/lotus/pull/5972) -- remove messagepool CapGasFee duplicate code (https://github.com/filecoin-project/lotus/pull/5992) - -## Fixes -- return buffers after canceling badger operation (https://github.com/filecoin-project/lotus/pull/5796) -- avoid holding a lock while calling the View callback (https://github.com/filecoin-project/lotus/pull/5792) -- storagefsm: Trigger input processing when below limits (https://github.com/filecoin-project/lotus/pull/5801) -- After importing a previously deleted key, be able to delete it again (https://github.com/filecoin-project/lotus/pull/4653) -- fix StateManager.Replay on reward actor (https://github.com/filecoin-project/lotus/pull/5804) -- make sure atomic 64bit fields are 64bit aligned (https://github.com/filecoin-project/lotus/pull/5794) -- Import secp sigs in paych tests (https://github.com/filecoin-project/lotus/pull/5879) -- fix ci build-macos (https://github.com/filecoin-project/lotus/pull/5934) -- Fix creation of remainder account when it's not a multisig (https://github.com/filecoin-project/lotus/pull/5807) -- Fix fallback chainstore (https://github.com/filecoin-project/lotus/pull/6003) -- fix 4857: show help for set-addrs 
(https://github.com/filecoin-project/lotus/pull/5943) -- fix health report (https://github.com/filecoin-project/lotus/pull/6011) - # 1.8.0 / 2021-04-05 This is a mandatory release of Lotus that upgrades the network to version 12, which introduces various performance improvements to the cron processing of the power actor. The network will upgrade at height 712320, which is 2021-04-29T06:00:00Z. diff --git a/Dockerfile.lotus b/Dockerfile.lotus index 43d8fbc23..0b43ef806 100644 --- a/Dockerfile.lotus +++ b/Dockerfile.lotus @@ -1,4 +1,4 @@ -FROM golang:1.15.6 AS builder-deps +FROM golang:1.16.4 AS builder-deps MAINTAINER Lotus Development Team RUN apt-get update && apt-get install -y ca-certificates build-essential clang ocl-icd-opencl-dev ocl-icd-libopencl1 jq libhwloc-dev diff --git a/Makefile b/Makefile index 9e4558c25..4e03d1b6a 100644 --- a/Makefile +++ b/Makefile @@ -6,9 +6,9 @@ all: build unexport GOFLAGS GOVERSION:=$(shell go version | cut -d' ' -f 3 | sed 's/^go//' | awk -F. '{printf "%d%03d%03d", $$1, $$2, $$3}') -ifeq ($(shell expr $(GOVERSION) \< 1015005), 1) +ifeq ($(shell expr $(GOVERSION) \< 1016000), 1) $(warning Your Golang version is go$(shell expr $(GOVERSION) / 1000000).$(shell expr $(GOVERSION) % 1000000 / 1000).$(shell expr $(GOVERSION) % 1000)) -$(error Update Golang to version to at least 1.15.5) +$(error Update Golang to version to at least 1.16.0) endif # git modules that need to be loaded @@ -47,7 +47,6 @@ BUILD_DEPS+=ffi-version-check .PHONY: ffi-version-check - $(MODULES): build/.update-modules ; # dummy file that marks the last time modules were updated build/.update-modules: @@ -81,10 +80,12 @@ nerpanet: build-devnets butterflynet: GOFLAGS+=-tags=butterflynet butterflynet: build-devnets +interopnet: GOFLAGS+=-tags=interopnet +interopnet: build-devnets + lotus: $(BUILD_DEPS) rm -f lotus go build $(GOFLAGS) -o lotus ./cmd/lotus - go run github.com/GeertJohan/go.rice/rice append --exec lotus -i ./build .PHONY: lotus BINS+=lotus @@ -92,21 
+93,18 @@ BINS+=lotus lotus-miner: $(BUILD_DEPS) rm -f lotus-miner go build $(GOFLAGS) -o lotus-miner ./cmd/lotus-storage-miner - go run github.com/GeertJohan/go.rice/rice append --exec lotus-miner -i ./build .PHONY: lotus-miner BINS+=lotus-miner lotus-worker: $(BUILD_DEPS) rm -f lotus-worker go build $(GOFLAGS) -o lotus-worker ./cmd/lotus-seal-worker - go run github.com/GeertJohan/go.rice/rice append --exec lotus-worker -i ./build .PHONY: lotus-worker BINS+=lotus-worker lotus-shed: $(BUILD_DEPS) rm -f lotus-shed go build $(GOFLAGS) -o lotus-shed ./cmd/lotus-shed - go run github.com/GeertJohan/go.rice/rice append --exec lotus-shed -i ./build .PHONY: lotus-shed BINS+=lotus-shed @@ -138,7 +136,6 @@ install-worker: lotus-seed: $(BUILD_DEPS) rm -f lotus-seed go build $(GOFLAGS) -o lotus-seed ./cmd/lotus-seed - go run github.com/GeertJohan/go.rice/rice append --exec lotus-seed -i ./build .PHONY: lotus-seed BINS+=lotus-seed @@ -172,13 +169,11 @@ lotus-townhall-front: .PHONY: lotus-townhall-front lotus-townhall-app: lotus-touch lotus-townhall-front - go run github.com/GeertJohan/go.rice/rice append --exec lotus-townhall -i ./cmd/lotus-townhall -i ./build .PHONY: lotus-townhall-app lotus-fountain: rm -f lotus-fountain go build -o lotus-fountain ./cmd/lotus-fountain - go run github.com/GeertJohan/go.rice/rice append --exec lotus-fountain -i ./cmd/lotus-fountain -i ./build .PHONY: lotus-fountain BINS+=lotus-fountain @@ -191,28 +186,24 @@ BINS+=lotus-chainwatch lotus-bench: rm -f lotus-bench go build -o lotus-bench ./cmd/lotus-bench - go run github.com/GeertJohan/go.rice/rice append --exec lotus-bench -i ./build .PHONY: lotus-bench BINS+=lotus-bench lotus-stats: rm -f lotus-stats go build $(GOFLAGS) -o lotus-stats ./cmd/lotus-stats - go run github.com/GeertJohan/go.rice/rice append --exec lotus-stats -i ./build .PHONY: lotus-stats BINS+=lotus-stats lotus-pcr: rm -f lotus-pcr go build $(GOFLAGS) -o lotus-pcr ./cmd/lotus-pcr - go run github.com/GeertJohan/go.rice/rice append 
--exec lotus-pcr -i ./build .PHONY: lotus-pcr BINS+=lotus-pcr lotus-health: rm -f lotus-health go build -o lotus-health ./cmd/lotus-health - go run github.com/GeertJohan/go.rice/rice append --exec lotus-health -i ./build .PHONY: lotus-health BINS+=lotus-health @@ -243,6 +234,12 @@ BINS+=tvx install-chainwatch: lotus-chainwatch install -C ./lotus-chainwatch /usr/local/bin/lotus-chainwatch +lotus-sim: $(BUILD_DEPS) + rm -f lotus-sim + go build $(GOFLAGS) -o lotus-sim ./cmd/lotus-sim +.PHONY: lotus-sim +BINS+=lotus-sim + # SYSTEMD install-daemon-service: install-daemon @@ -303,17 +300,10 @@ clean-services: clean-all-services buildall: $(BINS) -completions: - ./scripts/make-completions.sh lotus - ./scripts/make-completions.sh lotus-miner -.PHONY: completions - install-completions: mkdir -p /usr/share/bash-completion/completions /usr/local/share/zsh/site-functions/ install -C ./scripts/bash-completion/lotus /usr/share/bash-completion/completions/lotus - install -C ./scripts/bash-completion/lotus-miner /usr/share/bash-completion/completions/lotus-miner install -C ./scripts/zsh-completion/lotus /usr/local/share/zsh/site-functions/_lotus - install -C ./scripts/zsh-completion/lotus-miner /usr/local/share/zsh/site-functions/_lotus-miner clean: rm -rf $(CLEAN) $(BINS) @@ -343,17 +333,15 @@ api-gen: goimports -w api .PHONY: api-gen -appimage: $(BUILD_DEPS) +appimage: lotus rm -rf appimage-builder-cache || true rm AppDir/io.filecoin.lotus.desktop || true rm AppDir/icon.svg || true rm Appdir/AppRun || true mkdir -p AppDir/usr/bin - rm -rf lotus - go run github.com/GeertJohan/go.rice/rice embed-go -i ./build - go build $(GOFLAGS) -o lotus ./cmd/lotus cp ./lotus AppDir/usr/bin/ appimage-builder + docsgen: docsgen-md docsgen-openrpc docsgen-md-bin: api-gen actors-gen @@ -382,8 +370,21 @@ docsgen-openrpc-worker: docsgen-openrpc-bin .PHONY: docsgen docsgen-md-bin docsgen-openrpc-bin -gen: actors-gen type-gen method-gen docsgen api-gen +gen: actors-gen type-gen method-gen docsgen 
api-gen circleci + @echo ">>> IF YOU'VE MODIFIED THE CLI, REMEMBER TO ALSO MAKE docsgen-cli" .PHONY: gen +snap: lotus lotus-miner lotus-worker + snapcraft + # snapcraft upload ./lotus_*.snap + +# separate from gen because it needs binaries +docsgen-cli: lotus lotus-miner lotus-worker + python ./scripts/generate-lotus-cli.py +.PHONY: docsgen-cli + print-%: @echo $*=$($*) + +circleci: + go generate -x ./.circleci \ No newline at end of file diff --git a/README.md b/README.md index 636c01b44..a44c69006 100644 --- a/README.md +++ b/README.md @@ -10,7 +10,7 @@ - +

@@ -18,7 +18,9 @@ Lotus is an implementation of the Filecoin Distributed Storage Network. For more ## Building & Documentation -For instructions on how to build, install and setup lotus, please visit [https://docs.filecoin.io/get-started/lotus](https://docs.filecoin.io/get-started/lotus/). +> Note: The default `master` branch is the dev branch, please use with caution. For the latest stable version, checkout the most recent [`Latest release`](https://github.com/filecoin-project/lotus/releases). + +For complete instructions on how to build, install and setup lotus, please visit [https://docs.filecoin.io/get-started/lotus](https://docs.filecoin.io/get-started/lotus/). Basic build instructions can be found further down in this readme. ## Reporting a Vulnerability @@ -50,6 +52,88 @@ When implementing a change: 7. Title the PR in a meaningful way and describe the rationale and the thought process in the PR description. 8. Write clean, thoughtful, and detailed [commit messages](https://chris.beams.io/posts/git-commit/). This is even more important than the PR description, because commit messages are stored _inside_ the Git history. One good rule is: if you are happy posting the commit message as the PR description, then it's a good commit message. +## Basic Build Instructions +**System-specific Software Dependencies**: + +Building Lotus requires some system dependencies, usually provided by your distribution. 
+ +Ubuntu/Debian: +``` +sudo apt install mesa-opencl-icd ocl-icd-opencl-dev gcc git bzr jq pkg-config curl clang build-essential hwloc libhwloc-dev wget -y && sudo apt upgrade -y +``` + +Fedora: +``` +sudo dnf -y install gcc make git bzr jq pkgconfig mesa-libOpenCL mesa-libOpenCL-devel opencl-headers ocl-icd ocl-icd-devel clang llvm wget hwloc libhwloc-dev +``` + +For other distributions you can find the required dependencies [here.](https://docs.filecoin.io/get-started/lotus/installation/#system-specific) For instructions specific to macOS, you can find them [here.](https://docs.filecoin.io/get-started/lotus/installation/#macos) + +#### Go + +To build Lotus, you need a working installation of [Go 1.16.4 or higher](https://golang.org/dl/): + +```bash +wget -c https://golang.org/dl/go1.16.4.linux-amd64.tar.gz -O - | sudo tar -xz -C /usr/local +``` + +**TIP:** +You'll need to add `/usr/local/go/bin` to your path. For most Linux distributions you can run something like: + +```shell +echo "export PATH=$PATH:/usr/local/go/bin" >> ~/.bashrc && source ~/.bashrc +``` + +See the [official Golang installation instructions](https://golang.org/doc/install) if you get stuck. + +### Build and install Lotus + +Once all the dependencies are installed, you can build and install the Lotus suite (`lotus`, `lotus-miner`, and `lotus-worker`). + +1. Clone the repository: + + ```sh + git clone https://github.com/filecoin-project/lotus.git + cd lotus/ + ``` + +Note: The default branch `master` is the dev branch where the latest new features, bug fixes and improvement are in. However, if you want to run lotus on Filecoin mainnet and want to run a production-ready lotus, get the latest release[ here](https://github.com/filecoin-project/lotus/releases). + +2. To join mainnet, checkout the [latest release](https://github.com/filecoin-project/lotus/releases). 
+ + If you are changing networks from a previous Lotus installation or there has been a network reset, read the [Switch networks guide](https://docs.filecoin.io/get-started/lotus/switch-networks/) before proceeding. + + For networks other than mainnet, look up the current branch or tag/commit for the network you want to join in the [Filecoin networks dashboard](https://network.filecoin.io), then build Lotus for your specific network below. + + ```sh + git checkout + # For example: + git checkout # tag for a release + ``` + + Currently, the latest code on the _master_ branch corresponds to mainnet. + +3. If you are in China, see "[Lotus: tips when running in China](https://docs.filecoin.io/get-started/lotus/tips-running-in-china/)". +4. This build instruction uses the prebuilt proofs binaries. If you want to build the proof binaries from source check the [complete instructions](https://docs.filecoin.io/get-started/lotus/installation/#build-and-install-lotus). Note, if you are building the proof binaries from source, [installing rustup](https://docs.filecoin.io/get-started/lotus/installation/#rustup) is also needed. + +5. Build and install Lotus: + + ```sh + make clean all #mainnet + + # Or to join a testnet or devnet: + make clean calibnet # Calibration with min 32GiB sectors + make clean nerpanet # Nerpa with min 512MiB sectors + + sudo make install + ``` + + This will put `lotus`, `lotus-miner` and `lotus-worker` in `/usr/local/bin`. + + `lotus` will use the `$HOME/.lotus` folder by default for storage (configuration, chain data, wallets, etc). See [advanced options](https://docs.filecoin.io/get-started/lotus/configuration-and-advanced-usage/) for information on how to customize the Lotus folder. + +6. You should now have Lotus installed. You can now [start the Lotus daemon and sync the chain](https://docs.filecoin.io/get-started/lotus/installation/#start-the-lotus-daemon-and-sync-the-chain). 
+ ## License Dual-licensed under [MIT](https://github.com/filecoin-project/lotus/blob/master/LICENSE-MIT) + [Apache 2.0](https://github.com/filecoin-project/lotus/blob/master/LICENSE-APACHE) diff --git a/api/api_full.go b/api/api_full.go index a90b0c89f..714f15126 100644 --- a/api/api_full.go +++ b/api/api_full.go @@ -104,6 +104,9 @@ type FullNode interface { // specified block. ChainGetParentMessages(ctx context.Context, blockCid cid.Cid) ([]Message, error) //perm:read + // ChainGetMessagesInTipset returns message stores in current tipset + ChainGetMessagesInTipset(ctx context.Context, tsk types.TipSetKey) ([]Message, error) //perm:read + // ChainGetTipSetByHeight looks back for a tipset at the specified epoch. // If there are no blocks at the specified epoch, a tipset at an earlier epoch // will be returned. @@ -252,6 +255,13 @@ type FullNode interface { // MpoolBatchPushMessage batch pushes a unsigned message to mempool. MpoolBatchPushMessage(context.Context, []*types.Message, *MessageSendSpec) ([]*types.SignedMessage, error) //perm:sign + // MpoolCheckMessages performs logical checks on a batch of messages + MpoolCheckMessages(context.Context, []*MessagePrototype) ([][]MessageCheckStatus, error) //perm:read + // MpoolCheckPendingMessages performs logical checks for all pending messages from a given address + MpoolCheckPendingMessages(context.Context, address.Address) ([][]MessageCheckStatus, error) //perm:read + // MpoolCheckReplaceMessages performs logical checks on pending messages with replacement + MpoolCheckReplaceMessages(context.Context, []*types.Message) ([][]MessageCheckStatus, error) //perm:read + // MpoolGetNonce gets next nonce for the specified sender. // Note that this method may not be atomic. Use MpoolPushMessage instead. 
MpoolGetNonce(context.Context, address.Address) (uint64, error) //perm:read @@ -316,6 +326,8 @@ type FullNode interface { ClientRemoveImport(ctx context.Context, importID multistore.StoreID) error //perm:admin // ClientStartDeal proposes a deal with a miner. ClientStartDeal(ctx context.Context, params *StartDealParams) (*cid.Cid, error) //perm:admin + // ClientStatelessDeal fire-and-forget-proposes an offline deal to a miner without subsequent tracking. + ClientStatelessDeal(ctx context.Context, params *StartDealParams) (*cid.Cid, error) //perm:write // ClientGetDealInfo returns the latest information about a given deal. ClientGetDealInfo(context.Context, cid.Cid) (*DealInfo, error) //perm:read // ClientListDeals returns information about the deals made by the local client. @@ -335,6 +347,10 @@ type FullNode interface { // ClientRetrieveWithEvents initiates the retrieval of a file, as specified in the order, and provides a channel // of status updates. ClientRetrieveWithEvents(ctx context.Context, order RetrievalOrder, ref *FileRef) (<-chan marketevents.RetrievalEvent, error) //perm:admin + // ClientListRetrievals returns information about retrievals made by the local client + ClientListRetrievals(ctx context.Context) ([]RetrievalInfo, error) //perm:write + // ClientGetRetrievalUpdates returns status of updated retrieval deals + ClientGetRetrievalUpdates(ctx context.Context) (<-chan RetrievalInfo, error) //perm:write // ClientQueryAsk returns a signed StorageAsk from the specified miner. 
ClientQueryAsk(ctx context.Context, p peer.ID, miner address.Address) (*storagemarket.StorageAsk, error) //perm:read // ClientCalcCommP calculates the CommP and data size of the specified CID @@ -579,15 +595,16 @@ type FullNode interface { // MsigCreate creates a multisig wallet // It takes the following params: , , //, , - MsigCreate(context.Context, uint64, []address.Address, abi.ChainEpoch, types.BigInt, address.Address, types.BigInt) (cid.Cid, error) //perm:sign + MsigCreate(context.Context, uint64, []address.Address, abi.ChainEpoch, types.BigInt, address.Address, types.BigInt) (*MessagePrototype, error) //perm:sign + // MsigPropose proposes a multisig message // It takes the following params: , , , // , , - MsigPropose(context.Context, address.Address, address.Address, types.BigInt, address.Address, uint64, []byte) (cid.Cid, error) //perm:sign + MsigPropose(context.Context, address.Address, address.Address, types.BigInt, address.Address, uint64, []byte) (*MessagePrototype, error) //perm:sign // MsigApprove approves a previously-proposed multisig message by transaction ID // It takes the following params: , - MsigApprove(context.Context, address.Address, uint64, address.Address) (cid.Cid, error) //perm:sign + MsigApprove(context.Context, address.Address, uint64, address.Address) (*MessagePrototype, error) //perm:sign // MsigApproveTxnHash approves a previously-proposed multisig message, specified // using both transaction ID and a hash of the parameters used in the @@ -595,43 +612,49 @@ type FullNode interface { // exactly the transaction you think you are. 
// It takes the following params: , , , , , // , , - MsigApproveTxnHash(context.Context, address.Address, uint64, address.Address, address.Address, types.BigInt, address.Address, uint64, []byte) (cid.Cid, error) //perm:sign + MsigApproveTxnHash(context.Context, address.Address, uint64, address.Address, address.Address, types.BigInt, address.Address, uint64, []byte) (*MessagePrototype, error) //perm:sign // MsigCancel cancels a previously-proposed multisig message // It takes the following params: , , , , // , , - MsigCancel(context.Context, address.Address, uint64, address.Address, types.BigInt, address.Address, uint64, []byte) (cid.Cid, error) //perm:sign + MsigCancel(context.Context, address.Address, uint64, address.Address, types.BigInt, address.Address, uint64, []byte) (*MessagePrototype, error) //perm:sign + // MsigAddPropose proposes adding a signer in the multisig // It takes the following params: , , // , - MsigAddPropose(context.Context, address.Address, address.Address, address.Address, bool) (cid.Cid, error) //perm:sign + MsigAddPropose(context.Context, address.Address, address.Address, address.Address, bool) (*MessagePrototype, error) //perm:sign + // MsigAddApprove approves a previously proposed AddSigner message // It takes the following params: , , , // , , - MsigAddApprove(context.Context, address.Address, address.Address, uint64, address.Address, address.Address, bool) (cid.Cid, error) //perm:sign + MsigAddApprove(context.Context, address.Address, address.Address, uint64, address.Address, address.Address, bool) (*MessagePrototype, error) //perm:sign + // MsigAddCancel cancels a previously proposed AddSigner message // It takes the following params: , , , // , - MsigAddCancel(context.Context, address.Address, address.Address, uint64, address.Address, bool) (cid.Cid, error) //perm:sign + MsigAddCancel(context.Context, address.Address, address.Address, uint64, address.Address, bool) (*MessagePrototype, error) //perm:sign + // MsigSwapPropose proposes 
swapping 2 signers in the multisig // It takes the following params: , , // , - MsigSwapPropose(context.Context, address.Address, address.Address, address.Address, address.Address) (cid.Cid, error) //perm:sign + MsigSwapPropose(context.Context, address.Address, address.Address, address.Address, address.Address) (*MessagePrototype, error) //perm:sign + // MsigSwapApprove approves a previously proposed SwapSigner // It takes the following params: , , , // , , - MsigSwapApprove(context.Context, address.Address, address.Address, uint64, address.Address, address.Address, address.Address) (cid.Cid, error) //perm:sign + MsigSwapApprove(context.Context, address.Address, address.Address, uint64, address.Address, address.Address, address.Address) (*MessagePrototype, error) //perm:sign + // MsigSwapCancel cancels a previously proposed SwapSigner message // It takes the following params: , , , // , - MsigSwapCancel(context.Context, address.Address, address.Address, uint64, address.Address, address.Address) (cid.Cid, error) //perm:sign + MsigSwapCancel(context.Context, address.Address, address.Address, uint64, address.Address, address.Address) (*MessagePrototype, error) //perm:sign // MsigRemoveSigner proposes the removal of a signer from the multisig. // It accepts the multisig to make the change on, the proposer address to // send the message from, the address to be removed, and a boolean // indicating whether or not the signing threshold should be lowered by one // along with the address removal. 
- MsigRemoveSigner(ctx context.Context, msig address.Address, proposer address.Address, toRemove address.Address, decrease bool) (cid.Cid, error) //perm:sign + MsigRemoveSigner(ctx context.Context, msig address.Address, proposer address.Address, toRemove address.Address, decrease bool) (*MessagePrototype, error) //perm:sign // MarketAddBalance adds funds to the market actor MarketAddBalance(ctx context.Context, wallet, addr address.Address, amt types.BigInt) (cid.Cid, error) //perm:sign @@ -664,6 +687,11 @@ type FullNode interface { PaychVoucherList(context.Context, address.Address) ([]*paych.SignedVoucher, error) //perm:write PaychVoucherSubmit(context.Context, address.Address, *paych.SignedVoucher, []byte, []byte) (cid.Cid, error) //perm:sign + // MethodGroup: Node + // These methods are general node management and status commands + + NodeStatus(ctx context.Context, inclChainStatus bool) (NodeStatus, error) //perm:read + // CreateBackup creates node backup onder the specified file name. 
The // method requires that the lotus daemon is running with the // LOTUS_BACKUP_BASE_PATH environment variable set to some path, and that diff --git a/api/api_gateway.go b/api/api_gateway.go index 130a18c55..0ee66ac17 100644 --- a/api/api_gateway.go +++ b/api/api_gateway.go @@ -58,4 +58,5 @@ type Gateway interface { StateSearchMsg(ctx context.Context, from types.TipSetKey, msg cid.Cid, limit abi.ChainEpoch, allowReplaced bool) (*MsgLookup, error) StateWaitMsg(ctx context.Context, cid cid.Cid, confidence uint64, limit abi.ChainEpoch, allowReplaced bool) (*MsgLookup, error) WalletBalance(context.Context, address.Address) (types.BigInt, error) + Version(context.Context) (APIVersion, error) } diff --git a/api/api_storage.go b/api/api_storage.go index 0ccfbd88f..e1756eb1f 100644 --- a/api/api_storage.go +++ b/api/api_storage.go @@ -55,6 +55,13 @@ type StorageMiner interface { // Get the status of a given sector by ID SectorsStatus(ctx context.Context, sid abi.SectorNumber, showOnChainInfo bool) (SectorInfo, error) //perm:read + // Add piece to an open sector. If no sectors with enough space are open, + // either a new sector will be created, or this call will block until more + // sectors can be created. 
+ SectorAddPieceToAny(ctx context.Context, size abi.UnpaddedPieceSize, r storage.Data, d PieceDealInfo) (SectorOffset, error) //perm:admin + + SectorsUnsealPiece(ctx context.Context, sector storage.SectorRef, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, randomness abi.SealRandomness, commd *cid.Cid) error //perm:admin + // List all staged sectors SectorsList(context.Context) ([]abi.SectorNumber, error) //perm:read @@ -135,8 +142,8 @@ type StorageMiner interface { StorageBestAlloc(ctx context.Context, allocate storiface.SectorFileType, ssize abi.SectorSize, pathType storiface.PathType) ([]stores.StorageInfo, error) //perm:admin StorageLock(ctx context.Context, sector abi.SectorID, read storiface.SectorFileType, write storiface.SectorFileType) error //perm:admin StorageTryLock(ctx context.Context, sector abi.SectorID, read storiface.SectorFileType, write storiface.SectorFileType) (bool, error) //perm:admin + StorageList(ctx context.Context) (map[stores.ID][]stores.Decl, error) //perm:admin - StorageList(ctx context.Context) (map[stores.ID][]stores.Decl, error) //perm:admin StorageLocal(ctx context.Context) (map[stores.ID]string, error) //perm:admin StorageStat(ctx context.Context, id stores.ID) (fsutil.FsStat, error) //perm:admin @@ -279,15 +286,17 @@ type AddrUse int const ( PreCommitAddr AddrUse = iota CommitAddr + DealPublishAddr PoStAddr TerminateSectorsAddr ) type AddressConfig struct { - PreCommitControl []address.Address - CommitControl []address.Address - TerminateControl []address.Address + PreCommitControl []address.Address + CommitControl []address.Address + TerminateControl []address.Address + DealPublishControl []address.Address DisableOwnerFallback bool DisableWorkerFallback bool @@ -300,3 +309,25 @@ type PendingDealInfo struct { PublishPeriodStart time.Time PublishPeriod time.Duration } + +type SectorOffset struct { + Sector abi.SectorNumber + Offset abi.PaddedPieceSize +} + +// DealInfo is a tuple of deal identity and its schedule 
+type PieceDealInfo struct { + PublishCid *cid.Cid + DealID abi.DealID + DealProposal *market.DealProposal + DealSchedule DealSchedule + KeepUnsealed bool +} + +// DealSchedule communicates the time interval of a storage deal. The deal must +// appear in a sealed (proven) sector no later than StartEpoch, otherwise it +// is invalid. +type DealSchedule struct { + StartEpoch abi.ChainEpoch + EndEpoch abi.ChainEpoch +} diff --git a/api/api_wallet.go b/api/api_wallet.go index 891b2fabb..973aaaf6d 100644 --- a/api/api_wallet.go +++ b/api/api_wallet.go @@ -35,13 +35,13 @@ type MsgMeta struct { } type Wallet interface { - WalletNew(context.Context, types.KeyType) (address.Address, error) - WalletHas(context.Context, address.Address) (bool, error) - WalletList(context.Context) ([]address.Address, error) + WalletNew(context.Context, types.KeyType) (address.Address, error) //perm:admin + WalletHas(context.Context, address.Address) (bool, error) //perm:admin + WalletList(context.Context) ([]address.Address, error) //perm:admin - WalletSign(ctx context.Context, signer address.Address, toSign []byte, meta MsgMeta) (*crypto.Signature, error) + WalletSign(ctx context.Context, signer address.Address, toSign []byte, meta MsgMeta) (*crypto.Signature, error) //perm:admin - WalletExport(context.Context, address.Address) (*types.KeyInfo, error) - WalletImport(context.Context, *types.KeyInfo) (address.Address, error) - WalletDelete(context.Context, address.Address) error + WalletExport(context.Context, address.Address) (*types.KeyInfo, error) //perm:admin + WalletImport(context.Context, *types.KeyInfo) (address.Address, error) //perm:admin + WalletDelete(context.Context, address.Address) error //perm:admin } diff --git a/api/api_worker.go b/api/api_worker.go index e834b792c..4553c30e0 100644 --- a/api/api_worker.go +++ b/api/api_worker.go @@ -2,7 +2,6 @@ package api import ( "context" - "io" "github.com/google/uuid" "github.com/ipfs/go-cid" @@ -43,7 +42,6 @@ type Worker interface { 
ReleaseUnsealed(ctx context.Context, sector storage.SectorRef, safeToFree []storage.Range) (storiface.CallID, error) //perm:admin MoveStorage(ctx context.Context, sector storage.SectorRef, types storiface.SectorFileType) (storiface.CallID, error) //perm:admin UnsealPiece(context.Context, storage.SectorRef, storiface.UnpaddedByteIndex, abi.UnpaddedPieceSize, abi.SealRandomness, cid.Cid) (storiface.CallID, error) //perm:admin - ReadPiece(context.Context, io.Writer, storage.SectorRef, storiface.UnpaddedByteIndex, abi.UnpaddedPieceSize) (storiface.CallID, error) //perm:admin Fetch(context.Context, storage.SectorRef, storiface.SectorFileType, storiface.PathType, storiface.AcquireMode) (storiface.CallID, error) //perm:admin TaskDisable(ctx context.Context, tt sealtasks.TaskType) error //perm:admin diff --git a/api/cbor_gen.go b/api/cbor_gen.go index 808e516ad..4434b45ed 100644 --- a/api/cbor_gen.go +++ b/api/cbor_gen.go @@ -8,6 +8,7 @@ import ( "sort" abi "github.com/filecoin-project/go-state-types/abi" + market "github.com/filecoin-project/specs-actors/actors/builtin/market" paych "github.com/filecoin-project/specs-actors/actors/builtin/paych" cid "github.com/ipfs/go-cid" cbg "github.com/whyrusleeping/cbor-gen" @@ -738,3 +739,381 @@ func (t *SealSeed) UnmarshalCBOR(r io.Reader) error { return nil } +func (t *PieceDealInfo) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write([]byte{165}); err != nil { + return err + } + + scratch := make([]byte, 9) + + // t.PublishCid (cid.Cid) (struct) + if len("PublishCid") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"PublishCid\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("PublishCid"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("PublishCid")); err != nil { + return err + } + + if t.PublishCid == nil { + if _, err := w.Write(cbg.CborNull); err != nil { + return 
err + } + } else { + if err := cbg.WriteCidBuf(scratch, w, *t.PublishCid); err != nil { + return xerrors.Errorf("failed to write cid field t.PublishCid: %w", err) + } + } + + // t.DealID (abi.DealID) (uint64) + if len("DealID") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"DealID\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("DealID"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("DealID")); err != nil { + return err + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.DealID)); err != nil { + return err + } + + // t.DealProposal (market.DealProposal) (struct) + if len("DealProposal") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"DealProposal\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("DealProposal"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("DealProposal")); err != nil { + return err + } + + if err := t.DealProposal.MarshalCBOR(w); err != nil { + return err + } + + // t.DealSchedule (api.DealSchedule) (struct) + if len("DealSchedule") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"DealSchedule\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("DealSchedule"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("DealSchedule")); err != nil { + return err + } + + if err := t.DealSchedule.MarshalCBOR(w); err != nil { + return err + } + + // t.KeepUnsealed (bool) (bool) + if len("KeepUnsealed") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"KeepUnsealed\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("KeepUnsealed"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("KeepUnsealed")); err != nil { + return err + } + + if err := cbg.WriteBool(w, 
t.KeepUnsealed); err != nil { + return err + } + return nil +} + +func (t *PieceDealInfo) UnmarshalCBOR(r io.Reader) error { + *t = PieceDealInfo{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajMap { + return fmt.Errorf("cbor input should be of type map") + } + + if extra > cbg.MaxLength { + return fmt.Errorf("PieceDealInfo: map struct too large (%d)", extra) + } + + var name string + n := extra + + for i := uint64(0); i < n; i++ { + + { + sval, err := cbg.ReadStringBuf(br, scratch) + if err != nil { + return err + } + + name = string(sval) + } + + switch name { + // t.PublishCid (cid.Cid) (struct) + case "PublishCid": + + { + + b, err := br.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := br.UnreadByte(); err != nil { + return err + } + + c, err := cbg.ReadCid(br) + if err != nil { + return xerrors.Errorf("failed to read cid field t.PublishCid: %w", err) + } + + t.PublishCid = &c + } + + } + // t.DealID (abi.DealID) (uint64) + case "DealID": + + { + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.DealID = abi.DealID(extra) + + } + // t.DealProposal (market.DealProposal) (struct) + case "DealProposal": + + { + + b, err := br.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := br.UnreadByte(); err != nil { + return err + } + t.DealProposal = new(market.DealProposal) + if err := t.DealProposal.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.DealProposal pointer: %w", err) + } + } + + } + // t.DealSchedule (api.DealSchedule) (struct) + case "DealSchedule": + + { + + if err := t.DealSchedule.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.DealSchedule: %w", err) + } + + } + // t.KeepUnsealed (bool) 
(bool) + case "KeepUnsealed": + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajOther { + return fmt.Errorf("booleans must be major type 7") + } + switch extra { + case 20: + t.KeepUnsealed = false + case 21: + t.KeepUnsealed = true + default: + return fmt.Errorf("booleans are either major type 7, value 20 or 21 (got %d)", extra) + } + + default: + // Field doesn't exist on this type, so ignore it + cbg.ScanForLinks(r, func(cid.Cid) {}) + } + } + + return nil +} +func (t *DealSchedule) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write([]byte{162}); err != nil { + return err + } + + scratch := make([]byte, 9) + + // t.StartEpoch (abi.ChainEpoch) (int64) + if len("StartEpoch") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"StartEpoch\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("StartEpoch"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("StartEpoch")); err != nil { + return err + } + + if t.StartEpoch >= 0 { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.StartEpoch)); err != nil { + return err + } + } else { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.StartEpoch-1)); err != nil { + return err + } + } + + // t.EndEpoch (abi.ChainEpoch) (int64) + if len("EndEpoch") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"EndEpoch\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("EndEpoch"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("EndEpoch")); err != nil { + return err + } + + if t.EndEpoch >= 0 { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.EndEpoch)); err != nil { + return err + } + } else { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, 
cbg.MajNegativeInt, uint64(-t.EndEpoch-1)); err != nil { + return err + } + } + return nil +} + +func (t *DealSchedule) UnmarshalCBOR(r io.Reader) error { + *t = DealSchedule{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajMap { + return fmt.Errorf("cbor input should be of type map") + } + + if extra > cbg.MaxLength { + return fmt.Errorf("DealSchedule: map struct too large (%d)", extra) + } + + var name string + n := extra + + for i := uint64(0); i < n; i++ { + + { + sval, err := cbg.ReadStringBuf(br, scratch) + if err != nil { + return err + } + + name = string(sval) + } + + switch name { + // t.StartEpoch (abi.ChainEpoch) (int64) + case "StartEpoch": + { + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + var extraI int64 + if err != nil { + return err + } + switch maj { + case cbg.MajUnsignedInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 positive overflow") + } + case cbg.MajNegativeInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 negative oveflow") + } + extraI = -1 - extraI + default: + return fmt.Errorf("wrong type for int64 field: %d", maj) + } + + t.StartEpoch = abi.ChainEpoch(extraI) + } + // t.EndEpoch (abi.ChainEpoch) (int64) + case "EndEpoch": + { + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + var extraI int64 + if err != nil { + return err + } + switch maj { + case cbg.MajUnsignedInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 positive overflow") + } + case cbg.MajNegativeInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 negative oveflow") + } + extraI = -1 - extraI + default: + return fmt.Errorf("wrong type for int64 field: %d", maj) + } + + t.EndEpoch = abi.ChainEpoch(extraI) + } + + default: + // Field doesn't exist on this type, so ignore it + cbg.ScanForLinks(r, func(cid.Cid) {}) + } + } + + return nil +} diff 
--git a/api/checkstatuscode_string.go b/api/checkstatuscode_string.go new file mode 100644 index 000000000..072f77989 --- /dev/null +++ b/api/checkstatuscode_string.go @@ -0,0 +1,35 @@ +// Code generated by "stringer -type=CheckStatusCode -trimprefix=CheckStatus"; DO NOT EDIT. + +package api + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[CheckStatusMessageSerialize-1] + _ = x[CheckStatusMessageSize-2] + _ = x[CheckStatusMessageValidity-3] + _ = x[CheckStatusMessageMinGas-4] + _ = x[CheckStatusMessageMinBaseFee-5] + _ = x[CheckStatusMessageBaseFee-6] + _ = x[CheckStatusMessageBaseFeeLowerBound-7] + _ = x[CheckStatusMessageBaseFeeUpperBound-8] + _ = x[CheckStatusMessageGetStateNonce-9] + _ = x[CheckStatusMessageNonce-10] + _ = x[CheckStatusMessageGetStateBalance-11] + _ = x[CheckStatusMessageBalance-12] +} + +const _CheckStatusCode_name = "MessageSerializeMessageSizeMessageValidityMessageMinGasMessageMinBaseFeeMessageBaseFeeMessageBaseFeeLowerBoundMessageBaseFeeUpperBoundMessageGetStateNonceMessageNonceMessageGetStateBalanceMessageBalance" + +var _CheckStatusCode_index = [...]uint8{0, 16, 27, 42, 55, 72, 86, 110, 134, 154, 166, 188, 202} + +func (i CheckStatusCode) String() string { + i -= 1 + if i < 0 || i >= CheckStatusCode(len(_CheckStatusCode_index)-1) { + return "CheckStatusCode(" + strconv.FormatInt(int64(i+1), 10) + ")" + } + return _CheckStatusCode_name[_CheckStatusCode_index[i]:_CheckStatusCode_index[i+1]] +} diff --git a/api/client/client.go b/api/client/client.go index 90fe714bf..2c18a289b 100644 --- a/api/client/client.go +++ b/api/client/client.go @@ -52,8 +52,30 @@ func NewFullNodeRPCV1(ctx context.Context, addr string, requestHeader http.Heade return &res, closer, err } +func getPushUrl(addr string) (string, error) { + pushUrl, err := url.Parse(addr) + if err != nil { + return "", err + } 
+ switch pushUrl.Scheme { + case "ws": + pushUrl.Scheme = "http" + case "wss": + pushUrl.Scheme = "https" + } + ///rpc/v0 -> /rpc/streams/v0/push + + pushUrl.Path = path.Join(pushUrl.Path, "../streams/v0/push") + return pushUrl.String(), nil +} + // NewStorageMinerRPCV0 creates a new http jsonrpc client for miner func NewStorageMinerRPCV0(ctx context.Context, addr string, requestHeader http.Header, opts ...jsonrpc.Option) (v0api.StorageMiner, jsonrpc.ClientCloser, error) { + pushUrl, err := getPushUrl(addr) + if err != nil { + return nil, nil, err + } + var res v0api.StorageMinerStruct closer, err := jsonrpc.NewMergeClient(ctx, addr, "Filecoin", []interface{}{ @@ -61,26 +83,19 @@ func NewStorageMinerRPCV0(ctx context.Context, addr string, requestHeader http.H &res.Internal, }, requestHeader, - opts..., + append([]jsonrpc.Option{ + rpcenc.ReaderParamEncoder(pushUrl), + }, opts...)..., ) return &res, closer, err } -func NewWorkerRPCV0(ctx context.Context, addr string, requestHeader http.Header) (api.Worker, jsonrpc.ClientCloser, error) { - u, err := url.Parse(addr) +func NewWorkerRPCV0(ctx context.Context, addr string, requestHeader http.Header) (v0api.Worker, jsonrpc.ClientCloser, error) { + pushUrl, err := getPushUrl(addr) if err != nil { return nil, nil, err } - switch u.Scheme { - case "ws": - u.Scheme = "http" - case "wss": - u.Scheme = "https" - } - ///rpc/v0 -> /rpc/streams/v0/push - - u.Path = path.Join(u.Path, "../streams/v0/push") var res api.WorkerStruct closer, err := jsonrpc.NewMergeClient(ctx, addr, "Filecoin", @@ -88,7 +103,7 @@ func NewWorkerRPCV0(ctx context.Context, addr string, requestHeader http.Header) &res.Internal, }, requestHeader, - rpcenc.ReaderParamEncoder(u.String()), + rpcenc.ReaderParamEncoder(pushUrl), jsonrpc.WithNoReconnect(), jsonrpc.WithTimeout(30*time.Second), ) diff --git a/api/docgen/docgen.go b/api/docgen/docgen.go index 8357ff9b5..4f9bc637e 100644 --- a/api/docgen/docgen.go +++ b/api/docgen/docgen.go @@ -261,6 +261,9 @@ func 
init() { }, "methods": []interface{}{}}, ) + + addExample(api.CheckStatusCode(0)) + addExample(map[string]interface{}{"abc": 123}) } func GetAPIType(name, pkg string) (i interface{}, t, permStruct, commonPermStruct reflect.Type) { diff --git a/api/mocks/mock_full.go b/api/mocks/mock_full.go index 4336a56f9..69f315be9 100644 --- a/api/mocks/mock_full.go +++ b/api/mocks/mock_full.go @@ -37,30 +37,30 @@ import ( protocol "github.com/libp2p/go-libp2p-core/protocol" ) -// MockFullNode is a mock of FullNode interface +// MockFullNode is a mock of FullNode interface. type MockFullNode struct { ctrl *gomock.Controller recorder *MockFullNodeMockRecorder } -// MockFullNodeMockRecorder is the mock recorder for MockFullNode +// MockFullNodeMockRecorder is the mock recorder for MockFullNode. type MockFullNodeMockRecorder struct { mock *MockFullNode } -// NewMockFullNode creates a new mock instance +// NewMockFullNode creates a new mock instance. func NewMockFullNode(ctrl *gomock.Controller) *MockFullNode { mock := &MockFullNode{ctrl: ctrl} mock.recorder = &MockFullNodeMockRecorder{mock} return mock } -// EXPECT returns an object that allows the caller to indicate expected use +// EXPECT returns an object that allows the caller to indicate expected use. func (m *MockFullNode) EXPECT() *MockFullNodeMockRecorder { return m.recorder } -// AuthNew mocks base method +// AuthNew mocks base method. func (m *MockFullNode) AuthNew(arg0 context.Context, arg1 []auth.Permission) ([]byte, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "AuthNew", arg0, arg1) @@ -69,13 +69,13 @@ func (m *MockFullNode) AuthNew(arg0 context.Context, arg1 []auth.Permission) ([] return ret0, ret1 } -// AuthNew indicates an expected call of AuthNew +// AuthNew indicates an expected call of AuthNew. 
func (mr *MockFullNodeMockRecorder) AuthNew(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AuthNew", reflect.TypeOf((*MockFullNode)(nil).AuthNew), arg0, arg1) } -// AuthVerify mocks base method +// AuthVerify mocks base method. func (m *MockFullNode) AuthVerify(arg0 context.Context, arg1 string) ([]auth.Permission, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "AuthVerify", arg0, arg1) @@ -84,13 +84,13 @@ func (m *MockFullNode) AuthVerify(arg0 context.Context, arg1 string) ([]auth.Per return ret0, ret1 } -// AuthVerify indicates an expected call of AuthVerify +// AuthVerify indicates an expected call of AuthVerify. func (mr *MockFullNodeMockRecorder) AuthVerify(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AuthVerify", reflect.TypeOf((*MockFullNode)(nil).AuthVerify), arg0, arg1) } -// BeaconGetEntry mocks base method +// BeaconGetEntry mocks base method. func (m *MockFullNode) BeaconGetEntry(arg0 context.Context, arg1 abi.ChainEpoch) (*types.BeaconEntry, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "BeaconGetEntry", arg0, arg1) @@ -99,13 +99,13 @@ func (m *MockFullNode) BeaconGetEntry(arg0 context.Context, arg1 abi.ChainEpoch) return ret0, ret1 } -// BeaconGetEntry indicates an expected call of BeaconGetEntry +// BeaconGetEntry indicates an expected call of BeaconGetEntry. func (mr *MockFullNodeMockRecorder) BeaconGetEntry(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BeaconGetEntry", reflect.TypeOf((*MockFullNode)(nil).BeaconGetEntry), arg0, arg1) } -// ChainDeleteObj mocks base method +// ChainDeleteObj mocks base method. 
func (m *MockFullNode) ChainDeleteObj(arg0 context.Context, arg1 cid.Cid) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ChainDeleteObj", arg0, arg1) @@ -113,13 +113,13 @@ func (m *MockFullNode) ChainDeleteObj(arg0 context.Context, arg1 cid.Cid) error return ret0 } -// ChainDeleteObj indicates an expected call of ChainDeleteObj +// ChainDeleteObj indicates an expected call of ChainDeleteObj. func (mr *MockFullNodeMockRecorder) ChainDeleteObj(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainDeleteObj", reflect.TypeOf((*MockFullNode)(nil).ChainDeleteObj), arg0, arg1) } -// ChainExport mocks base method +// ChainExport mocks base method. func (m *MockFullNode) ChainExport(arg0 context.Context, arg1 abi.ChainEpoch, arg2 bool, arg3 types.TipSetKey) (<-chan []byte, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ChainExport", arg0, arg1, arg2, arg3) @@ -128,13 +128,13 @@ func (m *MockFullNode) ChainExport(arg0 context.Context, arg1 abi.ChainEpoch, ar return ret0, ret1 } -// ChainExport indicates an expected call of ChainExport +// ChainExport indicates an expected call of ChainExport. func (mr *MockFullNodeMockRecorder) ChainExport(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainExport", reflect.TypeOf((*MockFullNode)(nil).ChainExport), arg0, arg1, arg2, arg3) } -// ChainGetBlock mocks base method +// ChainGetBlock mocks base method. func (m *MockFullNode) ChainGetBlock(arg0 context.Context, arg1 cid.Cid) (*types.BlockHeader, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ChainGetBlock", arg0, arg1) @@ -143,13 +143,13 @@ func (m *MockFullNode) ChainGetBlock(arg0 context.Context, arg1 cid.Cid) (*types return ret0, ret1 } -// ChainGetBlock indicates an expected call of ChainGetBlock +// ChainGetBlock indicates an expected call of ChainGetBlock. 
func (mr *MockFullNodeMockRecorder) ChainGetBlock(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainGetBlock", reflect.TypeOf((*MockFullNode)(nil).ChainGetBlock), arg0, arg1) } -// ChainGetBlockMessages mocks base method +// ChainGetBlockMessages mocks base method. func (m *MockFullNode) ChainGetBlockMessages(arg0 context.Context, arg1 cid.Cid) (*api.BlockMessages, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ChainGetBlockMessages", arg0, arg1) @@ -158,13 +158,13 @@ func (m *MockFullNode) ChainGetBlockMessages(arg0 context.Context, arg1 cid.Cid) return ret0, ret1 } -// ChainGetBlockMessages indicates an expected call of ChainGetBlockMessages +// ChainGetBlockMessages indicates an expected call of ChainGetBlockMessages. func (mr *MockFullNodeMockRecorder) ChainGetBlockMessages(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainGetBlockMessages", reflect.TypeOf((*MockFullNode)(nil).ChainGetBlockMessages), arg0, arg1) } -// ChainGetGenesis mocks base method +// ChainGetGenesis mocks base method. func (m *MockFullNode) ChainGetGenesis(arg0 context.Context) (*types.TipSet, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ChainGetGenesis", arg0) @@ -173,13 +173,13 @@ func (m *MockFullNode) ChainGetGenesis(arg0 context.Context) (*types.TipSet, err return ret0, ret1 } -// ChainGetGenesis indicates an expected call of ChainGetGenesis +// ChainGetGenesis indicates an expected call of ChainGetGenesis. func (mr *MockFullNodeMockRecorder) ChainGetGenesis(arg0 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainGetGenesis", reflect.TypeOf((*MockFullNode)(nil).ChainGetGenesis), arg0) } -// ChainGetMessage mocks base method +// ChainGetMessage mocks base method. 
func (m *MockFullNode) ChainGetMessage(arg0 context.Context, arg1 cid.Cid) (*types.Message, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ChainGetMessage", arg0, arg1) @@ -188,13 +188,28 @@ func (m *MockFullNode) ChainGetMessage(arg0 context.Context, arg1 cid.Cid) (*typ return ret0, ret1 } -// ChainGetMessage indicates an expected call of ChainGetMessage +// ChainGetMessage indicates an expected call of ChainGetMessage. func (mr *MockFullNodeMockRecorder) ChainGetMessage(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainGetMessage", reflect.TypeOf((*MockFullNode)(nil).ChainGetMessage), arg0, arg1) } -// ChainGetNode mocks base method +// ChainGetMessagesInTipset mocks base method. +func (m *MockFullNode) ChainGetMessagesInTipset(arg0 context.Context, arg1 types.TipSetKey) ([]api.Message, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ChainGetMessagesInTipset", arg0, arg1) + ret0, _ := ret[0].([]api.Message) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ChainGetMessagesInTipset indicates an expected call of ChainGetMessagesInTipset. +func (mr *MockFullNodeMockRecorder) ChainGetMessagesInTipset(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainGetMessagesInTipset", reflect.TypeOf((*MockFullNode)(nil).ChainGetMessagesInTipset), arg0, arg1) +} + +// ChainGetNode mocks base method. func (m *MockFullNode) ChainGetNode(arg0 context.Context, arg1 string) (*api.IpldObject, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ChainGetNode", arg0, arg1) @@ -203,13 +218,13 @@ func (m *MockFullNode) ChainGetNode(arg0 context.Context, arg1 string) (*api.Ipl return ret0, ret1 } -// ChainGetNode indicates an expected call of ChainGetNode +// ChainGetNode indicates an expected call of ChainGetNode. 
func (mr *MockFullNodeMockRecorder) ChainGetNode(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainGetNode", reflect.TypeOf((*MockFullNode)(nil).ChainGetNode), arg0, arg1) } -// ChainGetParentMessages mocks base method +// ChainGetParentMessages mocks base method. func (m *MockFullNode) ChainGetParentMessages(arg0 context.Context, arg1 cid.Cid) ([]api.Message, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ChainGetParentMessages", arg0, arg1) @@ -218,13 +233,13 @@ func (m *MockFullNode) ChainGetParentMessages(arg0 context.Context, arg1 cid.Cid return ret0, ret1 } -// ChainGetParentMessages indicates an expected call of ChainGetParentMessages +// ChainGetParentMessages indicates an expected call of ChainGetParentMessages. func (mr *MockFullNodeMockRecorder) ChainGetParentMessages(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainGetParentMessages", reflect.TypeOf((*MockFullNode)(nil).ChainGetParentMessages), arg0, arg1) } -// ChainGetParentReceipts mocks base method +// ChainGetParentReceipts mocks base method. func (m *MockFullNode) ChainGetParentReceipts(arg0 context.Context, arg1 cid.Cid) ([]*types.MessageReceipt, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ChainGetParentReceipts", arg0, arg1) @@ -233,13 +248,13 @@ func (m *MockFullNode) ChainGetParentReceipts(arg0 context.Context, arg1 cid.Cid return ret0, ret1 } -// ChainGetParentReceipts indicates an expected call of ChainGetParentReceipts +// ChainGetParentReceipts indicates an expected call of ChainGetParentReceipts. 
func (mr *MockFullNodeMockRecorder) ChainGetParentReceipts(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainGetParentReceipts", reflect.TypeOf((*MockFullNode)(nil).ChainGetParentReceipts), arg0, arg1) } -// ChainGetPath mocks base method +// ChainGetPath mocks base method. func (m *MockFullNode) ChainGetPath(arg0 context.Context, arg1, arg2 types.TipSetKey) ([]*api.HeadChange, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ChainGetPath", arg0, arg1, arg2) @@ -248,13 +263,13 @@ func (m *MockFullNode) ChainGetPath(arg0 context.Context, arg1, arg2 types.TipSe return ret0, ret1 } -// ChainGetPath indicates an expected call of ChainGetPath +// ChainGetPath indicates an expected call of ChainGetPath. func (mr *MockFullNodeMockRecorder) ChainGetPath(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainGetPath", reflect.TypeOf((*MockFullNode)(nil).ChainGetPath), arg0, arg1, arg2) } -// ChainGetRandomnessFromBeacon mocks base method +// ChainGetRandomnessFromBeacon mocks base method. func (m *MockFullNode) ChainGetRandomnessFromBeacon(arg0 context.Context, arg1 types.TipSetKey, arg2 crypto.DomainSeparationTag, arg3 abi.ChainEpoch, arg4 []byte) (abi.Randomness, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ChainGetRandomnessFromBeacon", arg0, arg1, arg2, arg3, arg4) @@ -263,13 +278,13 @@ func (m *MockFullNode) ChainGetRandomnessFromBeacon(arg0 context.Context, arg1 t return ret0, ret1 } -// ChainGetRandomnessFromBeacon indicates an expected call of ChainGetRandomnessFromBeacon +// ChainGetRandomnessFromBeacon indicates an expected call of ChainGetRandomnessFromBeacon. 
func (mr *MockFullNodeMockRecorder) ChainGetRandomnessFromBeacon(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainGetRandomnessFromBeacon", reflect.TypeOf((*MockFullNode)(nil).ChainGetRandomnessFromBeacon), arg0, arg1, arg2, arg3, arg4) } -// ChainGetRandomnessFromTickets mocks base method +// ChainGetRandomnessFromTickets mocks base method. func (m *MockFullNode) ChainGetRandomnessFromTickets(arg0 context.Context, arg1 types.TipSetKey, arg2 crypto.DomainSeparationTag, arg3 abi.ChainEpoch, arg4 []byte) (abi.Randomness, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ChainGetRandomnessFromTickets", arg0, arg1, arg2, arg3, arg4) @@ -278,13 +293,13 @@ func (m *MockFullNode) ChainGetRandomnessFromTickets(arg0 context.Context, arg1 return ret0, ret1 } -// ChainGetRandomnessFromTickets indicates an expected call of ChainGetRandomnessFromTickets +// ChainGetRandomnessFromTickets indicates an expected call of ChainGetRandomnessFromTickets. func (mr *MockFullNodeMockRecorder) ChainGetRandomnessFromTickets(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainGetRandomnessFromTickets", reflect.TypeOf((*MockFullNode)(nil).ChainGetRandomnessFromTickets), arg0, arg1, arg2, arg3, arg4) } -// ChainGetTipSet mocks base method +// ChainGetTipSet mocks base method. func (m *MockFullNode) ChainGetTipSet(arg0 context.Context, arg1 types.TipSetKey) (*types.TipSet, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ChainGetTipSet", arg0, arg1) @@ -293,13 +308,13 @@ func (m *MockFullNode) ChainGetTipSet(arg0 context.Context, arg1 types.TipSetKey return ret0, ret1 } -// ChainGetTipSet indicates an expected call of ChainGetTipSet +// ChainGetTipSet indicates an expected call of ChainGetTipSet. 
func (mr *MockFullNodeMockRecorder) ChainGetTipSet(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainGetTipSet", reflect.TypeOf((*MockFullNode)(nil).ChainGetTipSet), arg0, arg1) } -// ChainGetTipSetByHeight mocks base method +// ChainGetTipSetByHeight mocks base method. func (m *MockFullNode) ChainGetTipSetByHeight(arg0 context.Context, arg1 abi.ChainEpoch, arg2 types.TipSetKey) (*types.TipSet, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ChainGetTipSetByHeight", arg0, arg1, arg2) @@ -308,13 +323,13 @@ func (m *MockFullNode) ChainGetTipSetByHeight(arg0 context.Context, arg1 abi.Cha return ret0, ret1 } -// ChainGetTipSetByHeight indicates an expected call of ChainGetTipSetByHeight +// ChainGetTipSetByHeight indicates an expected call of ChainGetTipSetByHeight. func (mr *MockFullNodeMockRecorder) ChainGetTipSetByHeight(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainGetTipSetByHeight", reflect.TypeOf((*MockFullNode)(nil).ChainGetTipSetByHeight), arg0, arg1, arg2) } -// ChainHasObj mocks base method +// ChainHasObj mocks base method. func (m *MockFullNode) ChainHasObj(arg0 context.Context, arg1 cid.Cid) (bool, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ChainHasObj", arg0, arg1) @@ -323,13 +338,13 @@ func (m *MockFullNode) ChainHasObj(arg0 context.Context, arg1 cid.Cid) (bool, er return ret0, ret1 } -// ChainHasObj indicates an expected call of ChainHasObj +// ChainHasObj indicates an expected call of ChainHasObj. func (mr *MockFullNodeMockRecorder) ChainHasObj(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainHasObj", reflect.TypeOf((*MockFullNode)(nil).ChainHasObj), arg0, arg1) } -// ChainHead mocks base method +// ChainHead mocks base method. 
func (m *MockFullNode) ChainHead(arg0 context.Context) (*types.TipSet, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ChainHead", arg0) @@ -338,13 +353,13 @@ func (m *MockFullNode) ChainHead(arg0 context.Context) (*types.TipSet, error) { return ret0, ret1 } -// ChainHead indicates an expected call of ChainHead +// ChainHead indicates an expected call of ChainHead. func (mr *MockFullNodeMockRecorder) ChainHead(arg0 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainHead", reflect.TypeOf((*MockFullNode)(nil).ChainHead), arg0) } -// ChainNotify mocks base method +// ChainNotify mocks base method. func (m *MockFullNode) ChainNotify(arg0 context.Context) (<-chan []*api.HeadChange, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ChainNotify", arg0) @@ -353,13 +368,13 @@ func (m *MockFullNode) ChainNotify(arg0 context.Context) (<-chan []*api.HeadChan return ret0, ret1 } -// ChainNotify indicates an expected call of ChainNotify +// ChainNotify indicates an expected call of ChainNotify. func (mr *MockFullNodeMockRecorder) ChainNotify(arg0 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainNotify", reflect.TypeOf((*MockFullNode)(nil).ChainNotify), arg0) } -// ChainReadObj mocks base method +// ChainReadObj mocks base method. func (m *MockFullNode) ChainReadObj(arg0 context.Context, arg1 cid.Cid) ([]byte, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ChainReadObj", arg0, arg1) @@ -368,13 +383,13 @@ func (m *MockFullNode) ChainReadObj(arg0 context.Context, arg1 cid.Cid) ([]byte, return ret0, ret1 } -// ChainReadObj indicates an expected call of ChainReadObj +// ChainReadObj indicates an expected call of ChainReadObj. 
func (mr *MockFullNodeMockRecorder) ChainReadObj(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainReadObj", reflect.TypeOf((*MockFullNode)(nil).ChainReadObj), arg0, arg1) } -// ChainSetHead mocks base method +// ChainSetHead mocks base method. func (m *MockFullNode) ChainSetHead(arg0 context.Context, arg1 types.TipSetKey) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ChainSetHead", arg0, arg1) @@ -382,13 +397,13 @@ func (m *MockFullNode) ChainSetHead(arg0 context.Context, arg1 types.TipSetKey) return ret0 } -// ChainSetHead indicates an expected call of ChainSetHead +// ChainSetHead indicates an expected call of ChainSetHead. func (mr *MockFullNodeMockRecorder) ChainSetHead(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainSetHead", reflect.TypeOf((*MockFullNode)(nil).ChainSetHead), arg0, arg1) } -// ChainStatObj mocks base method +// ChainStatObj mocks base method. func (m *MockFullNode) ChainStatObj(arg0 context.Context, arg1, arg2 cid.Cid) (api.ObjStat, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ChainStatObj", arg0, arg1, arg2) @@ -397,13 +412,13 @@ func (m *MockFullNode) ChainStatObj(arg0 context.Context, arg1, arg2 cid.Cid) (a return ret0, ret1 } -// ChainStatObj indicates an expected call of ChainStatObj +// ChainStatObj indicates an expected call of ChainStatObj. func (mr *MockFullNodeMockRecorder) ChainStatObj(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainStatObj", reflect.TypeOf((*MockFullNode)(nil).ChainStatObj), arg0, arg1, arg2) } -// ChainTipSetWeight mocks base method +// ChainTipSetWeight mocks base method. 
func (m *MockFullNode) ChainTipSetWeight(arg0 context.Context, arg1 types.TipSetKey) (big.Int, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ChainTipSetWeight", arg0, arg1) @@ -412,13 +427,13 @@ func (m *MockFullNode) ChainTipSetWeight(arg0 context.Context, arg1 types.TipSet return ret0, ret1 } -// ChainTipSetWeight indicates an expected call of ChainTipSetWeight +// ChainTipSetWeight indicates an expected call of ChainTipSetWeight. func (mr *MockFullNodeMockRecorder) ChainTipSetWeight(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainTipSetWeight", reflect.TypeOf((*MockFullNode)(nil).ChainTipSetWeight), arg0, arg1) } -// ClientCalcCommP mocks base method +// ClientCalcCommP mocks base method. func (m *MockFullNode) ClientCalcCommP(arg0 context.Context, arg1 string) (*api.CommPRet, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ClientCalcCommP", arg0, arg1) @@ -427,13 +442,13 @@ func (m *MockFullNode) ClientCalcCommP(arg0 context.Context, arg1 string) (*api. return ret0, ret1 } -// ClientCalcCommP indicates an expected call of ClientCalcCommP +// ClientCalcCommP indicates an expected call of ClientCalcCommP. func (mr *MockFullNodeMockRecorder) ClientCalcCommP(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientCalcCommP", reflect.TypeOf((*MockFullNode)(nil).ClientCalcCommP), arg0, arg1) } -// ClientCancelDataTransfer mocks base method +// ClientCancelDataTransfer mocks base method. 
func (m *MockFullNode) ClientCancelDataTransfer(arg0 context.Context, arg1 datatransfer.TransferID, arg2 peer.ID, arg3 bool) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ClientCancelDataTransfer", arg0, arg1, arg2, arg3) @@ -441,13 +456,13 @@ func (m *MockFullNode) ClientCancelDataTransfer(arg0 context.Context, arg1 datat return ret0 } -// ClientCancelDataTransfer indicates an expected call of ClientCancelDataTransfer +// ClientCancelDataTransfer indicates an expected call of ClientCancelDataTransfer. func (mr *MockFullNodeMockRecorder) ClientCancelDataTransfer(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientCancelDataTransfer", reflect.TypeOf((*MockFullNode)(nil).ClientCancelDataTransfer), arg0, arg1, arg2, arg3) } -// ClientCancelRetrievalDeal mocks base method +// ClientCancelRetrievalDeal mocks base method. func (m *MockFullNode) ClientCancelRetrievalDeal(arg0 context.Context, arg1 retrievalmarket.DealID) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ClientCancelRetrievalDeal", arg0, arg1) @@ -455,13 +470,13 @@ func (m *MockFullNode) ClientCancelRetrievalDeal(arg0 context.Context, arg1 retr return ret0 } -// ClientCancelRetrievalDeal indicates an expected call of ClientCancelRetrievalDeal +// ClientCancelRetrievalDeal indicates an expected call of ClientCancelRetrievalDeal. func (mr *MockFullNodeMockRecorder) ClientCancelRetrievalDeal(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientCancelRetrievalDeal", reflect.TypeOf((*MockFullNode)(nil).ClientCancelRetrievalDeal), arg0, arg1) } -// ClientDataTransferUpdates mocks base method +// ClientDataTransferUpdates mocks base method. 
func (m *MockFullNode) ClientDataTransferUpdates(arg0 context.Context) (<-chan api.DataTransferChannel, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ClientDataTransferUpdates", arg0) @@ -470,13 +485,13 @@ func (m *MockFullNode) ClientDataTransferUpdates(arg0 context.Context) (<-chan a return ret0, ret1 } -// ClientDataTransferUpdates indicates an expected call of ClientDataTransferUpdates +// ClientDataTransferUpdates indicates an expected call of ClientDataTransferUpdates. func (mr *MockFullNodeMockRecorder) ClientDataTransferUpdates(arg0 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientDataTransferUpdates", reflect.TypeOf((*MockFullNode)(nil).ClientDataTransferUpdates), arg0) } -// ClientDealPieceCID mocks base method +// ClientDealPieceCID mocks base method. func (m *MockFullNode) ClientDealPieceCID(arg0 context.Context, arg1 cid.Cid) (api.DataCIDSize, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ClientDealPieceCID", arg0, arg1) @@ -485,13 +500,13 @@ func (m *MockFullNode) ClientDealPieceCID(arg0 context.Context, arg1 cid.Cid) (a return ret0, ret1 } -// ClientDealPieceCID indicates an expected call of ClientDealPieceCID +// ClientDealPieceCID indicates an expected call of ClientDealPieceCID. func (mr *MockFullNodeMockRecorder) ClientDealPieceCID(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientDealPieceCID", reflect.TypeOf((*MockFullNode)(nil).ClientDealPieceCID), arg0, arg1) } -// ClientDealSize mocks base method +// ClientDealSize mocks base method. 
func (m *MockFullNode) ClientDealSize(arg0 context.Context, arg1 cid.Cid) (api.DataSize, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ClientDealSize", arg0, arg1) @@ -500,13 +515,13 @@ func (m *MockFullNode) ClientDealSize(arg0 context.Context, arg1 cid.Cid) (api.D return ret0, ret1 } -// ClientDealSize indicates an expected call of ClientDealSize +// ClientDealSize indicates an expected call of ClientDealSize. func (mr *MockFullNodeMockRecorder) ClientDealSize(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientDealSize", reflect.TypeOf((*MockFullNode)(nil).ClientDealSize), arg0, arg1) } -// ClientFindData mocks base method +// ClientFindData mocks base method. func (m *MockFullNode) ClientFindData(arg0 context.Context, arg1 cid.Cid, arg2 *cid.Cid) ([]api.QueryOffer, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ClientFindData", arg0, arg1, arg2) @@ -515,13 +530,13 @@ func (m *MockFullNode) ClientFindData(arg0 context.Context, arg1 cid.Cid, arg2 * return ret0, ret1 } -// ClientFindData indicates an expected call of ClientFindData +// ClientFindData indicates an expected call of ClientFindData. func (mr *MockFullNodeMockRecorder) ClientFindData(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientFindData", reflect.TypeOf((*MockFullNode)(nil).ClientFindData), arg0, arg1, arg2) } -// ClientGenCar mocks base method +// ClientGenCar mocks base method. func (m *MockFullNode) ClientGenCar(arg0 context.Context, arg1 api.FileRef, arg2 string) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ClientGenCar", arg0, arg1, arg2) @@ -529,13 +544,13 @@ func (m *MockFullNode) ClientGenCar(arg0 context.Context, arg1 api.FileRef, arg2 return ret0 } -// ClientGenCar indicates an expected call of ClientGenCar +// ClientGenCar indicates an expected call of ClientGenCar. 
func (mr *MockFullNodeMockRecorder) ClientGenCar(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientGenCar", reflect.TypeOf((*MockFullNode)(nil).ClientGenCar), arg0, arg1, arg2) } -// ClientGetDealInfo mocks base method +// ClientGetDealInfo mocks base method. func (m *MockFullNode) ClientGetDealInfo(arg0 context.Context, arg1 cid.Cid) (*api.DealInfo, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ClientGetDealInfo", arg0, arg1) @@ -544,13 +559,13 @@ func (m *MockFullNode) ClientGetDealInfo(arg0 context.Context, arg1 cid.Cid) (*a return ret0, ret1 } -// ClientGetDealInfo indicates an expected call of ClientGetDealInfo +// ClientGetDealInfo indicates an expected call of ClientGetDealInfo. func (mr *MockFullNodeMockRecorder) ClientGetDealInfo(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientGetDealInfo", reflect.TypeOf((*MockFullNode)(nil).ClientGetDealInfo), arg0, arg1) } -// ClientGetDealStatus mocks base method +// ClientGetDealStatus mocks base method. func (m *MockFullNode) ClientGetDealStatus(arg0 context.Context, arg1 uint64) (string, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ClientGetDealStatus", arg0, arg1) @@ -559,13 +574,13 @@ func (m *MockFullNode) ClientGetDealStatus(arg0 context.Context, arg1 uint64) (s return ret0, ret1 } -// ClientGetDealStatus indicates an expected call of ClientGetDealStatus +// ClientGetDealStatus indicates an expected call of ClientGetDealStatus. func (mr *MockFullNodeMockRecorder) ClientGetDealStatus(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientGetDealStatus", reflect.TypeOf((*MockFullNode)(nil).ClientGetDealStatus), arg0, arg1) } -// ClientGetDealUpdates mocks base method +// ClientGetDealUpdates mocks base method. 
func (m *MockFullNode) ClientGetDealUpdates(arg0 context.Context) (<-chan api.DealInfo, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ClientGetDealUpdates", arg0) @@ -574,13 +589,28 @@ func (m *MockFullNode) ClientGetDealUpdates(arg0 context.Context) (<-chan api.De return ret0, ret1 } -// ClientGetDealUpdates indicates an expected call of ClientGetDealUpdates +// ClientGetDealUpdates indicates an expected call of ClientGetDealUpdates. func (mr *MockFullNodeMockRecorder) ClientGetDealUpdates(arg0 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientGetDealUpdates", reflect.TypeOf((*MockFullNode)(nil).ClientGetDealUpdates), arg0) } -// ClientHasLocal mocks base method +// ClientGetRetrievalUpdates mocks base method. +func (m *MockFullNode) ClientGetRetrievalUpdates(arg0 context.Context) (<-chan api.RetrievalInfo, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ClientGetRetrievalUpdates", arg0) + ret0, _ := ret[0].(<-chan api.RetrievalInfo) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ClientGetRetrievalUpdates indicates an expected call of ClientGetRetrievalUpdates. +func (mr *MockFullNodeMockRecorder) ClientGetRetrievalUpdates(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientGetRetrievalUpdates", reflect.TypeOf((*MockFullNode)(nil).ClientGetRetrievalUpdates), arg0) +} + +// ClientHasLocal mocks base method. func (m *MockFullNode) ClientHasLocal(arg0 context.Context, arg1 cid.Cid) (bool, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ClientHasLocal", arg0, arg1) @@ -589,13 +619,13 @@ func (m *MockFullNode) ClientHasLocal(arg0 context.Context, arg1 cid.Cid) (bool, return ret0, ret1 } -// ClientHasLocal indicates an expected call of ClientHasLocal +// ClientHasLocal indicates an expected call of ClientHasLocal. 
func (mr *MockFullNodeMockRecorder) ClientHasLocal(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientHasLocal", reflect.TypeOf((*MockFullNode)(nil).ClientHasLocal), arg0, arg1) } -// ClientImport mocks base method +// ClientImport mocks base method. func (m *MockFullNode) ClientImport(arg0 context.Context, arg1 api.FileRef) (*api.ImportRes, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ClientImport", arg0, arg1) @@ -604,13 +634,13 @@ func (m *MockFullNode) ClientImport(arg0 context.Context, arg1 api.FileRef) (*ap return ret0, ret1 } -// ClientImport indicates an expected call of ClientImport +// ClientImport indicates an expected call of ClientImport. func (mr *MockFullNodeMockRecorder) ClientImport(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientImport", reflect.TypeOf((*MockFullNode)(nil).ClientImport), arg0, arg1) } -// ClientListDataTransfers mocks base method +// ClientListDataTransfers mocks base method. func (m *MockFullNode) ClientListDataTransfers(arg0 context.Context) ([]api.DataTransferChannel, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ClientListDataTransfers", arg0) @@ -619,13 +649,13 @@ func (m *MockFullNode) ClientListDataTransfers(arg0 context.Context) ([]api.Data return ret0, ret1 } -// ClientListDataTransfers indicates an expected call of ClientListDataTransfers +// ClientListDataTransfers indicates an expected call of ClientListDataTransfers. func (mr *MockFullNodeMockRecorder) ClientListDataTransfers(arg0 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientListDataTransfers", reflect.TypeOf((*MockFullNode)(nil).ClientListDataTransfers), arg0) } -// ClientListDeals mocks base method +// ClientListDeals mocks base method. 
func (m *MockFullNode) ClientListDeals(arg0 context.Context) ([]api.DealInfo, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ClientListDeals", arg0) @@ -634,13 +664,13 @@ func (m *MockFullNode) ClientListDeals(arg0 context.Context) ([]api.DealInfo, er return ret0, ret1 } -// ClientListDeals indicates an expected call of ClientListDeals +// ClientListDeals indicates an expected call of ClientListDeals. func (mr *MockFullNodeMockRecorder) ClientListDeals(arg0 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientListDeals", reflect.TypeOf((*MockFullNode)(nil).ClientListDeals), arg0) } -// ClientListImports mocks base method +// ClientListImports mocks base method. func (m *MockFullNode) ClientListImports(arg0 context.Context) ([]api.Import, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ClientListImports", arg0) @@ -649,13 +679,28 @@ func (m *MockFullNode) ClientListImports(arg0 context.Context) ([]api.Import, er return ret0, ret1 } -// ClientListImports indicates an expected call of ClientListImports +// ClientListImports indicates an expected call of ClientListImports. func (mr *MockFullNodeMockRecorder) ClientListImports(arg0 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientListImports", reflect.TypeOf((*MockFullNode)(nil).ClientListImports), arg0) } -// ClientMinerQueryOffer mocks base method +// ClientListRetrievals mocks base method. +func (m *MockFullNode) ClientListRetrievals(arg0 context.Context) ([]api.RetrievalInfo, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ClientListRetrievals", arg0) + ret0, _ := ret[0].([]api.RetrievalInfo) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ClientListRetrievals indicates an expected call of ClientListRetrievals. 
+func (mr *MockFullNodeMockRecorder) ClientListRetrievals(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientListRetrievals", reflect.TypeOf((*MockFullNode)(nil).ClientListRetrievals), arg0) +} + +// ClientMinerQueryOffer mocks base method. func (m *MockFullNode) ClientMinerQueryOffer(arg0 context.Context, arg1 address.Address, arg2 cid.Cid, arg3 *cid.Cid) (api.QueryOffer, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ClientMinerQueryOffer", arg0, arg1, arg2, arg3) @@ -664,13 +709,13 @@ func (m *MockFullNode) ClientMinerQueryOffer(arg0 context.Context, arg1 address. return ret0, ret1 } -// ClientMinerQueryOffer indicates an expected call of ClientMinerQueryOffer +// ClientMinerQueryOffer indicates an expected call of ClientMinerQueryOffer. func (mr *MockFullNodeMockRecorder) ClientMinerQueryOffer(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientMinerQueryOffer", reflect.TypeOf((*MockFullNode)(nil).ClientMinerQueryOffer), arg0, arg1, arg2, arg3) } -// ClientQueryAsk mocks base method +// ClientQueryAsk mocks base method. func (m *MockFullNode) ClientQueryAsk(arg0 context.Context, arg1 peer.ID, arg2 address.Address) (*storagemarket.StorageAsk, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ClientQueryAsk", arg0, arg1, arg2) @@ -679,13 +724,13 @@ func (m *MockFullNode) ClientQueryAsk(arg0 context.Context, arg1 peer.ID, arg2 a return ret0, ret1 } -// ClientQueryAsk indicates an expected call of ClientQueryAsk +// ClientQueryAsk indicates an expected call of ClientQueryAsk. 
func (mr *MockFullNodeMockRecorder) ClientQueryAsk(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientQueryAsk", reflect.TypeOf((*MockFullNode)(nil).ClientQueryAsk), arg0, arg1, arg2) } -// ClientRemoveImport mocks base method +// ClientRemoveImport mocks base method. func (m *MockFullNode) ClientRemoveImport(arg0 context.Context, arg1 multistore.StoreID) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ClientRemoveImport", arg0, arg1) @@ -693,13 +738,13 @@ func (m *MockFullNode) ClientRemoveImport(arg0 context.Context, arg1 multistore. return ret0 } -// ClientRemoveImport indicates an expected call of ClientRemoveImport +// ClientRemoveImport indicates an expected call of ClientRemoveImport. func (mr *MockFullNodeMockRecorder) ClientRemoveImport(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientRemoveImport", reflect.TypeOf((*MockFullNode)(nil).ClientRemoveImport), arg0, arg1) } -// ClientRestartDataTransfer mocks base method +// ClientRestartDataTransfer mocks base method. func (m *MockFullNode) ClientRestartDataTransfer(arg0 context.Context, arg1 datatransfer.TransferID, arg2 peer.ID, arg3 bool) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ClientRestartDataTransfer", arg0, arg1, arg2, arg3) @@ -707,13 +752,13 @@ func (m *MockFullNode) ClientRestartDataTransfer(arg0 context.Context, arg1 data return ret0 } -// ClientRestartDataTransfer indicates an expected call of ClientRestartDataTransfer +// ClientRestartDataTransfer indicates an expected call of ClientRestartDataTransfer. 
func (mr *MockFullNodeMockRecorder) ClientRestartDataTransfer(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientRestartDataTransfer", reflect.TypeOf((*MockFullNode)(nil).ClientRestartDataTransfer), arg0, arg1, arg2, arg3) } -// ClientRetrieve mocks base method +// ClientRetrieve mocks base method. func (m *MockFullNode) ClientRetrieve(arg0 context.Context, arg1 api.RetrievalOrder, arg2 *api.FileRef) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ClientRetrieve", arg0, arg1, arg2) @@ -721,13 +766,13 @@ func (m *MockFullNode) ClientRetrieve(arg0 context.Context, arg1 api.RetrievalOr return ret0 } -// ClientRetrieve indicates an expected call of ClientRetrieve +// ClientRetrieve indicates an expected call of ClientRetrieve. func (mr *MockFullNodeMockRecorder) ClientRetrieve(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientRetrieve", reflect.TypeOf((*MockFullNode)(nil).ClientRetrieve), arg0, arg1, arg2) } -// ClientRetrieveTryRestartInsufficientFunds mocks base method +// ClientRetrieveTryRestartInsufficientFunds mocks base method. func (m *MockFullNode) ClientRetrieveTryRestartInsufficientFunds(arg0 context.Context, arg1 address.Address) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ClientRetrieveTryRestartInsufficientFunds", arg0, arg1) @@ -735,13 +780,13 @@ func (m *MockFullNode) ClientRetrieveTryRestartInsufficientFunds(arg0 context.Co return ret0 } -// ClientRetrieveTryRestartInsufficientFunds indicates an expected call of ClientRetrieveTryRestartInsufficientFunds +// ClientRetrieveTryRestartInsufficientFunds indicates an expected call of ClientRetrieveTryRestartInsufficientFunds. 
func (mr *MockFullNodeMockRecorder) ClientRetrieveTryRestartInsufficientFunds(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientRetrieveTryRestartInsufficientFunds", reflect.TypeOf((*MockFullNode)(nil).ClientRetrieveTryRestartInsufficientFunds), arg0, arg1) } -// ClientRetrieveWithEvents mocks base method +// ClientRetrieveWithEvents mocks base method. func (m *MockFullNode) ClientRetrieveWithEvents(arg0 context.Context, arg1 api.RetrievalOrder, arg2 *api.FileRef) (<-chan marketevents.RetrievalEvent, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ClientRetrieveWithEvents", arg0, arg1, arg2) @@ -750,13 +795,13 @@ func (m *MockFullNode) ClientRetrieveWithEvents(arg0 context.Context, arg1 api.R return ret0, ret1 } -// ClientRetrieveWithEvents indicates an expected call of ClientRetrieveWithEvents +// ClientRetrieveWithEvents indicates an expected call of ClientRetrieveWithEvents. func (mr *MockFullNodeMockRecorder) ClientRetrieveWithEvents(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientRetrieveWithEvents", reflect.TypeOf((*MockFullNode)(nil).ClientRetrieveWithEvents), arg0, arg1, arg2) } -// ClientStartDeal mocks base method +// ClientStartDeal mocks base method. func (m *MockFullNode) ClientStartDeal(arg0 context.Context, arg1 *api.StartDealParams) (*cid.Cid, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ClientStartDeal", arg0, arg1) @@ -765,13 +810,28 @@ func (m *MockFullNode) ClientStartDeal(arg0 context.Context, arg1 *api.StartDeal return ret0, ret1 } -// ClientStartDeal indicates an expected call of ClientStartDeal +// ClientStartDeal indicates an expected call of ClientStartDeal. 
func (mr *MockFullNodeMockRecorder) ClientStartDeal(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientStartDeal", reflect.TypeOf((*MockFullNode)(nil).ClientStartDeal), arg0, arg1) } -// Closing mocks base method +// ClientStatelessDeal mocks base method. +func (m *MockFullNode) ClientStatelessDeal(arg0 context.Context, arg1 *api.StartDealParams) (*cid.Cid, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ClientStatelessDeal", arg0, arg1) + ret0, _ := ret[0].(*cid.Cid) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ClientStatelessDeal indicates an expected call of ClientStatelessDeal. +func (mr *MockFullNodeMockRecorder) ClientStatelessDeal(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientStatelessDeal", reflect.TypeOf((*MockFullNode)(nil).ClientStatelessDeal), arg0, arg1) +} + +// Closing mocks base method. func (m *MockFullNode) Closing(arg0 context.Context) (<-chan struct{}, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Closing", arg0) @@ -780,13 +840,13 @@ func (m *MockFullNode) Closing(arg0 context.Context) (<-chan struct{}, error) { return ret0, ret1 } -// Closing indicates an expected call of Closing +// Closing indicates an expected call of Closing. func (mr *MockFullNodeMockRecorder) Closing(arg0 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Closing", reflect.TypeOf((*MockFullNode)(nil).Closing), arg0) } -// CreateBackup mocks base method +// CreateBackup mocks base method. 
func (m *MockFullNode) CreateBackup(arg0 context.Context, arg1 string) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "CreateBackup", arg0, arg1) @@ -794,13 +854,13 @@ func (m *MockFullNode) CreateBackup(arg0 context.Context, arg1 string) error { return ret0 } -// CreateBackup indicates an expected call of CreateBackup +// CreateBackup indicates an expected call of CreateBackup. func (mr *MockFullNodeMockRecorder) CreateBackup(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateBackup", reflect.TypeOf((*MockFullNode)(nil).CreateBackup), arg0, arg1) } -// Discover mocks base method +// Discover mocks base method. func (m *MockFullNode) Discover(arg0 context.Context) (apitypes.OpenRPCDocument, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Discover", arg0) @@ -809,13 +869,13 @@ func (m *MockFullNode) Discover(arg0 context.Context) (apitypes.OpenRPCDocument, return ret0, ret1 } -// Discover indicates an expected call of Discover +// Discover indicates an expected call of Discover. func (mr *MockFullNodeMockRecorder) Discover(arg0 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Discover", reflect.TypeOf((*MockFullNode)(nil).Discover), arg0) } -// GasEstimateFeeCap mocks base method +// GasEstimateFeeCap mocks base method. func (m *MockFullNode) GasEstimateFeeCap(arg0 context.Context, arg1 *types.Message, arg2 int64, arg3 types.TipSetKey) (big.Int, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GasEstimateFeeCap", arg0, arg1, arg2, arg3) @@ -824,13 +884,13 @@ func (m *MockFullNode) GasEstimateFeeCap(arg0 context.Context, arg1 *types.Messa return ret0, ret1 } -// GasEstimateFeeCap indicates an expected call of GasEstimateFeeCap +// GasEstimateFeeCap indicates an expected call of GasEstimateFeeCap. 
func (mr *MockFullNodeMockRecorder) GasEstimateFeeCap(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GasEstimateFeeCap", reflect.TypeOf((*MockFullNode)(nil).GasEstimateFeeCap), arg0, arg1, arg2, arg3) } -// GasEstimateGasLimit mocks base method +// GasEstimateGasLimit mocks base method. func (m *MockFullNode) GasEstimateGasLimit(arg0 context.Context, arg1 *types.Message, arg2 types.TipSetKey) (int64, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GasEstimateGasLimit", arg0, arg1, arg2) @@ -839,13 +899,13 @@ func (m *MockFullNode) GasEstimateGasLimit(arg0 context.Context, arg1 *types.Mes return ret0, ret1 } -// GasEstimateGasLimit indicates an expected call of GasEstimateGasLimit +// GasEstimateGasLimit indicates an expected call of GasEstimateGasLimit. func (mr *MockFullNodeMockRecorder) GasEstimateGasLimit(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GasEstimateGasLimit", reflect.TypeOf((*MockFullNode)(nil).GasEstimateGasLimit), arg0, arg1, arg2) } -// GasEstimateGasPremium mocks base method +// GasEstimateGasPremium mocks base method. func (m *MockFullNode) GasEstimateGasPremium(arg0 context.Context, arg1 uint64, arg2 address.Address, arg3 int64, arg4 types.TipSetKey) (big.Int, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GasEstimateGasPremium", arg0, arg1, arg2, arg3, arg4) @@ -854,13 +914,13 @@ func (m *MockFullNode) GasEstimateGasPremium(arg0 context.Context, arg1 uint64, return ret0, ret1 } -// GasEstimateGasPremium indicates an expected call of GasEstimateGasPremium +// GasEstimateGasPremium indicates an expected call of GasEstimateGasPremium. 
func (mr *MockFullNodeMockRecorder) GasEstimateGasPremium(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GasEstimateGasPremium", reflect.TypeOf((*MockFullNode)(nil).GasEstimateGasPremium), arg0, arg1, arg2, arg3, arg4) } -// GasEstimateMessageGas mocks base method +// GasEstimateMessageGas mocks base method. func (m *MockFullNode) GasEstimateMessageGas(arg0 context.Context, arg1 *types.Message, arg2 *api.MessageSendSpec, arg3 types.TipSetKey) (*types.Message, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GasEstimateMessageGas", arg0, arg1, arg2, arg3) @@ -869,13 +929,13 @@ func (m *MockFullNode) GasEstimateMessageGas(arg0 context.Context, arg1 *types.M return ret0, ret1 } -// GasEstimateMessageGas indicates an expected call of GasEstimateMessageGas +// GasEstimateMessageGas indicates an expected call of GasEstimateMessageGas. func (mr *MockFullNodeMockRecorder) GasEstimateMessageGas(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GasEstimateMessageGas", reflect.TypeOf((*MockFullNode)(nil).GasEstimateMessageGas), arg0, arg1, arg2, arg3) } -// ID mocks base method +// ID mocks base method. func (m *MockFullNode) ID(arg0 context.Context) (peer.ID, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ID", arg0) @@ -884,13 +944,13 @@ func (m *MockFullNode) ID(arg0 context.Context) (peer.ID, error) { return ret0, ret1 } -// ID indicates an expected call of ID +// ID indicates an expected call of ID. func (mr *MockFullNodeMockRecorder) ID(arg0 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ID", reflect.TypeOf((*MockFullNode)(nil).ID), arg0) } -// LogList mocks base method +// LogList mocks base method. 
func (m *MockFullNode) LogList(arg0 context.Context) ([]string, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "LogList", arg0) @@ -899,13 +959,13 @@ func (m *MockFullNode) LogList(arg0 context.Context) ([]string, error) { return ret0, ret1 } -// LogList indicates an expected call of LogList +// LogList indicates an expected call of LogList. func (mr *MockFullNodeMockRecorder) LogList(arg0 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LogList", reflect.TypeOf((*MockFullNode)(nil).LogList), arg0) } -// LogSetLevel mocks base method +// LogSetLevel mocks base method. func (m *MockFullNode) LogSetLevel(arg0 context.Context, arg1, arg2 string) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "LogSetLevel", arg0, arg1, arg2) @@ -913,13 +973,13 @@ func (m *MockFullNode) LogSetLevel(arg0 context.Context, arg1, arg2 string) erro return ret0 } -// LogSetLevel indicates an expected call of LogSetLevel +// LogSetLevel indicates an expected call of LogSetLevel. func (mr *MockFullNodeMockRecorder) LogSetLevel(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LogSetLevel", reflect.TypeOf((*MockFullNode)(nil).LogSetLevel), arg0, arg1, arg2) } -// MarketAddBalance mocks base method +// MarketAddBalance mocks base method. func (m *MockFullNode) MarketAddBalance(arg0 context.Context, arg1, arg2 address.Address, arg3 big.Int) (cid.Cid, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "MarketAddBalance", arg0, arg1, arg2, arg3) @@ -928,13 +988,13 @@ func (m *MockFullNode) MarketAddBalance(arg0 context.Context, arg1, arg2 address return ret0, ret1 } -// MarketAddBalance indicates an expected call of MarketAddBalance +// MarketAddBalance indicates an expected call of MarketAddBalance. 
func (mr *MockFullNodeMockRecorder) MarketAddBalance(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MarketAddBalance", reflect.TypeOf((*MockFullNode)(nil).MarketAddBalance), arg0, arg1, arg2, arg3) } -// MarketGetReserved mocks base method +// MarketGetReserved mocks base method. func (m *MockFullNode) MarketGetReserved(arg0 context.Context, arg1 address.Address) (big.Int, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "MarketGetReserved", arg0, arg1) @@ -943,13 +1003,13 @@ func (m *MockFullNode) MarketGetReserved(arg0 context.Context, arg1 address.Addr return ret0, ret1 } -// MarketGetReserved indicates an expected call of MarketGetReserved +// MarketGetReserved indicates an expected call of MarketGetReserved. func (mr *MockFullNodeMockRecorder) MarketGetReserved(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MarketGetReserved", reflect.TypeOf((*MockFullNode)(nil).MarketGetReserved), arg0, arg1) } -// MarketReleaseFunds mocks base method +// MarketReleaseFunds mocks base method. func (m *MockFullNode) MarketReleaseFunds(arg0 context.Context, arg1 address.Address, arg2 big.Int) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "MarketReleaseFunds", arg0, arg1, arg2) @@ -957,13 +1017,13 @@ func (m *MockFullNode) MarketReleaseFunds(arg0 context.Context, arg1 address.Add return ret0 } -// MarketReleaseFunds indicates an expected call of MarketReleaseFunds +// MarketReleaseFunds indicates an expected call of MarketReleaseFunds. func (mr *MockFullNodeMockRecorder) MarketReleaseFunds(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MarketReleaseFunds", reflect.TypeOf((*MockFullNode)(nil).MarketReleaseFunds), arg0, arg1, arg2) } -// MarketReserveFunds mocks base method +// MarketReserveFunds mocks base method. 
func (m *MockFullNode) MarketReserveFunds(arg0 context.Context, arg1, arg2 address.Address, arg3 big.Int) (cid.Cid, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "MarketReserveFunds", arg0, arg1, arg2, arg3) @@ -972,13 +1032,13 @@ func (m *MockFullNode) MarketReserveFunds(arg0 context.Context, arg1, arg2 addre return ret0, ret1 } -// MarketReserveFunds indicates an expected call of MarketReserveFunds +// MarketReserveFunds indicates an expected call of MarketReserveFunds. func (mr *MockFullNodeMockRecorder) MarketReserveFunds(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MarketReserveFunds", reflect.TypeOf((*MockFullNode)(nil).MarketReserveFunds), arg0, arg1, arg2, arg3) } -// MarketWithdraw mocks base method +// MarketWithdraw mocks base method. func (m *MockFullNode) MarketWithdraw(arg0 context.Context, arg1, arg2 address.Address, arg3 big.Int) (cid.Cid, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "MarketWithdraw", arg0, arg1, arg2, arg3) @@ -987,13 +1047,13 @@ func (m *MockFullNode) MarketWithdraw(arg0 context.Context, arg1, arg2 address.A return ret0, ret1 } -// MarketWithdraw indicates an expected call of MarketWithdraw +// MarketWithdraw indicates an expected call of MarketWithdraw. func (mr *MockFullNodeMockRecorder) MarketWithdraw(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MarketWithdraw", reflect.TypeOf((*MockFullNode)(nil).MarketWithdraw), arg0, arg1, arg2, arg3) } -// MinerCreateBlock mocks base method +// MinerCreateBlock mocks base method. 
func (m *MockFullNode) MinerCreateBlock(arg0 context.Context, arg1 *api.BlockTemplate) (*types.BlockMsg, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "MinerCreateBlock", arg0, arg1) @@ -1002,13 +1062,13 @@ func (m *MockFullNode) MinerCreateBlock(arg0 context.Context, arg1 *api.BlockTem return ret0, ret1 } -// MinerCreateBlock indicates an expected call of MinerCreateBlock +// MinerCreateBlock indicates an expected call of MinerCreateBlock. func (mr *MockFullNodeMockRecorder) MinerCreateBlock(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MinerCreateBlock", reflect.TypeOf((*MockFullNode)(nil).MinerCreateBlock), arg0, arg1) } -// MinerGetBaseInfo mocks base method +// MinerGetBaseInfo mocks base method. func (m *MockFullNode) MinerGetBaseInfo(arg0 context.Context, arg1 address.Address, arg2 abi.ChainEpoch, arg3 types.TipSetKey) (*api.MiningBaseInfo, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "MinerGetBaseInfo", arg0, arg1, arg2, arg3) @@ -1017,13 +1077,13 @@ func (m *MockFullNode) MinerGetBaseInfo(arg0 context.Context, arg1 address.Addre return ret0, ret1 } -// MinerGetBaseInfo indicates an expected call of MinerGetBaseInfo +// MinerGetBaseInfo indicates an expected call of MinerGetBaseInfo. func (mr *MockFullNodeMockRecorder) MinerGetBaseInfo(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MinerGetBaseInfo", reflect.TypeOf((*MockFullNode)(nil).MinerGetBaseInfo), arg0, arg1, arg2, arg3) } -// MpoolBatchPush mocks base method +// MpoolBatchPush mocks base method. 
func (m *MockFullNode) MpoolBatchPush(arg0 context.Context, arg1 []*types.SignedMessage) ([]cid.Cid, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "MpoolBatchPush", arg0, arg1) @@ -1032,13 +1092,13 @@ func (m *MockFullNode) MpoolBatchPush(arg0 context.Context, arg1 []*types.Signed return ret0, ret1 } -// MpoolBatchPush indicates an expected call of MpoolBatchPush +// MpoolBatchPush indicates an expected call of MpoolBatchPush. func (mr *MockFullNodeMockRecorder) MpoolBatchPush(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MpoolBatchPush", reflect.TypeOf((*MockFullNode)(nil).MpoolBatchPush), arg0, arg1) } -// MpoolBatchPushMessage mocks base method +// MpoolBatchPushMessage mocks base method. func (m *MockFullNode) MpoolBatchPushMessage(arg0 context.Context, arg1 []*types.Message, arg2 *api.MessageSendSpec) ([]*types.SignedMessage, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "MpoolBatchPushMessage", arg0, arg1, arg2) @@ -1047,13 +1107,13 @@ func (m *MockFullNode) MpoolBatchPushMessage(arg0 context.Context, arg1 []*types return ret0, ret1 } -// MpoolBatchPushMessage indicates an expected call of MpoolBatchPushMessage +// MpoolBatchPushMessage indicates an expected call of MpoolBatchPushMessage. func (mr *MockFullNodeMockRecorder) MpoolBatchPushMessage(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MpoolBatchPushMessage", reflect.TypeOf((*MockFullNode)(nil).MpoolBatchPushMessage), arg0, arg1, arg2) } -// MpoolBatchPushUntrusted mocks base method +// MpoolBatchPushUntrusted mocks base method. 
func (m *MockFullNode) MpoolBatchPushUntrusted(arg0 context.Context, arg1 []*types.SignedMessage) ([]cid.Cid, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "MpoolBatchPushUntrusted", arg0, arg1) @@ -1062,13 +1122,58 @@ func (m *MockFullNode) MpoolBatchPushUntrusted(arg0 context.Context, arg1 []*typ return ret0, ret1 } -// MpoolBatchPushUntrusted indicates an expected call of MpoolBatchPushUntrusted +// MpoolBatchPushUntrusted indicates an expected call of MpoolBatchPushUntrusted. func (mr *MockFullNodeMockRecorder) MpoolBatchPushUntrusted(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MpoolBatchPushUntrusted", reflect.TypeOf((*MockFullNode)(nil).MpoolBatchPushUntrusted), arg0, arg1) } -// MpoolClear mocks base method +// MpoolCheckMessages mocks base method. +func (m *MockFullNode) MpoolCheckMessages(arg0 context.Context, arg1 []*api.MessagePrototype) ([][]api.MessageCheckStatus, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MpoolCheckMessages", arg0, arg1) + ret0, _ := ret[0].([][]api.MessageCheckStatus) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MpoolCheckMessages indicates an expected call of MpoolCheckMessages. +func (mr *MockFullNodeMockRecorder) MpoolCheckMessages(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MpoolCheckMessages", reflect.TypeOf((*MockFullNode)(nil).MpoolCheckMessages), arg0, arg1) +} + +// MpoolCheckPendingMessages mocks base method. +func (m *MockFullNode) MpoolCheckPendingMessages(arg0 context.Context, arg1 address.Address) ([][]api.MessageCheckStatus, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MpoolCheckPendingMessages", arg0, arg1) + ret0, _ := ret[0].([][]api.MessageCheckStatus) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MpoolCheckPendingMessages indicates an expected call of MpoolCheckPendingMessages. 
+func (mr *MockFullNodeMockRecorder) MpoolCheckPendingMessages(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MpoolCheckPendingMessages", reflect.TypeOf((*MockFullNode)(nil).MpoolCheckPendingMessages), arg0, arg1) +} + +// MpoolCheckReplaceMessages mocks base method. +func (m *MockFullNode) MpoolCheckReplaceMessages(arg0 context.Context, arg1 []*types.Message) ([][]api.MessageCheckStatus, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MpoolCheckReplaceMessages", arg0, arg1) + ret0, _ := ret[0].([][]api.MessageCheckStatus) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MpoolCheckReplaceMessages indicates an expected call of MpoolCheckReplaceMessages. +func (mr *MockFullNodeMockRecorder) MpoolCheckReplaceMessages(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MpoolCheckReplaceMessages", reflect.TypeOf((*MockFullNode)(nil).MpoolCheckReplaceMessages), arg0, arg1) +} + +// MpoolClear mocks base method. func (m *MockFullNode) MpoolClear(arg0 context.Context, arg1 bool) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "MpoolClear", arg0, arg1) @@ -1076,13 +1181,13 @@ func (m *MockFullNode) MpoolClear(arg0 context.Context, arg1 bool) error { return ret0 } -// MpoolClear indicates an expected call of MpoolClear +// MpoolClear indicates an expected call of MpoolClear. func (mr *MockFullNodeMockRecorder) MpoolClear(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MpoolClear", reflect.TypeOf((*MockFullNode)(nil).MpoolClear), arg0, arg1) } -// MpoolGetConfig mocks base method +// MpoolGetConfig mocks base method. 
func (m *MockFullNode) MpoolGetConfig(arg0 context.Context) (*types.MpoolConfig, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "MpoolGetConfig", arg0) @@ -1091,13 +1196,13 @@ func (m *MockFullNode) MpoolGetConfig(arg0 context.Context) (*types.MpoolConfig, return ret0, ret1 } -// MpoolGetConfig indicates an expected call of MpoolGetConfig +// MpoolGetConfig indicates an expected call of MpoolGetConfig. func (mr *MockFullNodeMockRecorder) MpoolGetConfig(arg0 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MpoolGetConfig", reflect.TypeOf((*MockFullNode)(nil).MpoolGetConfig), arg0) } -// MpoolGetNonce mocks base method +// MpoolGetNonce mocks base method. func (m *MockFullNode) MpoolGetNonce(arg0 context.Context, arg1 address.Address) (uint64, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "MpoolGetNonce", arg0, arg1) @@ -1106,13 +1211,13 @@ func (m *MockFullNode) MpoolGetNonce(arg0 context.Context, arg1 address.Address) return ret0, ret1 } -// MpoolGetNonce indicates an expected call of MpoolGetNonce +// MpoolGetNonce indicates an expected call of MpoolGetNonce. func (mr *MockFullNodeMockRecorder) MpoolGetNonce(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MpoolGetNonce", reflect.TypeOf((*MockFullNode)(nil).MpoolGetNonce), arg0, arg1) } -// MpoolPending mocks base method +// MpoolPending mocks base method. func (m *MockFullNode) MpoolPending(arg0 context.Context, arg1 types.TipSetKey) ([]*types.SignedMessage, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "MpoolPending", arg0, arg1) @@ -1121,13 +1226,13 @@ func (m *MockFullNode) MpoolPending(arg0 context.Context, arg1 types.TipSetKey) return ret0, ret1 } -// MpoolPending indicates an expected call of MpoolPending +// MpoolPending indicates an expected call of MpoolPending. 
func (mr *MockFullNodeMockRecorder) MpoolPending(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MpoolPending", reflect.TypeOf((*MockFullNode)(nil).MpoolPending), arg0, arg1) } -// MpoolPush mocks base method +// MpoolPush mocks base method. func (m *MockFullNode) MpoolPush(arg0 context.Context, arg1 *types.SignedMessage) (cid.Cid, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "MpoolPush", arg0, arg1) @@ -1136,13 +1241,13 @@ func (m *MockFullNode) MpoolPush(arg0 context.Context, arg1 *types.SignedMessage return ret0, ret1 } -// MpoolPush indicates an expected call of MpoolPush +// MpoolPush indicates an expected call of MpoolPush. func (mr *MockFullNodeMockRecorder) MpoolPush(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MpoolPush", reflect.TypeOf((*MockFullNode)(nil).MpoolPush), arg0, arg1) } -// MpoolPushMessage mocks base method +// MpoolPushMessage mocks base method. func (m *MockFullNode) MpoolPushMessage(arg0 context.Context, arg1 *types.Message, arg2 *api.MessageSendSpec) (*types.SignedMessage, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "MpoolPushMessage", arg0, arg1, arg2) @@ -1151,13 +1256,13 @@ func (m *MockFullNode) MpoolPushMessage(arg0 context.Context, arg1 *types.Messag return ret0, ret1 } -// MpoolPushMessage indicates an expected call of MpoolPushMessage +// MpoolPushMessage indicates an expected call of MpoolPushMessage. func (mr *MockFullNodeMockRecorder) MpoolPushMessage(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MpoolPushMessage", reflect.TypeOf((*MockFullNode)(nil).MpoolPushMessage), arg0, arg1, arg2) } -// MpoolPushUntrusted mocks base method +// MpoolPushUntrusted mocks base method. 
func (m *MockFullNode) MpoolPushUntrusted(arg0 context.Context, arg1 *types.SignedMessage) (cid.Cid, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "MpoolPushUntrusted", arg0, arg1) @@ -1166,13 +1271,13 @@ func (m *MockFullNode) MpoolPushUntrusted(arg0 context.Context, arg1 *types.Sign return ret0, ret1 } -// MpoolPushUntrusted indicates an expected call of MpoolPushUntrusted +// MpoolPushUntrusted indicates an expected call of MpoolPushUntrusted. func (mr *MockFullNodeMockRecorder) MpoolPushUntrusted(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MpoolPushUntrusted", reflect.TypeOf((*MockFullNode)(nil).MpoolPushUntrusted), arg0, arg1) } -// MpoolSelect mocks base method +// MpoolSelect mocks base method. func (m *MockFullNode) MpoolSelect(arg0 context.Context, arg1 types.TipSetKey, arg2 float64) ([]*types.SignedMessage, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "MpoolSelect", arg0, arg1, arg2) @@ -1181,13 +1286,13 @@ func (m *MockFullNode) MpoolSelect(arg0 context.Context, arg1 types.TipSetKey, a return ret0, ret1 } -// MpoolSelect indicates an expected call of MpoolSelect +// MpoolSelect indicates an expected call of MpoolSelect. func (mr *MockFullNodeMockRecorder) MpoolSelect(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MpoolSelect", reflect.TypeOf((*MockFullNode)(nil).MpoolSelect), arg0, arg1, arg2) } -// MpoolSetConfig mocks base method +// MpoolSetConfig mocks base method. func (m *MockFullNode) MpoolSetConfig(arg0 context.Context, arg1 *types.MpoolConfig) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "MpoolSetConfig", arg0, arg1) @@ -1195,13 +1300,13 @@ func (m *MockFullNode) MpoolSetConfig(arg0 context.Context, arg1 *types.MpoolCon return ret0 } -// MpoolSetConfig indicates an expected call of MpoolSetConfig +// MpoolSetConfig indicates an expected call of MpoolSetConfig. 
func (mr *MockFullNodeMockRecorder) MpoolSetConfig(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MpoolSetConfig", reflect.TypeOf((*MockFullNode)(nil).MpoolSetConfig), arg0, arg1) } -// MpoolSub mocks base method +// MpoolSub mocks base method. func (m *MockFullNode) MpoolSub(arg0 context.Context) (<-chan api.MpoolUpdate, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "MpoolSub", arg0) @@ -1210,118 +1315,118 @@ func (m *MockFullNode) MpoolSub(arg0 context.Context) (<-chan api.MpoolUpdate, e return ret0, ret1 } -// MpoolSub indicates an expected call of MpoolSub +// MpoolSub indicates an expected call of MpoolSub. func (mr *MockFullNodeMockRecorder) MpoolSub(arg0 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MpoolSub", reflect.TypeOf((*MockFullNode)(nil).MpoolSub), arg0) } -// MsigAddApprove mocks base method -func (m *MockFullNode) MsigAddApprove(arg0 context.Context, arg1, arg2 address.Address, arg3 uint64, arg4, arg5 address.Address, arg6 bool) (cid.Cid, error) { +// MsigAddApprove mocks base method. +func (m *MockFullNode) MsigAddApprove(arg0 context.Context, arg1, arg2 address.Address, arg3 uint64, arg4, arg5 address.Address, arg6 bool) (*api.MessagePrototype, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "MsigAddApprove", arg0, arg1, arg2, arg3, arg4, arg5, arg6) - ret0, _ := ret[0].(cid.Cid) + ret0, _ := ret[0].(*api.MessagePrototype) ret1, _ := ret[1].(error) return ret0, ret1 } -// MsigAddApprove indicates an expected call of MsigAddApprove +// MsigAddApprove indicates an expected call of MsigAddApprove. 
func (mr *MockFullNodeMockRecorder) MsigAddApprove(arg0, arg1, arg2, arg3, arg4, arg5, arg6 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MsigAddApprove", reflect.TypeOf((*MockFullNode)(nil).MsigAddApprove), arg0, arg1, arg2, arg3, arg4, arg5, arg6) } -// MsigAddCancel mocks base method -func (m *MockFullNode) MsigAddCancel(arg0 context.Context, arg1, arg2 address.Address, arg3 uint64, arg4 address.Address, arg5 bool) (cid.Cid, error) { +// MsigAddCancel mocks base method. +func (m *MockFullNode) MsigAddCancel(arg0 context.Context, arg1, arg2 address.Address, arg3 uint64, arg4 address.Address, arg5 bool) (*api.MessagePrototype, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "MsigAddCancel", arg0, arg1, arg2, arg3, arg4, arg5) - ret0, _ := ret[0].(cid.Cid) + ret0, _ := ret[0].(*api.MessagePrototype) ret1, _ := ret[1].(error) return ret0, ret1 } -// MsigAddCancel indicates an expected call of MsigAddCancel +// MsigAddCancel indicates an expected call of MsigAddCancel. func (mr *MockFullNodeMockRecorder) MsigAddCancel(arg0, arg1, arg2, arg3, arg4, arg5 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MsigAddCancel", reflect.TypeOf((*MockFullNode)(nil).MsigAddCancel), arg0, arg1, arg2, arg3, arg4, arg5) } -// MsigAddPropose mocks base method -func (m *MockFullNode) MsigAddPropose(arg0 context.Context, arg1, arg2, arg3 address.Address, arg4 bool) (cid.Cid, error) { +// MsigAddPropose mocks base method. 
+func (m *MockFullNode) MsigAddPropose(arg0 context.Context, arg1, arg2, arg3 address.Address, arg4 bool) (*api.MessagePrototype, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "MsigAddPropose", arg0, arg1, arg2, arg3, arg4) - ret0, _ := ret[0].(cid.Cid) + ret0, _ := ret[0].(*api.MessagePrototype) ret1, _ := ret[1].(error) return ret0, ret1 } -// MsigAddPropose indicates an expected call of MsigAddPropose +// MsigAddPropose indicates an expected call of MsigAddPropose. func (mr *MockFullNodeMockRecorder) MsigAddPropose(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MsigAddPropose", reflect.TypeOf((*MockFullNode)(nil).MsigAddPropose), arg0, arg1, arg2, arg3, arg4) } -// MsigApprove mocks base method -func (m *MockFullNode) MsigApprove(arg0 context.Context, arg1 address.Address, arg2 uint64, arg3 address.Address) (cid.Cid, error) { +// MsigApprove mocks base method. +func (m *MockFullNode) MsigApprove(arg0 context.Context, arg1 address.Address, arg2 uint64, arg3 address.Address) (*api.MessagePrototype, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "MsigApprove", arg0, arg1, arg2, arg3) - ret0, _ := ret[0].(cid.Cid) + ret0, _ := ret[0].(*api.MessagePrototype) ret1, _ := ret[1].(error) return ret0, ret1 } -// MsigApprove indicates an expected call of MsigApprove +// MsigApprove indicates an expected call of MsigApprove. func (mr *MockFullNodeMockRecorder) MsigApprove(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MsigApprove", reflect.TypeOf((*MockFullNode)(nil).MsigApprove), arg0, arg1, arg2, arg3) } -// MsigApproveTxnHash mocks base method -func (m *MockFullNode) MsigApproveTxnHash(arg0 context.Context, arg1 address.Address, arg2 uint64, arg3, arg4 address.Address, arg5 big.Int, arg6 address.Address, arg7 uint64, arg8 []byte) (cid.Cid, error) { +// MsigApproveTxnHash mocks base method. 
+func (m *MockFullNode) MsigApproveTxnHash(arg0 context.Context, arg1 address.Address, arg2 uint64, arg3, arg4 address.Address, arg5 big.Int, arg6 address.Address, arg7 uint64, arg8 []byte) (*api.MessagePrototype, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "MsigApproveTxnHash", arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8) - ret0, _ := ret[0].(cid.Cid) + ret0, _ := ret[0].(*api.MessagePrototype) ret1, _ := ret[1].(error) return ret0, ret1 } -// MsigApproveTxnHash indicates an expected call of MsigApproveTxnHash +// MsigApproveTxnHash indicates an expected call of MsigApproveTxnHash. func (mr *MockFullNodeMockRecorder) MsigApproveTxnHash(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MsigApproveTxnHash", reflect.TypeOf((*MockFullNode)(nil).MsigApproveTxnHash), arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8) } -// MsigCancel mocks base method -func (m *MockFullNode) MsigCancel(arg0 context.Context, arg1 address.Address, arg2 uint64, arg3 address.Address, arg4 big.Int, arg5 address.Address, arg6 uint64, arg7 []byte) (cid.Cid, error) { +// MsigCancel mocks base method. +func (m *MockFullNode) MsigCancel(arg0 context.Context, arg1 address.Address, arg2 uint64, arg3 address.Address, arg4 big.Int, arg5 address.Address, arg6 uint64, arg7 []byte) (*api.MessagePrototype, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "MsigCancel", arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7) - ret0, _ := ret[0].(cid.Cid) + ret0, _ := ret[0].(*api.MessagePrototype) ret1, _ := ret[1].(error) return ret0, ret1 } -// MsigCancel indicates an expected call of MsigCancel +// MsigCancel indicates an expected call of MsigCancel. 
func (mr *MockFullNodeMockRecorder) MsigCancel(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MsigCancel", reflect.TypeOf((*MockFullNode)(nil).MsigCancel), arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7) } -// MsigCreate mocks base method -func (m *MockFullNode) MsigCreate(arg0 context.Context, arg1 uint64, arg2 []address.Address, arg3 abi.ChainEpoch, arg4 big.Int, arg5 address.Address, arg6 big.Int) (cid.Cid, error) { +// MsigCreate mocks base method. +func (m *MockFullNode) MsigCreate(arg0 context.Context, arg1 uint64, arg2 []address.Address, arg3 abi.ChainEpoch, arg4 big.Int, arg5 address.Address, arg6 big.Int) (*api.MessagePrototype, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "MsigCreate", arg0, arg1, arg2, arg3, arg4, arg5, arg6) - ret0, _ := ret[0].(cid.Cid) + ret0, _ := ret[0].(*api.MessagePrototype) ret1, _ := ret[1].(error) return ret0, ret1 } -// MsigCreate indicates an expected call of MsigCreate +// MsigCreate indicates an expected call of MsigCreate. func (mr *MockFullNodeMockRecorder) MsigCreate(arg0, arg1, arg2, arg3, arg4, arg5, arg6 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MsigCreate", reflect.TypeOf((*MockFullNode)(nil).MsigCreate), arg0, arg1, arg2, arg3, arg4, arg5, arg6) } -// MsigGetAvailableBalance mocks base method +// MsigGetAvailableBalance mocks base method. func (m *MockFullNode) MsigGetAvailableBalance(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) (big.Int, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "MsigGetAvailableBalance", arg0, arg1, arg2) @@ -1330,13 +1435,13 @@ func (m *MockFullNode) MsigGetAvailableBalance(arg0 context.Context, arg1 addres return ret0, ret1 } -// MsigGetAvailableBalance indicates an expected call of MsigGetAvailableBalance +// MsigGetAvailableBalance indicates an expected call of MsigGetAvailableBalance. 
func (mr *MockFullNodeMockRecorder) MsigGetAvailableBalance(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MsigGetAvailableBalance", reflect.TypeOf((*MockFullNode)(nil).MsigGetAvailableBalance), arg0, arg1, arg2) } -// MsigGetPending mocks base method +// MsigGetPending mocks base method. func (m *MockFullNode) MsigGetPending(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) ([]*api.MsigTransaction, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "MsigGetPending", arg0, arg1, arg2) @@ -1345,13 +1450,13 @@ func (m *MockFullNode) MsigGetPending(arg0 context.Context, arg1 address.Address return ret0, ret1 } -// MsigGetPending indicates an expected call of MsigGetPending +// MsigGetPending indicates an expected call of MsigGetPending. func (mr *MockFullNodeMockRecorder) MsigGetPending(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MsigGetPending", reflect.TypeOf((*MockFullNode)(nil).MsigGetPending), arg0, arg1, arg2) } -// MsigGetVested mocks base method +// MsigGetVested mocks base method. func (m *MockFullNode) MsigGetVested(arg0 context.Context, arg1 address.Address, arg2, arg3 types.TipSetKey) (big.Int, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "MsigGetVested", arg0, arg1, arg2, arg3) @@ -1360,13 +1465,13 @@ func (m *MockFullNode) MsigGetVested(arg0 context.Context, arg1 address.Address, return ret0, ret1 } -// MsigGetVested indicates an expected call of MsigGetVested +// MsigGetVested indicates an expected call of MsigGetVested. func (mr *MockFullNodeMockRecorder) MsigGetVested(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MsigGetVested", reflect.TypeOf((*MockFullNode)(nil).MsigGetVested), arg0, arg1, arg2, arg3) } -// MsigGetVestingSchedule mocks base method +// MsigGetVestingSchedule mocks base method. 
func (m *MockFullNode) MsigGetVestingSchedule(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) (api.MsigVesting, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "MsigGetVestingSchedule", arg0, arg1, arg2) @@ -1375,88 +1480,88 @@ func (m *MockFullNode) MsigGetVestingSchedule(arg0 context.Context, arg1 address return ret0, ret1 } -// MsigGetVestingSchedule indicates an expected call of MsigGetVestingSchedule +// MsigGetVestingSchedule indicates an expected call of MsigGetVestingSchedule. func (mr *MockFullNodeMockRecorder) MsigGetVestingSchedule(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MsigGetVestingSchedule", reflect.TypeOf((*MockFullNode)(nil).MsigGetVestingSchedule), arg0, arg1, arg2) } -// MsigPropose mocks base method -func (m *MockFullNode) MsigPropose(arg0 context.Context, arg1, arg2 address.Address, arg3 big.Int, arg4 address.Address, arg5 uint64, arg6 []byte) (cid.Cid, error) { +// MsigPropose mocks base method. +func (m *MockFullNode) MsigPropose(arg0 context.Context, arg1, arg2 address.Address, arg3 big.Int, arg4 address.Address, arg5 uint64, arg6 []byte) (*api.MessagePrototype, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "MsigPropose", arg0, arg1, arg2, arg3, arg4, arg5, arg6) - ret0, _ := ret[0].(cid.Cid) + ret0, _ := ret[0].(*api.MessagePrototype) ret1, _ := ret[1].(error) return ret0, ret1 } -// MsigPropose indicates an expected call of MsigPropose +// MsigPropose indicates an expected call of MsigPropose. 
func (mr *MockFullNodeMockRecorder) MsigPropose(arg0, arg1, arg2, arg3, arg4, arg5, arg6 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MsigPropose", reflect.TypeOf((*MockFullNode)(nil).MsigPropose), arg0, arg1, arg2, arg3, arg4, arg5, arg6) } -// MsigRemoveSigner mocks base method -func (m *MockFullNode) MsigRemoveSigner(arg0 context.Context, arg1, arg2, arg3 address.Address, arg4 bool) (cid.Cid, error) { +// MsigRemoveSigner mocks base method. +func (m *MockFullNode) MsigRemoveSigner(arg0 context.Context, arg1, arg2, arg3 address.Address, arg4 bool) (*api.MessagePrototype, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "MsigRemoveSigner", arg0, arg1, arg2, arg3, arg4) - ret0, _ := ret[0].(cid.Cid) + ret0, _ := ret[0].(*api.MessagePrototype) ret1, _ := ret[1].(error) return ret0, ret1 } -// MsigRemoveSigner indicates an expected call of MsigRemoveSigner +// MsigRemoveSigner indicates an expected call of MsigRemoveSigner. func (mr *MockFullNodeMockRecorder) MsigRemoveSigner(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MsigRemoveSigner", reflect.TypeOf((*MockFullNode)(nil).MsigRemoveSigner), arg0, arg1, arg2, arg3, arg4) } -// MsigSwapApprove mocks base method -func (m *MockFullNode) MsigSwapApprove(arg0 context.Context, arg1, arg2 address.Address, arg3 uint64, arg4, arg5, arg6 address.Address) (cid.Cid, error) { +// MsigSwapApprove mocks base method. 
+func (m *MockFullNode) MsigSwapApprove(arg0 context.Context, arg1, arg2 address.Address, arg3 uint64, arg4, arg5, arg6 address.Address) (*api.MessagePrototype, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "MsigSwapApprove", arg0, arg1, arg2, arg3, arg4, arg5, arg6) - ret0, _ := ret[0].(cid.Cid) + ret0, _ := ret[0].(*api.MessagePrototype) ret1, _ := ret[1].(error) return ret0, ret1 } -// MsigSwapApprove indicates an expected call of MsigSwapApprove +// MsigSwapApprove indicates an expected call of MsigSwapApprove. func (mr *MockFullNodeMockRecorder) MsigSwapApprove(arg0, arg1, arg2, arg3, arg4, arg5, arg6 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MsigSwapApprove", reflect.TypeOf((*MockFullNode)(nil).MsigSwapApprove), arg0, arg1, arg2, arg3, arg4, arg5, arg6) } -// MsigSwapCancel mocks base method -func (m *MockFullNode) MsigSwapCancel(arg0 context.Context, arg1, arg2 address.Address, arg3 uint64, arg4, arg5 address.Address) (cid.Cid, error) { +// MsigSwapCancel mocks base method. +func (m *MockFullNode) MsigSwapCancel(arg0 context.Context, arg1, arg2 address.Address, arg3 uint64, arg4, arg5 address.Address) (*api.MessagePrototype, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "MsigSwapCancel", arg0, arg1, arg2, arg3, arg4, arg5) - ret0, _ := ret[0].(cid.Cid) + ret0, _ := ret[0].(*api.MessagePrototype) ret1, _ := ret[1].(error) return ret0, ret1 } -// MsigSwapCancel indicates an expected call of MsigSwapCancel +// MsigSwapCancel indicates an expected call of MsigSwapCancel. 
func (mr *MockFullNodeMockRecorder) MsigSwapCancel(arg0, arg1, arg2, arg3, arg4, arg5 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MsigSwapCancel", reflect.TypeOf((*MockFullNode)(nil).MsigSwapCancel), arg0, arg1, arg2, arg3, arg4, arg5) } -// MsigSwapPropose mocks base method -func (m *MockFullNode) MsigSwapPropose(arg0 context.Context, arg1, arg2, arg3, arg4 address.Address) (cid.Cid, error) { +// MsigSwapPropose mocks base method. +func (m *MockFullNode) MsigSwapPropose(arg0 context.Context, arg1, arg2, arg3, arg4 address.Address) (*api.MessagePrototype, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "MsigSwapPropose", arg0, arg1, arg2, arg3, arg4) - ret0, _ := ret[0].(cid.Cid) + ret0, _ := ret[0].(*api.MessagePrototype) ret1, _ := ret[1].(error) return ret0, ret1 } -// MsigSwapPropose indicates an expected call of MsigSwapPropose +// MsigSwapPropose indicates an expected call of MsigSwapPropose. func (mr *MockFullNodeMockRecorder) MsigSwapPropose(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MsigSwapPropose", reflect.TypeOf((*MockFullNode)(nil).MsigSwapPropose), arg0, arg1, arg2, arg3, arg4) } -// NetAddrsListen mocks base method +// NetAddrsListen mocks base method. func (m *MockFullNode) NetAddrsListen(arg0 context.Context) (peer.AddrInfo, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "NetAddrsListen", arg0) @@ -1465,13 +1570,13 @@ func (m *MockFullNode) NetAddrsListen(arg0 context.Context) (peer.AddrInfo, erro return ret0, ret1 } -// NetAddrsListen indicates an expected call of NetAddrsListen +// NetAddrsListen indicates an expected call of NetAddrsListen. 
func (mr *MockFullNodeMockRecorder) NetAddrsListen(arg0 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetAddrsListen", reflect.TypeOf((*MockFullNode)(nil).NetAddrsListen), arg0) } -// NetAgentVersion mocks base method +// NetAgentVersion mocks base method. func (m *MockFullNode) NetAgentVersion(arg0 context.Context, arg1 peer.ID) (string, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "NetAgentVersion", arg0, arg1) @@ -1480,13 +1585,13 @@ func (m *MockFullNode) NetAgentVersion(arg0 context.Context, arg1 peer.ID) (stri return ret0, ret1 } -// NetAgentVersion indicates an expected call of NetAgentVersion +// NetAgentVersion indicates an expected call of NetAgentVersion. func (mr *MockFullNodeMockRecorder) NetAgentVersion(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetAgentVersion", reflect.TypeOf((*MockFullNode)(nil).NetAgentVersion), arg0, arg1) } -// NetAutoNatStatus mocks base method +// NetAutoNatStatus mocks base method. func (m *MockFullNode) NetAutoNatStatus(arg0 context.Context) (api.NatInfo, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "NetAutoNatStatus", arg0) @@ -1495,13 +1600,13 @@ func (m *MockFullNode) NetAutoNatStatus(arg0 context.Context) (api.NatInfo, erro return ret0, ret1 } -// NetAutoNatStatus indicates an expected call of NetAutoNatStatus +// NetAutoNatStatus indicates an expected call of NetAutoNatStatus. func (mr *MockFullNodeMockRecorder) NetAutoNatStatus(arg0 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetAutoNatStatus", reflect.TypeOf((*MockFullNode)(nil).NetAutoNatStatus), arg0) } -// NetBandwidthStats mocks base method +// NetBandwidthStats mocks base method. 
func (m *MockFullNode) NetBandwidthStats(arg0 context.Context) (metrics.Stats, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "NetBandwidthStats", arg0) @@ -1510,13 +1615,13 @@ func (m *MockFullNode) NetBandwidthStats(arg0 context.Context) (metrics.Stats, e return ret0, ret1 } -// NetBandwidthStats indicates an expected call of NetBandwidthStats +// NetBandwidthStats indicates an expected call of NetBandwidthStats. func (mr *MockFullNodeMockRecorder) NetBandwidthStats(arg0 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetBandwidthStats", reflect.TypeOf((*MockFullNode)(nil).NetBandwidthStats), arg0) } -// NetBandwidthStatsByPeer mocks base method +// NetBandwidthStatsByPeer mocks base method. func (m *MockFullNode) NetBandwidthStatsByPeer(arg0 context.Context) (map[string]metrics.Stats, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "NetBandwidthStatsByPeer", arg0) @@ -1525,13 +1630,13 @@ func (m *MockFullNode) NetBandwidthStatsByPeer(arg0 context.Context) (map[string return ret0, ret1 } -// NetBandwidthStatsByPeer indicates an expected call of NetBandwidthStatsByPeer +// NetBandwidthStatsByPeer indicates an expected call of NetBandwidthStatsByPeer. func (mr *MockFullNodeMockRecorder) NetBandwidthStatsByPeer(arg0 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetBandwidthStatsByPeer", reflect.TypeOf((*MockFullNode)(nil).NetBandwidthStatsByPeer), arg0) } -// NetBandwidthStatsByProtocol mocks base method +// NetBandwidthStatsByProtocol mocks base method. 
func (m *MockFullNode) NetBandwidthStatsByProtocol(arg0 context.Context) (map[protocol.ID]metrics.Stats, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "NetBandwidthStatsByProtocol", arg0) @@ -1540,13 +1645,13 @@ func (m *MockFullNode) NetBandwidthStatsByProtocol(arg0 context.Context) (map[pr return ret0, ret1 } -// NetBandwidthStatsByProtocol indicates an expected call of NetBandwidthStatsByProtocol +// NetBandwidthStatsByProtocol indicates an expected call of NetBandwidthStatsByProtocol. func (mr *MockFullNodeMockRecorder) NetBandwidthStatsByProtocol(arg0 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetBandwidthStatsByProtocol", reflect.TypeOf((*MockFullNode)(nil).NetBandwidthStatsByProtocol), arg0) } -// NetBlockAdd mocks base method +// NetBlockAdd mocks base method. func (m *MockFullNode) NetBlockAdd(arg0 context.Context, arg1 api.NetBlockList) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "NetBlockAdd", arg0, arg1) @@ -1554,13 +1659,13 @@ func (m *MockFullNode) NetBlockAdd(arg0 context.Context, arg1 api.NetBlockList) return ret0 } -// NetBlockAdd indicates an expected call of NetBlockAdd +// NetBlockAdd indicates an expected call of NetBlockAdd. func (mr *MockFullNodeMockRecorder) NetBlockAdd(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetBlockAdd", reflect.TypeOf((*MockFullNode)(nil).NetBlockAdd), arg0, arg1) } -// NetBlockList mocks base method +// NetBlockList mocks base method. func (m *MockFullNode) NetBlockList(arg0 context.Context) (api.NetBlockList, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "NetBlockList", arg0) @@ -1569,13 +1674,13 @@ func (m *MockFullNode) NetBlockList(arg0 context.Context) (api.NetBlockList, err return ret0, ret1 } -// NetBlockList indicates an expected call of NetBlockList +// NetBlockList indicates an expected call of NetBlockList. 
func (mr *MockFullNodeMockRecorder) NetBlockList(arg0 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetBlockList", reflect.TypeOf((*MockFullNode)(nil).NetBlockList), arg0) } -// NetBlockRemove mocks base method +// NetBlockRemove mocks base method. func (m *MockFullNode) NetBlockRemove(arg0 context.Context, arg1 api.NetBlockList) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "NetBlockRemove", arg0, arg1) @@ -1583,13 +1688,13 @@ func (m *MockFullNode) NetBlockRemove(arg0 context.Context, arg1 api.NetBlockLis return ret0 } -// NetBlockRemove indicates an expected call of NetBlockRemove +// NetBlockRemove indicates an expected call of NetBlockRemove. func (mr *MockFullNodeMockRecorder) NetBlockRemove(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetBlockRemove", reflect.TypeOf((*MockFullNode)(nil).NetBlockRemove), arg0, arg1) } -// NetConnect mocks base method +// NetConnect mocks base method. func (m *MockFullNode) NetConnect(arg0 context.Context, arg1 peer.AddrInfo) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "NetConnect", arg0, arg1) @@ -1597,13 +1702,13 @@ func (m *MockFullNode) NetConnect(arg0 context.Context, arg1 peer.AddrInfo) erro return ret0 } -// NetConnect indicates an expected call of NetConnect +// NetConnect indicates an expected call of NetConnect. func (mr *MockFullNodeMockRecorder) NetConnect(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetConnect", reflect.TypeOf((*MockFullNode)(nil).NetConnect), arg0, arg1) } -// NetConnectedness mocks base method +// NetConnectedness mocks base method. 
func (m *MockFullNode) NetConnectedness(arg0 context.Context, arg1 peer.ID) (network0.Connectedness, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "NetConnectedness", arg0, arg1) @@ -1612,13 +1717,13 @@ func (m *MockFullNode) NetConnectedness(arg0 context.Context, arg1 peer.ID) (net return ret0, ret1 } -// NetConnectedness indicates an expected call of NetConnectedness +// NetConnectedness indicates an expected call of NetConnectedness. func (mr *MockFullNodeMockRecorder) NetConnectedness(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetConnectedness", reflect.TypeOf((*MockFullNode)(nil).NetConnectedness), arg0, arg1) } -// NetDisconnect mocks base method +// NetDisconnect mocks base method. func (m *MockFullNode) NetDisconnect(arg0 context.Context, arg1 peer.ID) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "NetDisconnect", arg0, arg1) @@ -1626,13 +1731,13 @@ func (m *MockFullNode) NetDisconnect(arg0 context.Context, arg1 peer.ID) error { return ret0 } -// NetDisconnect indicates an expected call of NetDisconnect +// NetDisconnect indicates an expected call of NetDisconnect. func (mr *MockFullNodeMockRecorder) NetDisconnect(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetDisconnect", reflect.TypeOf((*MockFullNode)(nil).NetDisconnect), arg0, arg1) } -// NetFindPeer mocks base method +// NetFindPeer mocks base method. func (m *MockFullNode) NetFindPeer(arg0 context.Context, arg1 peer.ID) (peer.AddrInfo, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "NetFindPeer", arg0, arg1) @@ -1641,13 +1746,13 @@ func (m *MockFullNode) NetFindPeer(arg0 context.Context, arg1 peer.ID) (peer.Add return ret0, ret1 } -// NetFindPeer indicates an expected call of NetFindPeer +// NetFindPeer indicates an expected call of NetFindPeer. 
func (mr *MockFullNodeMockRecorder) NetFindPeer(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetFindPeer", reflect.TypeOf((*MockFullNode)(nil).NetFindPeer), arg0, arg1) } -// NetPeerInfo mocks base method +// NetPeerInfo mocks base method. func (m *MockFullNode) NetPeerInfo(arg0 context.Context, arg1 peer.ID) (*api.ExtendedPeerInfo, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "NetPeerInfo", arg0, arg1) @@ -1656,13 +1761,13 @@ func (m *MockFullNode) NetPeerInfo(arg0 context.Context, arg1 peer.ID) (*api.Ext return ret0, ret1 } -// NetPeerInfo indicates an expected call of NetPeerInfo +// NetPeerInfo indicates an expected call of NetPeerInfo. func (mr *MockFullNodeMockRecorder) NetPeerInfo(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetPeerInfo", reflect.TypeOf((*MockFullNode)(nil).NetPeerInfo), arg0, arg1) } -// NetPeers mocks base method +// NetPeers mocks base method. func (m *MockFullNode) NetPeers(arg0 context.Context) ([]peer.AddrInfo, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "NetPeers", arg0) @@ -1671,13 +1776,13 @@ func (m *MockFullNode) NetPeers(arg0 context.Context) ([]peer.AddrInfo, error) { return ret0, ret1 } -// NetPeers indicates an expected call of NetPeers +// NetPeers indicates an expected call of NetPeers. func (mr *MockFullNodeMockRecorder) NetPeers(arg0 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetPeers", reflect.TypeOf((*MockFullNode)(nil).NetPeers), arg0) } -// NetPubsubScores mocks base method +// NetPubsubScores mocks base method. 
func (m *MockFullNode) NetPubsubScores(arg0 context.Context) ([]api.PubsubScore, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "NetPubsubScores", arg0) @@ -1686,13 +1791,28 @@ func (m *MockFullNode) NetPubsubScores(arg0 context.Context) ([]api.PubsubScore, return ret0, ret1 } -// NetPubsubScores indicates an expected call of NetPubsubScores +// NetPubsubScores indicates an expected call of NetPubsubScores. func (mr *MockFullNodeMockRecorder) NetPubsubScores(arg0 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetPubsubScores", reflect.TypeOf((*MockFullNode)(nil).NetPubsubScores), arg0) } -// PaychAllocateLane mocks base method +// NodeStatus mocks base method. +func (m *MockFullNode) NodeStatus(arg0 context.Context, arg1 bool) (api.NodeStatus, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NodeStatus", arg0, arg1) + ret0, _ := ret[0].(api.NodeStatus) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// NodeStatus indicates an expected call of NodeStatus. +func (mr *MockFullNodeMockRecorder) NodeStatus(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NodeStatus", reflect.TypeOf((*MockFullNode)(nil).NodeStatus), arg0, arg1) +} + +// PaychAllocateLane mocks base method. func (m *MockFullNode) PaychAllocateLane(arg0 context.Context, arg1 address.Address) (uint64, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "PaychAllocateLane", arg0, arg1) @@ -1701,13 +1821,13 @@ func (m *MockFullNode) PaychAllocateLane(arg0 context.Context, arg1 address.Addr return ret0, ret1 } -// PaychAllocateLane indicates an expected call of PaychAllocateLane +// PaychAllocateLane indicates an expected call of PaychAllocateLane. 
func (mr *MockFullNodeMockRecorder) PaychAllocateLane(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PaychAllocateLane", reflect.TypeOf((*MockFullNode)(nil).PaychAllocateLane), arg0, arg1) } -// PaychAvailableFunds mocks base method +// PaychAvailableFunds mocks base method. func (m *MockFullNode) PaychAvailableFunds(arg0 context.Context, arg1 address.Address) (*api.ChannelAvailableFunds, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "PaychAvailableFunds", arg0, arg1) @@ -1716,13 +1836,13 @@ func (m *MockFullNode) PaychAvailableFunds(arg0 context.Context, arg1 address.Ad return ret0, ret1 } -// PaychAvailableFunds indicates an expected call of PaychAvailableFunds +// PaychAvailableFunds indicates an expected call of PaychAvailableFunds. func (mr *MockFullNodeMockRecorder) PaychAvailableFunds(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PaychAvailableFunds", reflect.TypeOf((*MockFullNode)(nil).PaychAvailableFunds), arg0, arg1) } -// PaychAvailableFundsByFromTo mocks base method +// PaychAvailableFundsByFromTo mocks base method. func (m *MockFullNode) PaychAvailableFundsByFromTo(arg0 context.Context, arg1, arg2 address.Address) (*api.ChannelAvailableFunds, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "PaychAvailableFundsByFromTo", arg0, arg1, arg2) @@ -1731,13 +1851,13 @@ func (m *MockFullNode) PaychAvailableFundsByFromTo(arg0 context.Context, arg1, a return ret0, ret1 } -// PaychAvailableFundsByFromTo indicates an expected call of PaychAvailableFundsByFromTo +// PaychAvailableFundsByFromTo indicates an expected call of PaychAvailableFundsByFromTo. 
func (mr *MockFullNodeMockRecorder) PaychAvailableFundsByFromTo(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PaychAvailableFundsByFromTo", reflect.TypeOf((*MockFullNode)(nil).PaychAvailableFundsByFromTo), arg0, arg1, arg2) } -// PaychCollect mocks base method +// PaychCollect mocks base method. func (m *MockFullNode) PaychCollect(arg0 context.Context, arg1 address.Address) (cid.Cid, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "PaychCollect", arg0, arg1) @@ -1746,13 +1866,13 @@ func (m *MockFullNode) PaychCollect(arg0 context.Context, arg1 address.Address) return ret0, ret1 } -// PaychCollect indicates an expected call of PaychCollect +// PaychCollect indicates an expected call of PaychCollect. func (mr *MockFullNodeMockRecorder) PaychCollect(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PaychCollect", reflect.TypeOf((*MockFullNode)(nil).PaychCollect), arg0, arg1) } -// PaychGet mocks base method +// PaychGet mocks base method. func (m *MockFullNode) PaychGet(arg0 context.Context, arg1, arg2 address.Address, arg3 big.Int) (*api.ChannelInfo, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "PaychGet", arg0, arg1, arg2, arg3) @@ -1761,13 +1881,13 @@ func (m *MockFullNode) PaychGet(arg0 context.Context, arg1, arg2 address.Address return ret0, ret1 } -// PaychGet indicates an expected call of PaychGet +// PaychGet indicates an expected call of PaychGet. func (mr *MockFullNodeMockRecorder) PaychGet(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PaychGet", reflect.TypeOf((*MockFullNode)(nil).PaychGet), arg0, arg1, arg2, arg3) } -// PaychGetWaitReady mocks base method +// PaychGetWaitReady mocks base method. 
func (m *MockFullNode) PaychGetWaitReady(arg0 context.Context, arg1 cid.Cid) (address.Address, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "PaychGetWaitReady", arg0, arg1) @@ -1776,13 +1896,13 @@ func (m *MockFullNode) PaychGetWaitReady(arg0 context.Context, arg1 cid.Cid) (ad return ret0, ret1 } -// PaychGetWaitReady indicates an expected call of PaychGetWaitReady +// PaychGetWaitReady indicates an expected call of PaychGetWaitReady. func (mr *MockFullNodeMockRecorder) PaychGetWaitReady(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PaychGetWaitReady", reflect.TypeOf((*MockFullNode)(nil).PaychGetWaitReady), arg0, arg1) } -// PaychList mocks base method +// PaychList mocks base method. func (m *MockFullNode) PaychList(arg0 context.Context) ([]address.Address, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "PaychList", arg0) @@ -1791,13 +1911,13 @@ func (m *MockFullNode) PaychList(arg0 context.Context) ([]address.Address, error return ret0, ret1 } -// PaychList indicates an expected call of PaychList +// PaychList indicates an expected call of PaychList. func (mr *MockFullNodeMockRecorder) PaychList(arg0 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PaychList", reflect.TypeOf((*MockFullNode)(nil).PaychList), arg0) } -// PaychNewPayment mocks base method +// PaychNewPayment mocks base method. func (m *MockFullNode) PaychNewPayment(arg0 context.Context, arg1, arg2 address.Address, arg3 []api.VoucherSpec) (*api.PaymentInfo, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "PaychNewPayment", arg0, arg1, arg2, arg3) @@ -1806,13 +1926,13 @@ func (m *MockFullNode) PaychNewPayment(arg0 context.Context, arg1, arg2 address. return ret0, ret1 } -// PaychNewPayment indicates an expected call of PaychNewPayment +// PaychNewPayment indicates an expected call of PaychNewPayment. 
func (mr *MockFullNodeMockRecorder) PaychNewPayment(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PaychNewPayment", reflect.TypeOf((*MockFullNode)(nil).PaychNewPayment), arg0, arg1, arg2, arg3) } -// PaychSettle mocks base method +// PaychSettle mocks base method. func (m *MockFullNode) PaychSettle(arg0 context.Context, arg1 address.Address) (cid.Cid, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "PaychSettle", arg0, arg1) @@ -1821,13 +1941,13 @@ func (m *MockFullNode) PaychSettle(arg0 context.Context, arg1 address.Address) ( return ret0, ret1 } -// PaychSettle indicates an expected call of PaychSettle +// PaychSettle indicates an expected call of PaychSettle. func (mr *MockFullNodeMockRecorder) PaychSettle(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PaychSettle", reflect.TypeOf((*MockFullNode)(nil).PaychSettle), arg0, arg1) } -// PaychStatus mocks base method +// PaychStatus mocks base method. func (m *MockFullNode) PaychStatus(arg0 context.Context, arg1 address.Address) (*api.PaychStatus, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "PaychStatus", arg0, arg1) @@ -1836,13 +1956,13 @@ func (m *MockFullNode) PaychStatus(arg0 context.Context, arg1 address.Address) ( return ret0, ret1 } -// PaychStatus indicates an expected call of PaychStatus +// PaychStatus indicates an expected call of PaychStatus. func (mr *MockFullNodeMockRecorder) PaychStatus(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PaychStatus", reflect.TypeOf((*MockFullNode)(nil).PaychStatus), arg0, arg1) } -// PaychVoucherAdd mocks base method +// PaychVoucherAdd mocks base method. 
func (m *MockFullNode) PaychVoucherAdd(arg0 context.Context, arg1 address.Address, arg2 *paych.SignedVoucher, arg3 []byte, arg4 big.Int) (big.Int, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "PaychVoucherAdd", arg0, arg1, arg2, arg3, arg4) @@ -1851,13 +1971,13 @@ func (m *MockFullNode) PaychVoucherAdd(arg0 context.Context, arg1 address.Addres return ret0, ret1 } -// PaychVoucherAdd indicates an expected call of PaychVoucherAdd +// PaychVoucherAdd indicates an expected call of PaychVoucherAdd. func (mr *MockFullNodeMockRecorder) PaychVoucherAdd(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PaychVoucherAdd", reflect.TypeOf((*MockFullNode)(nil).PaychVoucherAdd), arg0, arg1, arg2, arg3, arg4) } -// PaychVoucherCheckSpendable mocks base method +// PaychVoucherCheckSpendable mocks base method. func (m *MockFullNode) PaychVoucherCheckSpendable(arg0 context.Context, arg1 address.Address, arg2 *paych.SignedVoucher, arg3, arg4 []byte) (bool, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "PaychVoucherCheckSpendable", arg0, arg1, arg2, arg3, arg4) @@ -1866,13 +1986,13 @@ func (m *MockFullNode) PaychVoucherCheckSpendable(arg0 context.Context, arg1 add return ret0, ret1 } -// PaychVoucherCheckSpendable indicates an expected call of PaychVoucherCheckSpendable +// PaychVoucherCheckSpendable indicates an expected call of PaychVoucherCheckSpendable. func (mr *MockFullNodeMockRecorder) PaychVoucherCheckSpendable(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PaychVoucherCheckSpendable", reflect.TypeOf((*MockFullNode)(nil).PaychVoucherCheckSpendable), arg0, arg1, arg2, arg3, arg4) } -// PaychVoucherCheckValid mocks base method +// PaychVoucherCheckValid mocks base method. 
func (m *MockFullNode) PaychVoucherCheckValid(arg0 context.Context, arg1 address.Address, arg2 *paych.SignedVoucher) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "PaychVoucherCheckValid", arg0, arg1, arg2) @@ -1880,13 +2000,13 @@ func (m *MockFullNode) PaychVoucherCheckValid(arg0 context.Context, arg1 address return ret0 } -// PaychVoucherCheckValid indicates an expected call of PaychVoucherCheckValid +// PaychVoucherCheckValid indicates an expected call of PaychVoucherCheckValid. func (mr *MockFullNodeMockRecorder) PaychVoucherCheckValid(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PaychVoucherCheckValid", reflect.TypeOf((*MockFullNode)(nil).PaychVoucherCheckValid), arg0, arg1, arg2) } -// PaychVoucherCreate mocks base method +// PaychVoucherCreate mocks base method. func (m *MockFullNode) PaychVoucherCreate(arg0 context.Context, arg1 address.Address, arg2 big.Int, arg3 uint64) (*api.VoucherCreateResult, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "PaychVoucherCreate", arg0, arg1, arg2, arg3) @@ -1895,13 +2015,13 @@ func (m *MockFullNode) PaychVoucherCreate(arg0 context.Context, arg1 address.Add return ret0, ret1 } -// PaychVoucherCreate indicates an expected call of PaychVoucherCreate +// PaychVoucherCreate indicates an expected call of PaychVoucherCreate. func (mr *MockFullNodeMockRecorder) PaychVoucherCreate(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PaychVoucherCreate", reflect.TypeOf((*MockFullNode)(nil).PaychVoucherCreate), arg0, arg1, arg2, arg3) } -// PaychVoucherList mocks base method +// PaychVoucherList mocks base method. 
func (m *MockFullNode) PaychVoucherList(arg0 context.Context, arg1 address.Address) ([]*paych.SignedVoucher, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "PaychVoucherList", arg0, arg1) @@ -1910,13 +2030,13 @@ func (m *MockFullNode) PaychVoucherList(arg0 context.Context, arg1 address.Addre return ret0, ret1 } -// PaychVoucherList indicates an expected call of PaychVoucherList +// PaychVoucherList indicates an expected call of PaychVoucherList. func (mr *MockFullNodeMockRecorder) PaychVoucherList(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PaychVoucherList", reflect.TypeOf((*MockFullNode)(nil).PaychVoucherList), arg0, arg1) } -// PaychVoucherSubmit mocks base method +// PaychVoucherSubmit mocks base method. func (m *MockFullNode) PaychVoucherSubmit(arg0 context.Context, arg1 address.Address, arg2 *paych.SignedVoucher, arg3, arg4 []byte) (cid.Cid, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "PaychVoucherSubmit", arg0, arg1, arg2, arg3, arg4) @@ -1925,13 +2045,13 @@ func (m *MockFullNode) PaychVoucherSubmit(arg0 context.Context, arg1 address.Add return ret0, ret1 } -// PaychVoucherSubmit indicates an expected call of PaychVoucherSubmit +// PaychVoucherSubmit indicates an expected call of PaychVoucherSubmit. func (mr *MockFullNodeMockRecorder) PaychVoucherSubmit(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PaychVoucherSubmit", reflect.TypeOf((*MockFullNode)(nil).PaychVoucherSubmit), arg0, arg1, arg2, arg3, arg4) } -// Session mocks base method +// Session mocks base method. 
func (m *MockFullNode) Session(arg0 context.Context) (uuid.UUID, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Session", arg0) @@ -1940,13 +2060,13 @@ func (m *MockFullNode) Session(arg0 context.Context) (uuid.UUID, error) { return ret0, ret1 } -// Session indicates an expected call of Session +// Session indicates an expected call of Session. func (mr *MockFullNodeMockRecorder) Session(arg0 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Session", reflect.TypeOf((*MockFullNode)(nil).Session), arg0) } -// Shutdown mocks base method +// Shutdown mocks base method. func (m *MockFullNode) Shutdown(arg0 context.Context) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Shutdown", arg0) @@ -1954,13 +2074,13 @@ func (m *MockFullNode) Shutdown(arg0 context.Context) error { return ret0 } -// Shutdown indicates an expected call of Shutdown +// Shutdown indicates an expected call of Shutdown. func (mr *MockFullNodeMockRecorder) Shutdown(arg0 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Shutdown", reflect.TypeOf((*MockFullNode)(nil).Shutdown), arg0) } -// StateAccountKey mocks base method +// StateAccountKey mocks base method. func (m *MockFullNode) StateAccountKey(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) (address.Address, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "StateAccountKey", arg0, arg1, arg2) @@ -1969,13 +2089,13 @@ func (m *MockFullNode) StateAccountKey(arg0 context.Context, arg1 address.Addres return ret0, ret1 } -// StateAccountKey indicates an expected call of StateAccountKey +// StateAccountKey indicates an expected call of StateAccountKey. 
func (mr *MockFullNodeMockRecorder) StateAccountKey(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateAccountKey", reflect.TypeOf((*MockFullNode)(nil).StateAccountKey), arg0, arg1, arg2) } -// StateAllMinerFaults mocks base method +// StateAllMinerFaults mocks base method. func (m *MockFullNode) StateAllMinerFaults(arg0 context.Context, arg1 abi.ChainEpoch, arg2 types.TipSetKey) ([]*api.Fault, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "StateAllMinerFaults", arg0, arg1, arg2) @@ -1984,13 +2104,13 @@ func (m *MockFullNode) StateAllMinerFaults(arg0 context.Context, arg1 abi.ChainE return ret0, ret1 } -// StateAllMinerFaults indicates an expected call of StateAllMinerFaults +// StateAllMinerFaults indicates an expected call of StateAllMinerFaults. func (mr *MockFullNodeMockRecorder) StateAllMinerFaults(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateAllMinerFaults", reflect.TypeOf((*MockFullNode)(nil).StateAllMinerFaults), arg0, arg1, arg2) } -// StateCall mocks base method +// StateCall mocks base method. func (m *MockFullNode) StateCall(arg0 context.Context, arg1 *types.Message, arg2 types.TipSetKey) (*api.InvocResult, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "StateCall", arg0, arg1, arg2) @@ -1999,13 +2119,13 @@ func (m *MockFullNode) StateCall(arg0 context.Context, arg1 *types.Message, arg2 return ret0, ret1 } -// StateCall indicates an expected call of StateCall +// StateCall indicates an expected call of StateCall. func (mr *MockFullNodeMockRecorder) StateCall(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateCall", reflect.TypeOf((*MockFullNode)(nil).StateCall), arg0, arg1, arg2) } -// StateChangedActors mocks base method +// StateChangedActors mocks base method. 
func (m *MockFullNode) StateChangedActors(arg0 context.Context, arg1, arg2 cid.Cid) (map[string]types.Actor, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "StateChangedActors", arg0, arg1, arg2) @@ -2014,13 +2134,13 @@ func (m *MockFullNode) StateChangedActors(arg0 context.Context, arg1, arg2 cid.C return ret0, ret1 } -// StateChangedActors indicates an expected call of StateChangedActors +// StateChangedActors indicates an expected call of StateChangedActors. func (mr *MockFullNodeMockRecorder) StateChangedActors(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateChangedActors", reflect.TypeOf((*MockFullNode)(nil).StateChangedActors), arg0, arg1, arg2) } -// StateCirculatingSupply mocks base method +// StateCirculatingSupply mocks base method. func (m *MockFullNode) StateCirculatingSupply(arg0 context.Context, arg1 types.TipSetKey) (big.Int, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "StateCirculatingSupply", arg0, arg1) @@ -2029,13 +2149,13 @@ func (m *MockFullNode) StateCirculatingSupply(arg0 context.Context, arg1 types.T return ret0, ret1 } -// StateCirculatingSupply indicates an expected call of StateCirculatingSupply +// StateCirculatingSupply indicates an expected call of StateCirculatingSupply. func (mr *MockFullNodeMockRecorder) StateCirculatingSupply(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateCirculatingSupply", reflect.TypeOf((*MockFullNode)(nil).StateCirculatingSupply), arg0, arg1) } -// StateCompute mocks base method +// StateCompute mocks base method. 
func (m *MockFullNode) StateCompute(arg0 context.Context, arg1 abi.ChainEpoch, arg2 []*types.Message, arg3 types.TipSetKey) (*api.ComputeStateOutput, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "StateCompute", arg0, arg1, arg2, arg3) @@ -2044,13 +2164,13 @@ func (m *MockFullNode) StateCompute(arg0 context.Context, arg1 abi.ChainEpoch, a return ret0, ret1 } -// StateCompute indicates an expected call of StateCompute +// StateCompute indicates an expected call of StateCompute. func (mr *MockFullNodeMockRecorder) StateCompute(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateCompute", reflect.TypeOf((*MockFullNode)(nil).StateCompute), arg0, arg1, arg2, arg3) } -// StateDealProviderCollateralBounds mocks base method +// StateDealProviderCollateralBounds mocks base method. func (m *MockFullNode) StateDealProviderCollateralBounds(arg0 context.Context, arg1 abi.PaddedPieceSize, arg2 bool, arg3 types.TipSetKey) (api.DealCollateralBounds, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "StateDealProviderCollateralBounds", arg0, arg1, arg2, arg3) @@ -2059,13 +2179,13 @@ func (m *MockFullNode) StateDealProviderCollateralBounds(arg0 context.Context, a return ret0, ret1 } -// StateDealProviderCollateralBounds indicates an expected call of StateDealProviderCollateralBounds +// StateDealProviderCollateralBounds indicates an expected call of StateDealProviderCollateralBounds. func (mr *MockFullNodeMockRecorder) StateDealProviderCollateralBounds(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateDealProviderCollateralBounds", reflect.TypeOf((*MockFullNode)(nil).StateDealProviderCollateralBounds), arg0, arg1, arg2, arg3) } -// StateDecodeParams mocks base method +// StateDecodeParams mocks base method. 
func (m *MockFullNode) StateDecodeParams(arg0 context.Context, arg1 address.Address, arg2 abi.MethodNum, arg3 []byte, arg4 types.TipSetKey) (interface{}, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "StateDecodeParams", arg0, arg1, arg2, arg3, arg4) @@ -2074,13 +2194,13 @@ func (m *MockFullNode) StateDecodeParams(arg0 context.Context, arg1 address.Addr return ret0, ret1 } -// StateDecodeParams indicates an expected call of StateDecodeParams +// StateDecodeParams indicates an expected call of StateDecodeParams. func (mr *MockFullNodeMockRecorder) StateDecodeParams(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateDecodeParams", reflect.TypeOf((*MockFullNode)(nil).StateDecodeParams), arg0, arg1, arg2, arg3, arg4) } -// StateGetActor mocks base method +// StateGetActor mocks base method. func (m *MockFullNode) StateGetActor(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) (*types.Actor, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "StateGetActor", arg0, arg1, arg2) @@ -2089,13 +2209,13 @@ func (m *MockFullNode) StateGetActor(arg0 context.Context, arg1 address.Address, return ret0, ret1 } -// StateGetActor indicates an expected call of StateGetActor +// StateGetActor indicates an expected call of StateGetActor. func (mr *MockFullNodeMockRecorder) StateGetActor(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateGetActor", reflect.TypeOf((*MockFullNode)(nil).StateGetActor), arg0, arg1, arg2) } -// StateListActors mocks base method +// StateListActors mocks base method. 
func (m *MockFullNode) StateListActors(arg0 context.Context, arg1 types.TipSetKey) ([]address.Address, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "StateListActors", arg0, arg1) @@ -2104,13 +2224,13 @@ func (m *MockFullNode) StateListActors(arg0 context.Context, arg1 types.TipSetKe return ret0, ret1 } -// StateListActors indicates an expected call of StateListActors +// StateListActors indicates an expected call of StateListActors. func (mr *MockFullNodeMockRecorder) StateListActors(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateListActors", reflect.TypeOf((*MockFullNode)(nil).StateListActors), arg0, arg1) } -// StateListMessages mocks base method +// StateListMessages mocks base method. func (m *MockFullNode) StateListMessages(arg0 context.Context, arg1 *api.MessageMatch, arg2 types.TipSetKey, arg3 abi.ChainEpoch) ([]cid.Cid, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "StateListMessages", arg0, arg1, arg2, arg3) @@ -2119,13 +2239,13 @@ func (m *MockFullNode) StateListMessages(arg0 context.Context, arg1 *api.Message return ret0, ret1 } -// StateListMessages indicates an expected call of StateListMessages +// StateListMessages indicates an expected call of StateListMessages. func (mr *MockFullNodeMockRecorder) StateListMessages(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateListMessages", reflect.TypeOf((*MockFullNode)(nil).StateListMessages), arg0, arg1, arg2, arg3) } -// StateListMiners mocks base method +// StateListMiners mocks base method. 
func (m *MockFullNode) StateListMiners(arg0 context.Context, arg1 types.TipSetKey) ([]address.Address, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "StateListMiners", arg0, arg1) @@ -2134,13 +2254,13 @@ func (m *MockFullNode) StateListMiners(arg0 context.Context, arg1 types.TipSetKe return ret0, ret1 } -// StateListMiners indicates an expected call of StateListMiners +// StateListMiners indicates an expected call of StateListMiners. func (mr *MockFullNodeMockRecorder) StateListMiners(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateListMiners", reflect.TypeOf((*MockFullNode)(nil).StateListMiners), arg0, arg1) } -// StateLookupID mocks base method +// StateLookupID mocks base method. func (m *MockFullNode) StateLookupID(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) (address.Address, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "StateLookupID", arg0, arg1, arg2) @@ -2149,13 +2269,13 @@ func (m *MockFullNode) StateLookupID(arg0 context.Context, arg1 address.Address, return ret0, ret1 } -// StateLookupID indicates an expected call of StateLookupID +// StateLookupID indicates an expected call of StateLookupID. func (mr *MockFullNodeMockRecorder) StateLookupID(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateLookupID", reflect.TypeOf((*MockFullNode)(nil).StateLookupID), arg0, arg1, arg2) } -// StateMarketBalance mocks base method +// StateMarketBalance mocks base method. 
func (m *MockFullNode) StateMarketBalance(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) (api.MarketBalance, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "StateMarketBalance", arg0, arg1, arg2) @@ -2164,13 +2284,13 @@ func (m *MockFullNode) StateMarketBalance(arg0 context.Context, arg1 address.Add return ret0, ret1 } -// StateMarketBalance indicates an expected call of StateMarketBalance +// StateMarketBalance indicates an expected call of StateMarketBalance. func (mr *MockFullNodeMockRecorder) StateMarketBalance(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMarketBalance", reflect.TypeOf((*MockFullNode)(nil).StateMarketBalance), arg0, arg1, arg2) } -// StateMarketDeals mocks base method +// StateMarketDeals mocks base method. func (m *MockFullNode) StateMarketDeals(arg0 context.Context, arg1 types.TipSetKey) (map[string]api.MarketDeal, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "StateMarketDeals", arg0, arg1) @@ -2179,13 +2299,13 @@ func (m *MockFullNode) StateMarketDeals(arg0 context.Context, arg1 types.TipSetK return ret0, ret1 } -// StateMarketDeals indicates an expected call of StateMarketDeals +// StateMarketDeals indicates an expected call of StateMarketDeals. func (mr *MockFullNodeMockRecorder) StateMarketDeals(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMarketDeals", reflect.TypeOf((*MockFullNode)(nil).StateMarketDeals), arg0, arg1) } -// StateMarketParticipants mocks base method +// StateMarketParticipants mocks base method. func (m *MockFullNode) StateMarketParticipants(arg0 context.Context, arg1 types.TipSetKey) (map[string]api.MarketBalance, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "StateMarketParticipants", arg0, arg1) @@ -2194,13 +2314,13 @@ func (m *MockFullNode) StateMarketParticipants(arg0 context.Context, arg1 types. 
return ret0, ret1 } -// StateMarketParticipants indicates an expected call of StateMarketParticipants +// StateMarketParticipants indicates an expected call of StateMarketParticipants. func (mr *MockFullNodeMockRecorder) StateMarketParticipants(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMarketParticipants", reflect.TypeOf((*MockFullNode)(nil).StateMarketParticipants), arg0, arg1) } -// StateMarketStorageDeal mocks base method +// StateMarketStorageDeal mocks base method. func (m *MockFullNode) StateMarketStorageDeal(arg0 context.Context, arg1 abi.DealID, arg2 types.TipSetKey) (*api.MarketDeal, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "StateMarketStorageDeal", arg0, arg1, arg2) @@ -2209,13 +2329,13 @@ func (m *MockFullNode) StateMarketStorageDeal(arg0 context.Context, arg1 abi.Dea return ret0, ret1 } -// StateMarketStorageDeal indicates an expected call of StateMarketStorageDeal +// StateMarketStorageDeal indicates an expected call of StateMarketStorageDeal. func (mr *MockFullNodeMockRecorder) StateMarketStorageDeal(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMarketStorageDeal", reflect.TypeOf((*MockFullNode)(nil).StateMarketStorageDeal), arg0, arg1, arg2) } -// StateMinerActiveSectors mocks base method +// StateMinerActiveSectors mocks base method. func (m *MockFullNode) StateMinerActiveSectors(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) ([]*miner.SectorOnChainInfo, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "StateMinerActiveSectors", arg0, arg1, arg2) @@ -2224,13 +2344,13 @@ func (m *MockFullNode) StateMinerActiveSectors(arg0 context.Context, arg1 addres return ret0, ret1 } -// StateMinerActiveSectors indicates an expected call of StateMinerActiveSectors +// StateMinerActiveSectors indicates an expected call of StateMinerActiveSectors. 
func (mr *MockFullNodeMockRecorder) StateMinerActiveSectors(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMinerActiveSectors", reflect.TypeOf((*MockFullNode)(nil).StateMinerActiveSectors), arg0, arg1, arg2) } -// StateMinerAvailableBalance mocks base method +// StateMinerAvailableBalance mocks base method. func (m *MockFullNode) StateMinerAvailableBalance(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) (big.Int, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "StateMinerAvailableBalance", arg0, arg1, arg2) @@ -2239,13 +2359,13 @@ func (m *MockFullNode) StateMinerAvailableBalance(arg0 context.Context, arg1 add return ret0, ret1 } -// StateMinerAvailableBalance indicates an expected call of StateMinerAvailableBalance +// StateMinerAvailableBalance indicates an expected call of StateMinerAvailableBalance. func (mr *MockFullNodeMockRecorder) StateMinerAvailableBalance(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMinerAvailableBalance", reflect.TypeOf((*MockFullNode)(nil).StateMinerAvailableBalance), arg0, arg1, arg2) } -// StateMinerDeadlines mocks base method +// StateMinerDeadlines mocks base method. func (m *MockFullNode) StateMinerDeadlines(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) ([]api.Deadline, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "StateMinerDeadlines", arg0, arg1, arg2) @@ -2254,13 +2374,13 @@ func (m *MockFullNode) StateMinerDeadlines(arg0 context.Context, arg1 address.Ad return ret0, ret1 } -// StateMinerDeadlines indicates an expected call of StateMinerDeadlines +// StateMinerDeadlines indicates an expected call of StateMinerDeadlines. 
func (mr *MockFullNodeMockRecorder) StateMinerDeadlines(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMinerDeadlines", reflect.TypeOf((*MockFullNode)(nil).StateMinerDeadlines), arg0, arg1, arg2) } -// StateMinerFaults mocks base method +// StateMinerFaults mocks base method. func (m *MockFullNode) StateMinerFaults(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) (bitfield.BitField, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "StateMinerFaults", arg0, arg1, arg2) @@ -2269,13 +2389,13 @@ func (m *MockFullNode) StateMinerFaults(arg0 context.Context, arg1 address.Addre return ret0, ret1 } -// StateMinerFaults indicates an expected call of StateMinerFaults +// StateMinerFaults indicates an expected call of StateMinerFaults. func (mr *MockFullNodeMockRecorder) StateMinerFaults(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMinerFaults", reflect.TypeOf((*MockFullNode)(nil).StateMinerFaults), arg0, arg1, arg2) } -// StateMinerInfo mocks base method +// StateMinerInfo mocks base method. func (m *MockFullNode) StateMinerInfo(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) (miner.MinerInfo, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "StateMinerInfo", arg0, arg1, arg2) @@ -2284,13 +2404,13 @@ func (m *MockFullNode) StateMinerInfo(arg0 context.Context, arg1 address.Address return ret0, ret1 } -// StateMinerInfo indicates an expected call of StateMinerInfo +// StateMinerInfo indicates an expected call of StateMinerInfo. 
func (mr *MockFullNodeMockRecorder) StateMinerInfo(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMinerInfo", reflect.TypeOf((*MockFullNode)(nil).StateMinerInfo), arg0, arg1, arg2) } -// StateMinerInitialPledgeCollateral mocks base method +// StateMinerInitialPledgeCollateral mocks base method. func (m *MockFullNode) StateMinerInitialPledgeCollateral(arg0 context.Context, arg1 address.Address, arg2 miner0.SectorPreCommitInfo, arg3 types.TipSetKey) (big.Int, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "StateMinerInitialPledgeCollateral", arg0, arg1, arg2, arg3) @@ -2299,13 +2419,13 @@ func (m *MockFullNode) StateMinerInitialPledgeCollateral(arg0 context.Context, a return ret0, ret1 } -// StateMinerInitialPledgeCollateral indicates an expected call of StateMinerInitialPledgeCollateral +// StateMinerInitialPledgeCollateral indicates an expected call of StateMinerInitialPledgeCollateral. func (mr *MockFullNodeMockRecorder) StateMinerInitialPledgeCollateral(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMinerInitialPledgeCollateral", reflect.TypeOf((*MockFullNode)(nil).StateMinerInitialPledgeCollateral), arg0, arg1, arg2, arg3) } -// StateMinerPartitions mocks base method +// StateMinerPartitions mocks base method. func (m *MockFullNode) StateMinerPartitions(arg0 context.Context, arg1 address.Address, arg2 uint64, arg3 types.TipSetKey) ([]api.Partition, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "StateMinerPartitions", arg0, arg1, arg2, arg3) @@ -2314,13 +2434,13 @@ func (m *MockFullNode) StateMinerPartitions(arg0 context.Context, arg1 address.A return ret0, ret1 } -// StateMinerPartitions indicates an expected call of StateMinerPartitions +// StateMinerPartitions indicates an expected call of StateMinerPartitions. 
func (mr *MockFullNodeMockRecorder) StateMinerPartitions(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMinerPartitions", reflect.TypeOf((*MockFullNode)(nil).StateMinerPartitions), arg0, arg1, arg2, arg3) } -// StateMinerPower mocks base method +// StateMinerPower mocks base method. func (m *MockFullNode) StateMinerPower(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) (*api.MinerPower, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "StateMinerPower", arg0, arg1, arg2) @@ -2329,13 +2449,13 @@ func (m *MockFullNode) StateMinerPower(arg0 context.Context, arg1 address.Addres return ret0, ret1 } -// StateMinerPower indicates an expected call of StateMinerPower +// StateMinerPower indicates an expected call of StateMinerPower. func (mr *MockFullNodeMockRecorder) StateMinerPower(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMinerPower", reflect.TypeOf((*MockFullNode)(nil).StateMinerPower), arg0, arg1, arg2) } -// StateMinerPreCommitDepositForPower mocks base method +// StateMinerPreCommitDepositForPower mocks base method. func (m *MockFullNode) StateMinerPreCommitDepositForPower(arg0 context.Context, arg1 address.Address, arg2 miner0.SectorPreCommitInfo, arg3 types.TipSetKey) (big.Int, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "StateMinerPreCommitDepositForPower", arg0, arg1, arg2, arg3) @@ -2344,13 +2464,13 @@ func (m *MockFullNode) StateMinerPreCommitDepositForPower(arg0 context.Context, return ret0, ret1 } -// StateMinerPreCommitDepositForPower indicates an expected call of StateMinerPreCommitDepositForPower +// StateMinerPreCommitDepositForPower indicates an expected call of StateMinerPreCommitDepositForPower. 
func (mr *MockFullNodeMockRecorder) StateMinerPreCommitDepositForPower(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMinerPreCommitDepositForPower", reflect.TypeOf((*MockFullNode)(nil).StateMinerPreCommitDepositForPower), arg0, arg1, arg2, arg3) } -// StateMinerProvingDeadline mocks base method +// StateMinerProvingDeadline mocks base method. func (m *MockFullNode) StateMinerProvingDeadline(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) (*dline.Info, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "StateMinerProvingDeadline", arg0, arg1, arg2) @@ -2359,13 +2479,13 @@ func (m *MockFullNode) StateMinerProvingDeadline(arg0 context.Context, arg1 addr return ret0, ret1 } -// StateMinerProvingDeadline indicates an expected call of StateMinerProvingDeadline +// StateMinerProvingDeadline indicates an expected call of StateMinerProvingDeadline. func (mr *MockFullNodeMockRecorder) StateMinerProvingDeadline(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMinerProvingDeadline", reflect.TypeOf((*MockFullNode)(nil).StateMinerProvingDeadline), arg0, arg1, arg2) } -// StateMinerRecoveries mocks base method +// StateMinerRecoveries mocks base method. func (m *MockFullNode) StateMinerRecoveries(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) (bitfield.BitField, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "StateMinerRecoveries", arg0, arg1, arg2) @@ -2374,13 +2494,13 @@ func (m *MockFullNode) StateMinerRecoveries(arg0 context.Context, arg1 address.A return ret0, ret1 } -// StateMinerRecoveries indicates an expected call of StateMinerRecoveries +// StateMinerRecoveries indicates an expected call of StateMinerRecoveries. 
func (mr *MockFullNodeMockRecorder) StateMinerRecoveries(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMinerRecoveries", reflect.TypeOf((*MockFullNode)(nil).StateMinerRecoveries), arg0, arg1, arg2) } -// StateMinerSectorAllocated mocks base method +// StateMinerSectorAllocated mocks base method. func (m *MockFullNode) StateMinerSectorAllocated(arg0 context.Context, arg1 address.Address, arg2 abi.SectorNumber, arg3 types.TipSetKey) (bool, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "StateMinerSectorAllocated", arg0, arg1, arg2, arg3) @@ -2389,13 +2509,13 @@ func (m *MockFullNode) StateMinerSectorAllocated(arg0 context.Context, arg1 addr return ret0, ret1 } -// StateMinerSectorAllocated indicates an expected call of StateMinerSectorAllocated +// StateMinerSectorAllocated indicates an expected call of StateMinerSectorAllocated. func (mr *MockFullNodeMockRecorder) StateMinerSectorAllocated(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMinerSectorAllocated", reflect.TypeOf((*MockFullNode)(nil).StateMinerSectorAllocated), arg0, arg1, arg2, arg3) } -// StateMinerSectorCount mocks base method +// StateMinerSectorCount mocks base method. func (m *MockFullNode) StateMinerSectorCount(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) (api.MinerSectors, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "StateMinerSectorCount", arg0, arg1, arg2) @@ -2404,13 +2524,13 @@ func (m *MockFullNode) StateMinerSectorCount(arg0 context.Context, arg1 address. return ret0, ret1 } -// StateMinerSectorCount indicates an expected call of StateMinerSectorCount +// StateMinerSectorCount indicates an expected call of StateMinerSectorCount. 
func (mr *MockFullNodeMockRecorder) StateMinerSectorCount(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMinerSectorCount", reflect.TypeOf((*MockFullNode)(nil).StateMinerSectorCount), arg0, arg1, arg2) } -// StateMinerSectors mocks base method +// StateMinerSectors mocks base method. func (m *MockFullNode) StateMinerSectors(arg0 context.Context, arg1 address.Address, arg2 *bitfield.BitField, arg3 types.TipSetKey) ([]*miner.SectorOnChainInfo, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "StateMinerSectors", arg0, arg1, arg2, arg3) @@ -2419,13 +2539,13 @@ func (m *MockFullNode) StateMinerSectors(arg0 context.Context, arg1 address.Addr return ret0, ret1 } -// StateMinerSectors indicates an expected call of StateMinerSectors +// StateMinerSectors indicates an expected call of StateMinerSectors. func (mr *MockFullNodeMockRecorder) StateMinerSectors(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMinerSectors", reflect.TypeOf((*MockFullNode)(nil).StateMinerSectors), arg0, arg1, arg2, arg3) } -// StateNetworkName mocks base method +// StateNetworkName mocks base method. func (m *MockFullNode) StateNetworkName(arg0 context.Context) (dtypes.NetworkName, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "StateNetworkName", arg0) @@ -2434,13 +2554,13 @@ func (m *MockFullNode) StateNetworkName(arg0 context.Context) (dtypes.NetworkNam return ret0, ret1 } -// StateNetworkName indicates an expected call of StateNetworkName +// StateNetworkName indicates an expected call of StateNetworkName. 
func (mr *MockFullNodeMockRecorder) StateNetworkName(arg0 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateNetworkName", reflect.TypeOf((*MockFullNode)(nil).StateNetworkName), arg0) } -// StateNetworkVersion mocks base method +// StateNetworkVersion mocks base method. func (m *MockFullNode) StateNetworkVersion(arg0 context.Context, arg1 types.TipSetKey) (network.Version, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "StateNetworkVersion", arg0, arg1) @@ -2449,13 +2569,13 @@ func (m *MockFullNode) StateNetworkVersion(arg0 context.Context, arg1 types.TipS return ret0, ret1 } -// StateNetworkVersion indicates an expected call of StateNetworkVersion +// StateNetworkVersion indicates an expected call of StateNetworkVersion. func (mr *MockFullNodeMockRecorder) StateNetworkVersion(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateNetworkVersion", reflect.TypeOf((*MockFullNode)(nil).StateNetworkVersion), arg0, arg1) } -// StateReadState mocks base method +// StateReadState mocks base method. func (m *MockFullNode) StateReadState(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) (*api.ActorState, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "StateReadState", arg0, arg1, arg2) @@ -2464,13 +2584,13 @@ func (m *MockFullNode) StateReadState(arg0 context.Context, arg1 address.Address return ret0, ret1 } -// StateReadState indicates an expected call of StateReadState +// StateReadState indicates an expected call of StateReadState. func (mr *MockFullNodeMockRecorder) StateReadState(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateReadState", reflect.TypeOf((*MockFullNode)(nil).StateReadState), arg0, arg1, arg2) } -// StateReplay mocks base method +// StateReplay mocks base method. 
func (m *MockFullNode) StateReplay(arg0 context.Context, arg1 types.TipSetKey, arg2 cid.Cid) (*api.InvocResult, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "StateReplay", arg0, arg1, arg2) @@ -2479,13 +2599,13 @@ func (m *MockFullNode) StateReplay(arg0 context.Context, arg1 types.TipSetKey, a return ret0, ret1 } -// StateReplay indicates an expected call of StateReplay +// StateReplay indicates an expected call of StateReplay. func (mr *MockFullNodeMockRecorder) StateReplay(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateReplay", reflect.TypeOf((*MockFullNode)(nil).StateReplay), arg0, arg1, arg2) } -// StateSearchMsg mocks base method +// StateSearchMsg mocks base method. func (m *MockFullNode) StateSearchMsg(arg0 context.Context, arg1 types.TipSetKey, arg2 cid.Cid, arg3 abi.ChainEpoch, arg4 bool) (*api.MsgLookup, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "StateSearchMsg", arg0, arg1, arg2, arg3, arg4) @@ -2494,13 +2614,13 @@ func (m *MockFullNode) StateSearchMsg(arg0 context.Context, arg1 types.TipSetKey return ret0, ret1 } -// StateSearchMsg indicates an expected call of StateSearchMsg +// StateSearchMsg indicates an expected call of StateSearchMsg. func (mr *MockFullNodeMockRecorder) StateSearchMsg(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateSearchMsg", reflect.TypeOf((*MockFullNode)(nil).StateSearchMsg), arg0, arg1, arg2, arg3, arg4) } -// StateSectorExpiration mocks base method +// StateSectorExpiration mocks base method. 
func (m *MockFullNode) StateSectorExpiration(arg0 context.Context, arg1 address.Address, arg2 abi.SectorNumber, arg3 types.TipSetKey) (*miner.SectorExpiration, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "StateSectorExpiration", arg0, arg1, arg2, arg3) @@ -2509,13 +2629,13 @@ func (m *MockFullNode) StateSectorExpiration(arg0 context.Context, arg1 address. return ret0, ret1 } -// StateSectorExpiration indicates an expected call of StateSectorExpiration +// StateSectorExpiration indicates an expected call of StateSectorExpiration. func (mr *MockFullNodeMockRecorder) StateSectorExpiration(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateSectorExpiration", reflect.TypeOf((*MockFullNode)(nil).StateSectorExpiration), arg0, arg1, arg2, arg3) } -// StateSectorGetInfo mocks base method +// StateSectorGetInfo mocks base method. func (m *MockFullNode) StateSectorGetInfo(arg0 context.Context, arg1 address.Address, arg2 abi.SectorNumber, arg3 types.TipSetKey) (*miner.SectorOnChainInfo, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "StateSectorGetInfo", arg0, arg1, arg2, arg3) @@ -2524,13 +2644,13 @@ func (m *MockFullNode) StateSectorGetInfo(arg0 context.Context, arg1 address.Add return ret0, ret1 } -// StateSectorGetInfo indicates an expected call of StateSectorGetInfo +// StateSectorGetInfo indicates an expected call of StateSectorGetInfo. func (mr *MockFullNodeMockRecorder) StateSectorGetInfo(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateSectorGetInfo", reflect.TypeOf((*MockFullNode)(nil).StateSectorGetInfo), arg0, arg1, arg2, arg3) } -// StateSectorPartition mocks base method +// StateSectorPartition mocks base method. 
func (m *MockFullNode) StateSectorPartition(arg0 context.Context, arg1 address.Address, arg2 abi.SectorNumber, arg3 types.TipSetKey) (*miner.SectorLocation, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "StateSectorPartition", arg0, arg1, arg2, arg3) @@ -2539,13 +2659,13 @@ func (m *MockFullNode) StateSectorPartition(arg0 context.Context, arg1 address.A return ret0, ret1 } -// StateSectorPartition indicates an expected call of StateSectorPartition +// StateSectorPartition indicates an expected call of StateSectorPartition. func (mr *MockFullNodeMockRecorder) StateSectorPartition(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateSectorPartition", reflect.TypeOf((*MockFullNode)(nil).StateSectorPartition), arg0, arg1, arg2, arg3) } -// StateSectorPreCommitInfo mocks base method +// StateSectorPreCommitInfo mocks base method. func (m *MockFullNode) StateSectorPreCommitInfo(arg0 context.Context, arg1 address.Address, arg2 abi.SectorNumber, arg3 types.TipSetKey) (miner.SectorPreCommitOnChainInfo, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "StateSectorPreCommitInfo", arg0, arg1, arg2, arg3) @@ -2554,13 +2674,13 @@ func (m *MockFullNode) StateSectorPreCommitInfo(arg0 context.Context, arg1 addre return ret0, ret1 } -// StateSectorPreCommitInfo indicates an expected call of StateSectorPreCommitInfo +// StateSectorPreCommitInfo indicates an expected call of StateSectorPreCommitInfo. func (mr *MockFullNodeMockRecorder) StateSectorPreCommitInfo(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateSectorPreCommitInfo", reflect.TypeOf((*MockFullNode)(nil).StateSectorPreCommitInfo), arg0, arg1, arg2, arg3) } -// StateVMCirculatingSupplyInternal mocks base method +// StateVMCirculatingSupplyInternal mocks base method. 
func (m *MockFullNode) StateVMCirculatingSupplyInternal(arg0 context.Context, arg1 types.TipSetKey) (api.CirculatingSupply, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "StateVMCirculatingSupplyInternal", arg0, arg1) @@ -2569,13 +2689,13 @@ func (m *MockFullNode) StateVMCirculatingSupplyInternal(arg0 context.Context, ar return ret0, ret1 } -// StateVMCirculatingSupplyInternal indicates an expected call of StateVMCirculatingSupplyInternal +// StateVMCirculatingSupplyInternal indicates an expected call of StateVMCirculatingSupplyInternal. func (mr *MockFullNodeMockRecorder) StateVMCirculatingSupplyInternal(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateVMCirculatingSupplyInternal", reflect.TypeOf((*MockFullNode)(nil).StateVMCirculatingSupplyInternal), arg0, arg1) } -// StateVerifiedClientStatus mocks base method +// StateVerifiedClientStatus mocks base method. func (m *MockFullNode) StateVerifiedClientStatus(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) (*big.Int, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "StateVerifiedClientStatus", arg0, arg1, arg2) @@ -2584,13 +2704,13 @@ func (m *MockFullNode) StateVerifiedClientStatus(arg0 context.Context, arg1 addr return ret0, ret1 } -// StateVerifiedClientStatus indicates an expected call of StateVerifiedClientStatus +// StateVerifiedClientStatus indicates an expected call of StateVerifiedClientStatus. func (mr *MockFullNodeMockRecorder) StateVerifiedClientStatus(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateVerifiedClientStatus", reflect.TypeOf((*MockFullNode)(nil).StateVerifiedClientStatus), arg0, arg1, arg2) } -// StateVerifiedRegistryRootKey mocks base method +// StateVerifiedRegistryRootKey mocks base method. 
func (m *MockFullNode) StateVerifiedRegistryRootKey(arg0 context.Context, arg1 types.TipSetKey) (address.Address, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "StateVerifiedRegistryRootKey", arg0, arg1) @@ -2599,13 +2719,13 @@ func (m *MockFullNode) StateVerifiedRegistryRootKey(arg0 context.Context, arg1 t return ret0, ret1 } -// StateVerifiedRegistryRootKey indicates an expected call of StateVerifiedRegistryRootKey +// StateVerifiedRegistryRootKey indicates an expected call of StateVerifiedRegistryRootKey. func (mr *MockFullNodeMockRecorder) StateVerifiedRegistryRootKey(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateVerifiedRegistryRootKey", reflect.TypeOf((*MockFullNode)(nil).StateVerifiedRegistryRootKey), arg0, arg1) } -// StateVerifierStatus mocks base method +// StateVerifierStatus mocks base method. func (m *MockFullNode) StateVerifierStatus(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) (*big.Int, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "StateVerifierStatus", arg0, arg1, arg2) @@ -2614,13 +2734,13 @@ func (m *MockFullNode) StateVerifierStatus(arg0 context.Context, arg1 address.Ad return ret0, ret1 } -// StateVerifierStatus indicates an expected call of StateVerifierStatus +// StateVerifierStatus indicates an expected call of StateVerifierStatus. func (mr *MockFullNodeMockRecorder) StateVerifierStatus(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateVerifierStatus", reflect.TypeOf((*MockFullNode)(nil).StateVerifierStatus), arg0, arg1, arg2) } -// StateWaitMsg mocks base method +// StateWaitMsg mocks base method. 
func (m *MockFullNode) StateWaitMsg(arg0 context.Context, arg1 cid.Cid, arg2 uint64, arg3 abi.ChainEpoch, arg4 bool) (*api.MsgLookup, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "StateWaitMsg", arg0, arg1, arg2, arg3, arg4) @@ -2629,13 +2749,13 @@ func (m *MockFullNode) StateWaitMsg(arg0 context.Context, arg1 cid.Cid, arg2 uin return ret0, ret1 } -// StateWaitMsg indicates an expected call of StateWaitMsg +// StateWaitMsg indicates an expected call of StateWaitMsg. func (mr *MockFullNodeMockRecorder) StateWaitMsg(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateWaitMsg", reflect.TypeOf((*MockFullNode)(nil).StateWaitMsg), arg0, arg1, arg2, arg3, arg4) } -// SyncCheckBad mocks base method +// SyncCheckBad mocks base method. func (m *MockFullNode) SyncCheckBad(arg0 context.Context, arg1 cid.Cid) (string, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "SyncCheckBad", arg0, arg1) @@ -2644,13 +2764,13 @@ func (m *MockFullNode) SyncCheckBad(arg0 context.Context, arg1 cid.Cid) (string, return ret0, ret1 } -// SyncCheckBad indicates an expected call of SyncCheckBad +// SyncCheckBad indicates an expected call of SyncCheckBad. func (mr *MockFullNodeMockRecorder) SyncCheckBad(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SyncCheckBad", reflect.TypeOf((*MockFullNode)(nil).SyncCheckBad), arg0, arg1) } -// SyncCheckpoint mocks base method +// SyncCheckpoint mocks base method. func (m *MockFullNode) SyncCheckpoint(arg0 context.Context, arg1 types.TipSetKey) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "SyncCheckpoint", arg0, arg1) @@ -2658,13 +2778,13 @@ func (m *MockFullNode) SyncCheckpoint(arg0 context.Context, arg1 types.TipSetKey return ret0 } -// SyncCheckpoint indicates an expected call of SyncCheckpoint +// SyncCheckpoint indicates an expected call of SyncCheckpoint. 
func (mr *MockFullNodeMockRecorder) SyncCheckpoint(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SyncCheckpoint", reflect.TypeOf((*MockFullNode)(nil).SyncCheckpoint), arg0, arg1) } -// SyncIncomingBlocks mocks base method +// SyncIncomingBlocks mocks base method. func (m *MockFullNode) SyncIncomingBlocks(arg0 context.Context) (<-chan *types.BlockHeader, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "SyncIncomingBlocks", arg0) @@ -2673,13 +2793,13 @@ func (m *MockFullNode) SyncIncomingBlocks(arg0 context.Context) (<-chan *types.B return ret0, ret1 } -// SyncIncomingBlocks indicates an expected call of SyncIncomingBlocks +// SyncIncomingBlocks indicates an expected call of SyncIncomingBlocks. func (mr *MockFullNodeMockRecorder) SyncIncomingBlocks(arg0 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SyncIncomingBlocks", reflect.TypeOf((*MockFullNode)(nil).SyncIncomingBlocks), arg0) } -// SyncMarkBad mocks base method +// SyncMarkBad mocks base method. func (m *MockFullNode) SyncMarkBad(arg0 context.Context, arg1 cid.Cid) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "SyncMarkBad", arg0, arg1) @@ -2687,13 +2807,13 @@ func (m *MockFullNode) SyncMarkBad(arg0 context.Context, arg1 cid.Cid) error { return ret0 } -// SyncMarkBad indicates an expected call of SyncMarkBad +// SyncMarkBad indicates an expected call of SyncMarkBad. func (mr *MockFullNodeMockRecorder) SyncMarkBad(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SyncMarkBad", reflect.TypeOf((*MockFullNode)(nil).SyncMarkBad), arg0, arg1) } -// SyncState mocks base method +// SyncState mocks base method. 
func (m *MockFullNode) SyncState(arg0 context.Context) (*api.SyncState, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "SyncState", arg0) @@ -2702,13 +2822,13 @@ func (m *MockFullNode) SyncState(arg0 context.Context) (*api.SyncState, error) { return ret0, ret1 } -// SyncState indicates an expected call of SyncState +// SyncState indicates an expected call of SyncState. func (mr *MockFullNodeMockRecorder) SyncState(arg0 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SyncState", reflect.TypeOf((*MockFullNode)(nil).SyncState), arg0) } -// SyncSubmitBlock mocks base method +// SyncSubmitBlock mocks base method. func (m *MockFullNode) SyncSubmitBlock(arg0 context.Context, arg1 *types.BlockMsg) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "SyncSubmitBlock", arg0, arg1) @@ -2716,13 +2836,13 @@ func (m *MockFullNode) SyncSubmitBlock(arg0 context.Context, arg1 *types.BlockMs return ret0 } -// SyncSubmitBlock indicates an expected call of SyncSubmitBlock +// SyncSubmitBlock indicates an expected call of SyncSubmitBlock. func (mr *MockFullNodeMockRecorder) SyncSubmitBlock(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SyncSubmitBlock", reflect.TypeOf((*MockFullNode)(nil).SyncSubmitBlock), arg0, arg1) } -// SyncUnmarkAllBad mocks base method +// SyncUnmarkAllBad mocks base method. func (m *MockFullNode) SyncUnmarkAllBad(arg0 context.Context) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "SyncUnmarkAllBad", arg0) @@ -2730,13 +2850,13 @@ func (m *MockFullNode) SyncUnmarkAllBad(arg0 context.Context) error { return ret0 } -// SyncUnmarkAllBad indicates an expected call of SyncUnmarkAllBad +// SyncUnmarkAllBad indicates an expected call of SyncUnmarkAllBad. 
func (mr *MockFullNodeMockRecorder) SyncUnmarkAllBad(arg0 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SyncUnmarkAllBad", reflect.TypeOf((*MockFullNode)(nil).SyncUnmarkAllBad), arg0) } -// SyncUnmarkBad mocks base method +// SyncUnmarkBad mocks base method. func (m *MockFullNode) SyncUnmarkBad(arg0 context.Context, arg1 cid.Cid) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "SyncUnmarkBad", arg0, arg1) @@ -2744,13 +2864,13 @@ func (m *MockFullNode) SyncUnmarkBad(arg0 context.Context, arg1 cid.Cid) error { return ret0 } -// SyncUnmarkBad indicates an expected call of SyncUnmarkBad +// SyncUnmarkBad indicates an expected call of SyncUnmarkBad. func (mr *MockFullNodeMockRecorder) SyncUnmarkBad(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SyncUnmarkBad", reflect.TypeOf((*MockFullNode)(nil).SyncUnmarkBad), arg0, arg1) } -// SyncValidateTipset mocks base method +// SyncValidateTipset mocks base method. func (m *MockFullNode) SyncValidateTipset(arg0 context.Context, arg1 types.TipSetKey) (bool, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "SyncValidateTipset", arg0, arg1) @@ -2759,13 +2879,13 @@ func (m *MockFullNode) SyncValidateTipset(arg0 context.Context, arg1 types.TipSe return ret0, ret1 } -// SyncValidateTipset indicates an expected call of SyncValidateTipset +// SyncValidateTipset indicates an expected call of SyncValidateTipset. func (mr *MockFullNodeMockRecorder) SyncValidateTipset(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SyncValidateTipset", reflect.TypeOf((*MockFullNode)(nil).SyncValidateTipset), arg0, arg1) } -// Version mocks base method +// Version mocks base method. 
func (m *MockFullNode) Version(arg0 context.Context) (api.APIVersion, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Version", arg0) @@ -2774,13 +2894,13 @@ func (m *MockFullNode) Version(arg0 context.Context) (api.APIVersion, error) { return ret0, ret1 } -// Version indicates an expected call of Version +// Version indicates an expected call of Version. func (mr *MockFullNodeMockRecorder) Version(arg0 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Version", reflect.TypeOf((*MockFullNode)(nil).Version), arg0) } -// WalletBalance mocks base method +// WalletBalance mocks base method. func (m *MockFullNode) WalletBalance(arg0 context.Context, arg1 address.Address) (big.Int, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "WalletBalance", arg0, arg1) @@ -2789,13 +2909,13 @@ func (m *MockFullNode) WalletBalance(arg0 context.Context, arg1 address.Address) return ret0, ret1 } -// WalletBalance indicates an expected call of WalletBalance +// WalletBalance indicates an expected call of WalletBalance. func (mr *MockFullNodeMockRecorder) WalletBalance(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WalletBalance", reflect.TypeOf((*MockFullNode)(nil).WalletBalance), arg0, arg1) } -// WalletDefaultAddress mocks base method +// WalletDefaultAddress mocks base method. func (m *MockFullNode) WalletDefaultAddress(arg0 context.Context) (address.Address, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "WalletDefaultAddress", arg0) @@ -2804,13 +2924,13 @@ func (m *MockFullNode) WalletDefaultAddress(arg0 context.Context) (address.Addre return ret0, ret1 } -// WalletDefaultAddress indicates an expected call of WalletDefaultAddress +// WalletDefaultAddress indicates an expected call of WalletDefaultAddress. 
func (mr *MockFullNodeMockRecorder) WalletDefaultAddress(arg0 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WalletDefaultAddress", reflect.TypeOf((*MockFullNode)(nil).WalletDefaultAddress), arg0) } -// WalletDelete mocks base method +// WalletDelete mocks base method. func (m *MockFullNode) WalletDelete(arg0 context.Context, arg1 address.Address) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "WalletDelete", arg0, arg1) @@ -2818,13 +2938,13 @@ func (m *MockFullNode) WalletDelete(arg0 context.Context, arg1 address.Address) return ret0 } -// WalletDelete indicates an expected call of WalletDelete +// WalletDelete indicates an expected call of WalletDelete. func (mr *MockFullNodeMockRecorder) WalletDelete(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WalletDelete", reflect.TypeOf((*MockFullNode)(nil).WalletDelete), arg0, arg1) } -// WalletExport mocks base method +// WalletExport mocks base method. func (m *MockFullNode) WalletExport(arg0 context.Context, arg1 address.Address) (*types.KeyInfo, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "WalletExport", arg0, arg1) @@ -2833,13 +2953,13 @@ func (m *MockFullNode) WalletExport(arg0 context.Context, arg1 address.Address) return ret0, ret1 } -// WalletExport indicates an expected call of WalletExport +// WalletExport indicates an expected call of WalletExport. func (mr *MockFullNodeMockRecorder) WalletExport(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WalletExport", reflect.TypeOf((*MockFullNode)(nil).WalletExport), arg0, arg1) } -// WalletHas mocks base method +// WalletHas mocks base method. 
func (m *MockFullNode) WalletHas(arg0 context.Context, arg1 address.Address) (bool, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "WalletHas", arg0, arg1) @@ -2848,13 +2968,13 @@ func (m *MockFullNode) WalletHas(arg0 context.Context, arg1 address.Address) (bo return ret0, ret1 } -// WalletHas indicates an expected call of WalletHas +// WalletHas indicates an expected call of WalletHas. func (mr *MockFullNodeMockRecorder) WalletHas(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WalletHas", reflect.TypeOf((*MockFullNode)(nil).WalletHas), arg0, arg1) } -// WalletImport mocks base method +// WalletImport mocks base method. func (m *MockFullNode) WalletImport(arg0 context.Context, arg1 *types.KeyInfo) (address.Address, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "WalletImport", arg0, arg1) @@ -2863,13 +2983,13 @@ func (m *MockFullNode) WalletImport(arg0 context.Context, arg1 *types.KeyInfo) ( return ret0, ret1 } -// WalletImport indicates an expected call of WalletImport +// WalletImport indicates an expected call of WalletImport. func (mr *MockFullNodeMockRecorder) WalletImport(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WalletImport", reflect.TypeOf((*MockFullNode)(nil).WalletImport), arg0, arg1) } -// WalletList mocks base method +// WalletList mocks base method. func (m *MockFullNode) WalletList(arg0 context.Context) ([]address.Address, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "WalletList", arg0) @@ -2878,13 +2998,13 @@ func (m *MockFullNode) WalletList(arg0 context.Context) ([]address.Address, erro return ret0, ret1 } -// WalletList indicates an expected call of WalletList +// WalletList indicates an expected call of WalletList. 
func (mr *MockFullNodeMockRecorder) WalletList(arg0 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WalletList", reflect.TypeOf((*MockFullNode)(nil).WalletList), arg0) } -// WalletNew mocks base method +// WalletNew mocks base method. func (m *MockFullNode) WalletNew(arg0 context.Context, arg1 types.KeyType) (address.Address, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "WalletNew", arg0, arg1) @@ -2893,13 +3013,13 @@ func (m *MockFullNode) WalletNew(arg0 context.Context, arg1 types.KeyType) (addr return ret0, ret1 } -// WalletNew indicates an expected call of WalletNew +// WalletNew indicates an expected call of WalletNew. func (mr *MockFullNodeMockRecorder) WalletNew(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WalletNew", reflect.TypeOf((*MockFullNode)(nil).WalletNew), arg0, arg1) } -// WalletSetDefault mocks base method +// WalletSetDefault mocks base method. func (m *MockFullNode) WalletSetDefault(arg0 context.Context, arg1 address.Address) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "WalletSetDefault", arg0, arg1) @@ -2907,13 +3027,13 @@ func (m *MockFullNode) WalletSetDefault(arg0 context.Context, arg1 address.Addre return ret0 } -// WalletSetDefault indicates an expected call of WalletSetDefault +// WalletSetDefault indicates an expected call of WalletSetDefault. func (mr *MockFullNodeMockRecorder) WalletSetDefault(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WalletSetDefault", reflect.TypeOf((*MockFullNode)(nil).WalletSetDefault), arg0, arg1) } -// WalletSign mocks base method +// WalletSign mocks base method. 
func (m *MockFullNode) WalletSign(arg0 context.Context, arg1 address.Address, arg2 []byte) (*crypto.Signature, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "WalletSign", arg0, arg1, arg2) @@ -2922,13 +3042,13 @@ func (m *MockFullNode) WalletSign(arg0 context.Context, arg1 address.Address, ar return ret0, ret1 } -// WalletSign indicates an expected call of WalletSign +// WalletSign indicates an expected call of WalletSign. func (mr *MockFullNodeMockRecorder) WalletSign(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WalletSign", reflect.TypeOf((*MockFullNode)(nil).WalletSign), arg0, arg1, arg2) } -// WalletSignMessage mocks base method +// WalletSignMessage mocks base method. func (m *MockFullNode) WalletSignMessage(arg0 context.Context, arg1 address.Address, arg2 *types.Message) (*types.SignedMessage, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "WalletSignMessage", arg0, arg1, arg2) @@ -2937,13 +3057,13 @@ func (m *MockFullNode) WalletSignMessage(arg0 context.Context, arg1 address.Addr return ret0, ret1 } -// WalletSignMessage indicates an expected call of WalletSignMessage +// WalletSignMessage indicates an expected call of WalletSignMessage. func (mr *MockFullNodeMockRecorder) WalletSignMessage(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WalletSignMessage", reflect.TypeOf((*MockFullNode)(nil).WalletSignMessage), arg0, arg1, arg2) } -// WalletValidateAddress mocks base method +// WalletValidateAddress mocks base method. 
func (m *MockFullNode) WalletValidateAddress(arg0 context.Context, arg1 string) (address.Address, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "WalletValidateAddress", arg0, arg1) @@ -2952,13 +3072,13 @@ func (m *MockFullNode) WalletValidateAddress(arg0 context.Context, arg1 string) return ret0, ret1 } -// WalletValidateAddress indicates an expected call of WalletValidateAddress +// WalletValidateAddress indicates an expected call of WalletValidateAddress. func (mr *MockFullNodeMockRecorder) WalletValidateAddress(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WalletValidateAddress", reflect.TypeOf((*MockFullNode)(nil).WalletValidateAddress), arg0, arg1) } -// WalletVerify mocks base method +// WalletVerify mocks base method. func (m *MockFullNode) WalletVerify(arg0 context.Context, arg1 address.Address, arg2 []byte, arg3 *crypto.Signature) (bool, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "WalletVerify", arg0, arg1, arg2, arg3) @@ -2967,7 +3087,7 @@ func (m *MockFullNode) WalletVerify(arg0 context.Context, arg1 address.Address, return ret0, ret1 } -// WalletVerify indicates an expected call of WalletVerify +// WalletVerify indicates an expected call of WalletVerify. 
func (mr *MockFullNodeMockRecorder) WalletVerify(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WalletVerify", reflect.TypeOf((*MockFullNode)(nil).WalletVerify), arg0, arg1, arg2, arg3) diff --git a/api/proxy_gen.go b/api/proxy_gen.go index a46f33459..e8ea27469 100644 --- a/api/proxy_gen.go +++ b/api/proxy_gen.go @@ -4,7 +4,6 @@ package api import ( "context" - "io" "time" "github.com/filecoin-project/go-address" @@ -128,6 +127,8 @@ type FullNodeStruct struct { ChainGetMessage func(p0 context.Context, p1 cid.Cid) (*types.Message, error) `perm:"read"` + ChainGetMessagesInTipset func(p0 context.Context, p1 types.TipSetKey) ([]Message, error) `perm:"read"` + ChainGetNode func(p0 context.Context, p1 string) (*IpldObject, error) `perm:"read"` ChainGetParentMessages func(p0 context.Context, p1 cid.Cid) ([]Message, error) `perm:"read"` @@ -180,6 +181,8 @@ type FullNodeStruct struct { ClientGetDealUpdates func(p0 context.Context) (<-chan DealInfo, error) `perm:"write"` + ClientGetRetrievalUpdates func(p0 context.Context) (<-chan RetrievalInfo, error) `perm:"write"` + ClientHasLocal func(p0 context.Context, p1 cid.Cid) (bool, error) `perm:"write"` ClientImport func(p0 context.Context, p1 FileRef) (*ImportRes, error) `perm:"admin"` @@ -190,6 +193,8 @@ type FullNodeStruct struct { ClientListImports func(p0 context.Context) ([]Import, error) `perm:"write"` + ClientListRetrievals func(p0 context.Context) ([]RetrievalInfo, error) `perm:"write"` + ClientMinerQueryOffer func(p0 context.Context, p1 address.Address, p2 cid.Cid, p3 *cid.Cid) (QueryOffer, error) `perm:"read"` ClientQueryAsk func(p0 context.Context, p1 peer.ID, p2 address.Address) (*storagemarket.StorageAsk, error) `perm:"read"` @@ -206,6 +211,8 @@ type FullNodeStruct struct { ClientStartDeal func(p0 context.Context, p1 *StartDealParams) (*cid.Cid, error) `perm:"admin"` + ClientStatelessDeal func(p0 context.Context, p1 *StartDealParams) 
(*cid.Cid, error) `perm:"write"` + CreateBackup func(p0 context.Context, p1 string) error `perm:"admin"` GasEstimateFeeCap func(p0 context.Context, p1 *types.Message, p2 int64, p3 types.TipSetKey) (types.BigInt, error) `perm:"read"` @@ -236,6 +243,12 @@ type FullNodeStruct struct { MpoolBatchPushUntrusted func(p0 context.Context, p1 []*types.SignedMessage) ([]cid.Cid, error) `perm:"write"` + MpoolCheckMessages func(p0 context.Context, p1 []*MessagePrototype) ([][]MessageCheckStatus, error) `perm:"read"` + + MpoolCheckPendingMessages func(p0 context.Context, p1 address.Address) ([][]MessageCheckStatus, error) `perm:"read"` + + MpoolCheckReplaceMessages func(p0 context.Context, p1 []*types.Message) ([][]MessageCheckStatus, error) `perm:"read"` + MpoolClear func(p0 context.Context, p1 bool) error `perm:"write"` MpoolGetConfig func(p0 context.Context) (*types.MpoolConfig, error) `perm:"read"` @@ -256,19 +269,19 @@ type FullNodeStruct struct { MpoolSub func(p0 context.Context) (<-chan MpoolUpdate, error) `perm:"read"` - MsigAddApprove func(p0 context.Context, p1 address.Address, p2 address.Address, p3 uint64, p4 address.Address, p5 address.Address, p6 bool) (cid.Cid, error) `perm:"sign"` + MsigAddApprove func(p0 context.Context, p1 address.Address, p2 address.Address, p3 uint64, p4 address.Address, p5 address.Address, p6 bool) (*MessagePrototype, error) `perm:"sign"` - MsigAddCancel func(p0 context.Context, p1 address.Address, p2 address.Address, p3 uint64, p4 address.Address, p5 bool) (cid.Cid, error) `perm:"sign"` + MsigAddCancel func(p0 context.Context, p1 address.Address, p2 address.Address, p3 uint64, p4 address.Address, p5 bool) (*MessagePrototype, error) `perm:"sign"` - MsigAddPropose func(p0 context.Context, p1 address.Address, p2 address.Address, p3 address.Address, p4 bool) (cid.Cid, error) `perm:"sign"` + MsigAddPropose func(p0 context.Context, p1 address.Address, p2 address.Address, p3 address.Address, p4 bool) (*MessagePrototype, error) `perm:"sign"` - 
MsigApprove func(p0 context.Context, p1 address.Address, p2 uint64, p3 address.Address) (cid.Cid, error) `perm:"sign"` + MsigApprove func(p0 context.Context, p1 address.Address, p2 uint64, p3 address.Address) (*MessagePrototype, error) `perm:"sign"` - MsigApproveTxnHash func(p0 context.Context, p1 address.Address, p2 uint64, p3 address.Address, p4 address.Address, p5 types.BigInt, p6 address.Address, p7 uint64, p8 []byte) (cid.Cid, error) `perm:"sign"` + MsigApproveTxnHash func(p0 context.Context, p1 address.Address, p2 uint64, p3 address.Address, p4 address.Address, p5 types.BigInt, p6 address.Address, p7 uint64, p8 []byte) (*MessagePrototype, error) `perm:"sign"` - MsigCancel func(p0 context.Context, p1 address.Address, p2 uint64, p3 address.Address, p4 types.BigInt, p5 address.Address, p6 uint64, p7 []byte) (cid.Cid, error) `perm:"sign"` + MsigCancel func(p0 context.Context, p1 address.Address, p2 uint64, p3 address.Address, p4 types.BigInt, p5 address.Address, p6 uint64, p7 []byte) (*MessagePrototype, error) `perm:"sign"` - MsigCreate func(p0 context.Context, p1 uint64, p2 []address.Address, p3 abi.ChainEpoch, p4 types.BigInt, p5 address.Address, p6 types.BigInt) (cid.Cid, error) `perm:"sign"` + MsigCreate func(p0 context.Context, p1 uint64, p2 []address.Address, p3 abi.ChainEpoch, p4 types.BigInt, p5 address.Address, p6 types.BigInt) (*MessagePrototype, error) `perm:"sign"` MsigGetAvailableBalance func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (types.BigInt, error) `perm:"read"` @@ -278,15 +291,17 @@ type FullNodeStruct struct { MsigGetVestingSchedule func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (MsigVesting, error) `perm:"read"` - MsigPropose func(p0 context.Context, p1 address.Address, p2 address.Address, p3 types.BigInt, p4 address.Address, p5 uint64, p6 []byte) (cid.Cid, error) `perm:"sign"` + MsigPropose func(p0 context.Context, p1 address.Address, p2 address.Address, p3 types.BigInt, p4 address.Address, p5 uint64, p6 
[]byte) (*MessagePrototype, error) `perm:"sign"` - MsigRemoveSigner func(p0 context.Context, p1 address.Address, p2 address.Address, p3 address.Address, p4 bool) (cid.Cid, error) `perm:"sign"` + MsigRemoveSigner func(p0 context.Context, p1 address.Address, p2 address.Address, p3 address.Address, p4 bool) (*MessagePrototype, error) `perm:"sign"` - MsigSwapApprove func(p0 context.Context, p1 address.Address, p2 address.Address, p3 uint64, p4 address.Address, p5 address.Address, p6 address.Address) (cid.Cid, error) `perm:"sign"` + MsigSwapApprove func(p0 context.Context, p1 address.Address, p2 address.Address, p3 uint64, p4 address.Address, p5 address.Address, p6 address.Address) (*MessagePrototype, error) `perm:"sign"` - MsigSwapCancel func(p0 context.Context, p1 address.Address, p2 address.Address, p3 uint64, p4 address.Address, p5 address.Address) (cid.Cid, error) `perm:"sign"` + MsigSwapCancel func(p0 context.Context, p1 address.Address, p2 address.Address, p3 uint64, p4 address.Address, p5 address.Address) (*MessagePrototype, error) `perm:"sign"` - MsigSwapPropose func(p0 context.Context, p1 address.Address, p2 address.Address, p3 address.Address, p4 address.Address) (cid.Cid, error) `perm:"sign"` + MsigSwapPropose func(p0 context.Context, p1 address.Address, p2 address.Address, p3 address.Address, p4 address.Address) (*MessagePrototype, error) `perm:"sign"` + + NodeStatus func(p0 context.Context, p1 bool) (NodeStatus, error) `perm:"read"` PaychAllocateLane func(p0 context.Context, p1 address.Address) (uint64, error) `perm:"sign"` @@ -518,6 +533,8 @@ type GatewayStruct struct { StateWaitMsg func(p0 context.Context, p1 cid.Cid, p2 uint64, p3 abi.ChainEpoch, p4 bool) (*MsgLookup, error) `` + Version func(p0 context.Context) (APIVersion, error) `` + WalletBalance func(p0 context.Context, p1 address.Address) (types.BigInt, error) `` } } @@ -650,6 +667,8 @@ type StorageMinerStruct struct { SealingSchedDiag func(p0 context.Context, p1 bool) (interface{}, error) 
`perm:"admin"` + SectorAddPieceToAny func(p0 context.Context, p1 abi.UnpaddedPieceSize, p2 storage.Data, p3 PieceDealInfo) (SectorOffset, error) `perm:"admin"` + SectorCommitFlush func(p0 context.Context) ([]sealiface.CommitBatchRes, error) `perm:"admin"` SectorCommitPending func(p0 context.Context) ([]abi.SectorID, error) `perm:"admin"` @@ -688,6 +707,8 @@ type StorageMinerStruct struct { SectorsSummary func(p0 context.Context) (map[SectorState]int, error) `perm:"read"` + SectorsUnsealPiece func(p0 context.Context, p1 storage.SectorRef, p2 storiface.UnpaddedByteIndex, p3 abi.UnpaddedPieceSize, p4 abi.SealRandomness, p5 *cid.Cid) error `perm:"admin"` + SectorsUpdate func(p0 context.Context, p1 abi.SectorNumber, p2 SectorState) error `perm:"admin"` StorageAddLocal func(p0 context.Context, p1 string) error `perm:"admin"` @@ -730,19 +751,19 @@ type StorageMinerStub struct { type WalletStruct struct { Internal struct { - WalletDelete func(p0 context.Context, p1 address.Address) error `` + WalletDelete func(p0 context.Context, p1 address.Address) error `perm:"admin"` - WalletExport func(p0 context.Context, p1 address.Address) (*types.KeyInfo, error) `` + WalletExport func(p0 context.Context, p1 address.Address) (*types.KeyInfo, error) `perm:"admin"` - WalletHas func(p0 context.Context, p1 address.Address) (bool, error) `` + WalletHas func(p0 context.Context, p1 address.Address) (bool, error) `perm:"admin"` - WalletImport func(p0 context.Context, p1 *types.KeyInfo) (address.Address, error) `` + WalletImport func(p0 context.Context, p1 *types.KeyInfo) (address.Address, error) `perm:"admin"` - WalletList func(p0 context.Context) ([]address.Address, error) `` + WalletList func(p0 context.Context) ([]address.Address, error) `perm:"admin"` - WalletNew func(p0 context.Context, p1 types.KeyType) (address.Address, error) `` + WalletNew func(p0 context.Context, p1 types.KeyType) (address.Address, error) `perm:"admin"` - WalletSign func(p0 context.Context, p1 address.Address, p2 
[]byte, p3 MsgMeta) (*crypto.Signature, error) `` + WalletSign func(p0 context.Context, p1 address.Address, p2 []byte, p3 MsgMeta) (*crypto.Signature, error) `perm:"admin"` } } @@ -767,8 +788,6 @@ type WorkerStruct struct { ProcessSession func(p0 context.Context) (uuid.UUID, error) `perm:"admin"` - ReadPiece func(p0 context.Context, p1 io.Writer, p2 storage.SectorRef, p3 storiface.UnpaddedByteIndex, p4 abi.UnpaddedPieceSize) (storiface.CallID, error) `perm:"admin"` - ReleaseUnsealed func(p0 context.Context, p1 storage.SectorRef, p2 []storage.Range) (storiface.CallID, error) `perm:"admin"` Remove func(p0 context.Context, p1 abi.SectorID) error `perm:"admin"` @@ -1084,6 +1103,14 @@ func (s *FullNodeStub) ChainGetMessage(p0 context.Context, p1 cid.Cid) (*types.M return nil, xerrors.New("method not supported") } +func (s *FullNodeStruct) ChainGetMessagesInTipset(p0 context.Context, p1 types.TipSetKey) ([]Message, error) { + return s.Internal.ChainGetMessagesInTipset(p0, p1) +} + +func (s *FullNodeStub) ChainGetMessagesInTipset(p0 context.Context, p1 types.TipSetKey) ([]Message, error) { + return *new([]Message), xerrors.New("method not supported") +} + func (s *FullNodeStruct) ChainGetNode(p0 context.Context, p1 string) (*IpldObject, error) { return s.Internal.ChainGetNode(p0, p1) } @@ -1292,6 +1319,14 @@ func (s *FullNodeStub) ClientGetDealUpdates(p0 context.Context) (<-chan DealInfo return nil, xerrors.New("method not supported") } +func (s *FullNodeStruct) ClientGetRetrievalUpdates(p0 context.Context) (<-chan RetrievalInfo, error) { + return s.Internal.ClientGetRetrievalUpdates(p0) +} + +func (s *FullNodeStub) ClientGetRetrievalUpdates(p0 context.Context) (<-chan RetrievalInfo, error) { + return nil, xerrors.New("method not supported") +} + func (s *FullNodeStruct) ClientHasLocal(p0 context.Context, p1 cid.Cid) (bool, error) { return s.Internal.ClientHasLocal(p0, p1) } @@ -1332,6 +1367,14 @@ func (s *FullNodeStub) ClientListImports(p0 context.Context) ([]Import, 
error) { return *new([]Import), xerrors.New("method not supported") } +func (s *FullNodeStruct) ClientListRetrievals(p0 context.Context) ([]RetrievalInfo, error) { + return s.Internal.ClientListRetrievals(p0) +} + +func (s *FullNodeStub) ClientListRetrievals(p0 context.Context) ([]RetrievalInfo, error) { + return *new([]RetrievalInfo), xerrors.New("method not supported") +} + func (s *FullNodeStruct) ClientMinerQueryOffer(p0 context.Context, p1 address.Address, p2 cid.Cid, p3 *cid.Cid) (QueryOffer, error) { return s.Internal.ClientMinerQueryOffer(p0, p1, p2, p3) } @@ -1396,6 +1439,14 @@ func (s *FullNodeStub) ClientStartDeal(p0 context.Context, p1 *StartDealParams) return nil, xerrors.New("method not supported") } +func (s *FullNodeStruct) ClientStatelessDeal(p0 context.Context, p1 *StartDealParams) (*cid.Cid, error) { + return s.Internal.ClientStatelessDeal(p0, p1) +} + +func (s *FullNodeStub) ClientStatelessDeal(p0 context.Context, p1 *StartDealParams) (*cid.Cid, error) { + return nil, xerrors.New("method not supported") +} + func (s *FullNodeStruct) CreateBackup(p0 context.Context, p1 string) error { return s.Internal.CreateBackup(p0, p1) } @@ -1516,6 +1567,30 @@ func (s *FullNodeStub) MpoolBatchPushUntrusted(p0 context.Context, p1 []*types.S return *new([]cid.Cid), xerrors.New("method not supported") } +func (s *FullNodeStruct) MpoolCheckMessages(p0 context.Context, p1 []*MessagePrototype) ([][]MessageCheckStatus, error) { + return s.Internal.MpoolCheckMessages(p0, p1) +} + +func (s *FullNodeStub) MpoolCheckMessages(p0 context.Context, p1 []*MessagePrototype) ([][]MessageCheckStatus, error) { + return *new([][]MessageCheckStatus), xerrors.New("method not supported") +} + +func (s *FullNodeStruct) MpoolCheckPendingMessages(p0 context.Context, p1 address.Address) ([][]MessageCheckStatus, error) { + return s.Internal.MpoolCheckPendingMessages(p0, p1) +} + +func (s *FullNodeStub) MpoolCheckPendingMessages(p0 context.Context, p1 address.Address) 
([][]MessageCheckStatus, error) { + return *new([][]MessageCheckStatus), xerrors.New("method not supported") +} + +func (s *FullNodeStruct) MpoolCheckReplaceMessages(p0 context.Context, p1 []*types.Message) ([][]MessageCheckStatus, error) { + return s.Internal.MpoolCheckReplaceMessages(p0, p1) +} + +func (s *FullNodeStub) MpoolCheckReplaceMessages(p0 context.Context, p1 []*types.Message) ([][]MessageCheckStatus, error) { + return *new([][]MessageCheckStatus), xerrors.New("method not supported") +} + func (s *FullNodeStruct) MpoolClear(p0 context.Context, p1 bool) error { return s.Internal.MpoolClear(p0, p1) } @@ -1596,60 +1671,60 @@ func (s *FullNodeStub) MpoolSub(p0 context.Context) (<-chan MpoolUpdate, error) return nil, xerrors.New("method not supported") } -func (s *FullNodeStruct) MsigAddApprove(p0 context.Context, p1 address.Address, p2 address.Address, p3 uint64, p4 address.Address, p5 address.Address, p6 bool) (cid.Cid, error) { +func (s *FullNodeStruct) MsigAddApprove(p0 context.Context, p1 address.Address, p2 address.Address, p3 uint64, p4 address.Address, p5 address.Address, p6 bool) (*MessagePrototype, error) { return s.Internal.MsigAddApprove(p0, p1, p2, p3, p4, p5, p6) } -func (s *FullNodeStub) MsigAddApprove(p0 context.Context, p1 address.Address, p2 address.Address, p3 uint64, p4 address.Address, p5 address.Address, p6 bool) (cid.Cid, error) { - return *new(cid.Cid), xerrors.New("method not supported") +func (s *FullNodeStub) MsigAddApprove(p0 context.Context, p1 address.Address, p2 address.Address, p3 uint64, p4 address.Address, p5 address.Address, p6 bool) (*MessagePrototype, error) { + return nil, xerrors.New("method not supported") } -func (s *FullNodeStruct) MsigAddCancel(p0 context.Context, p1 address.Address, p2 address.Address, p3 uint64, p4 address.Address, p5 bool) (cid.Cid, error) { +func (s *FullNodeStruct) MsigAddCancel(p0 context.Context, p1 address.Address, p2 address.Address, p3 uint64, p4 address.Address, p5 bool) 
(*MessagePrototype, error) { return s.Internal.MsigAddCancel(p0, p1, p2, p3, p4, p5) } -func (s *FullNodeStub) MsigAddCancel(p0 context.Context, p1 address.Address, p2 address.Address, p3 uint64, p4 address.Address, p5 bool) (cid.Cid, error) { - return *new(cid.Cid), xerrors.New("method not supported") +func (s *FullNodeStub) MsigAddCancel(p0 context.Context, p1 address.Address, p2 address.Address, p3 uint64, p4 address.Address, p5 bool) (*MessagePrototype, error) { + return nil, xerrors.New("method not supported") } -func (s *FullNodeStruct) MsigAddPropose(p0 context.Context, p1 address.Address, p2 address.Address, p3 address.Address, p4 bool) (cid.Cid, error) { +func (s *FullNodeStruct) MsigAddPropose(p0 context.Context, p1 address.Address, p2 address.Address, p3 address.Address, p4 bool) (*MessagePrototype, error) { return s.Internal.MsigAddPropose(p0, p1, p2, p3, p4) } -func (s *FullNodeStub) MsigAddPropose(p0 context.Context, p1 address.Address, p2 address.Address, p3 address.Address, p4 bool) (cid.Cid, error) { - return *new(cid.Cid), xerrors.New("method not supported") +func (s *FullNodeStub) MsigAddPropose(p0 context.Context, p1 address.Address, p2 address.Address, p3 address.Address, p4 bool) (*MessagePrototype, error) { + return nil, xerrors.New("method not supported") } -func (s *FullNodeStruct) MsigApprove(p0 context.Context, p1 address.Address, p2 uint64, p3 address.Address) (cid.Cid, error) { +func (s *FullNodeStruct) MsigApprove(p0 context.Context, p1 address.Address, p2 uint64, p3 address.Address) (*MessagePrototype, error) { return s.Internal.MsigApprove(p0, p1, p2, p3) } -func (s *FullNodeStub) MsigApprove(p0 context.Context, p1 address.Address, p2 uint64, p3 address.Address) (cid.Cid, error) { - return *new(cid.Cid), xerrors.New("method not supported") +func (s *FullNodeStub) MsigApprove(p0 context.Context, p1 address.Address, p2 uint64, p3 address.Address) (*MessagePrototype, error) { + return nil, xerrors.New("method not supported") } -func (s 
*FullNodeStruct) MsigApproveTxnHash(p0 context.Context, p1 address.Address, p2 uint64, p3 address.Address, p4 address.Address, p5 types.BigInt, p6 address.Address, p7 uint64, p8 []byte) (cid.Cid, error) { +func (s *FullNodeStruct) MsigApproveTxnHash(p0 context.Context, p1 address.Address, p2 uint64, p3 address.Address, p4 address.Address, p5 types.BigInt, p6 address.Address, p7 uint64, p8 []byte) (*MessagePrototype, error) { return s.Internal.MsigApproveTxnHash(p0, p1, p2, p3, p4, p5, p6, p7, p8) } -func (s *FullNodeStub) MsigApproveTxnHash(p0 context.Context, p1 address.Address, p2 uint64, p3 address.Address, p4 address.Address, p5 types.BigInt, p6 address.Address, p7 uint64, p8 []byte) (cid.Cid, error) { - return *new(cid.Cid), xerrors.New("method not supported") +func (s *FullNodeStub) MsigApproveTxnHash(p0 context.Context, p1 address.Address, p2 uint64, p3 address.Address, p4 address.Address, p5 types.BigInt, p6 address.Address, p7 uint64, p8 []byte) (*MessagePrototype, error) { + return nil, xerrors.New("method not supported") } -func (s *FullNodeStruct) MsigCancel(p0 context.Context, p1 address.Address, p2 uint64, p3 address.Address, p4 types.BigInt, p5 address.Address, p6 uint64, p7 []byte) (cid.Cid, error) { +func (s *FullNodeStruct) MsigCancel(p0 context.Context, p1 address.Address, p2 uint64, p3 address.Address, p4 types.BigInt, p5 address.Address, p6 uint64, p7 []byte) (*MessagePrototype, error) { return s.Internal.MsigCancel(p0, p1, p2, p3, p4, p5, p6, p7) } -func (s *FullNodeStub) MsigCancel(p0 context.Context, p1 address.Address, p2 uint64, p3 address.Address, p4 types.BigInt, p5 address.Address, p6 uint64, p7 []byte) (cid.Cid, error) { - return *new(cid.Cid), xerrors.New("method not supported") +func (s *FullNodeStub) MsigCancel(p0 context.Context, p1 address.Address, p2 uint64, p3 address.Address, p4 types.BigInt, p5 address.Address, p6 uint64, p7 []byte) (*MessagePrototype, error) { + return nil, xerrors.New("method not supported") } -func (s 
*FullNodeStruct) MsigCreate(p0 context.Context, p1 uint64, p2 []address.Address, p3 abi.ChainEpoch, p4 types.BigInt, p5 address.Address, p6 types.BigInt) (cid.Cid, error) { +func (s *FullNodeStruct) MsigCreate(p0 context.Context, p1 uint64, p2 []address.Address, p3 abi.ChainEpoch, p4 types.BigInt, p5 address.Address, p6 types.BigInt) (*MessagePrototype, error) { return s.Internal.MsigCreate(p0, p1, p2, p3, p4, p5, p6) } -func (s *FullNodeStub) MsigCreate(p0 context.Context, p1 uint64, p2 []address.Address, p3 abi.ChainEpoch, p4 types.BigInt, p5 address.Address, p6 types.BigInt) (cid.Cid, error) { - return *new(cid.Cid), xerrors.New("method not supported") +func (s *FullNodeStub) MsigCreate(p0 context.Context, p1 uint64, p2 []address.Address, p3 abi.ChainEpoch, p4 types.BigInt, p5 address.Address, p6 types.BigInt) (*MessagePrototype, error) { + return nil, xerrors.New("method not supported") } func (s *FullNodeStruct) MsigGetAvailableBalance(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (types.BigInt, error) { @@ -1684,44 +1759,52 @@ func (s *FullNodeStub) MsigGetVestingSchedule(p0 context.Context, p1 address.Add return *new(MsigVesting), xerrors.New("method not supported") } -func (s *FullNodeStruct) MsigPropose(p0 context.Context, p1 address.Address, p2 address.Address, p3 types.BigInt, p4 address.Address, p5 uint64, p6 []byte) (cid.Cid, error) { +func (s *FullNodeStruct) MsigPropose(p0 context.Context, p1 address.Address, p2 address.Address, p3 types.BigInt, p4 address.Address, p5 uint64, p6 []byte) (*MessagePrototype, error) { return s.Internal.MsigPropose(p0, p1, p2, p3, p4, p5, p6) } -func (s *FullNodeStub) MsigPropose(p0 context.Context, p1 address.Address, p2 address.Address, p3 types.BigInt, p4 address.Address, p5 uint64, p6 []byte) (cid.Cid, error) { - return *new(cid.Cid), xerrors.New("method not supported") +func (s *FullNodeStub) MsigPropose(p0 context.Context, p1 address.Address, p2 address.Address, p3 types.BigInt, p4 address.Address, p5 
uint64, p6 []byte) (*MessagePrototype, error) { + return nil, xerrors.New("method not supported") } -func (s *FullNodeStruct) MsigRemoveSigner(p0 context.Context, p1 address.Address, p2 address.Address, p3 address.Address, p4 bool) (cid.Cid, error) { +func (s *FullNodeStruct) MsigRemoveSigner(p0 context.Context, p1 address.Address, p2 address.Address, p3 address.Address, p4 bool) (*MessagePrototype, error) { return s.Internal.MsigRemoveSigner(p0, p1, p2, p3, p4) } -func (s *FullNodeStub) MsigRemoveSigner(p0 context.Context, p1 address.Address, p2 address.Address, p3 address.Address, p4 bool) (cid.Cid, error) { - return *new(cid.Cid), xerrors.New("method not supported") +func (s *FullNodeStub) MsigRemoveSigner(p0 context.Context, p1 address.Address, p2 address.Address, p3 address.Address, p4 bool) (*MessagePrototype, error) { + return nil, xerrors.New("method not supported") } -func (s *FullNodeStruct) MsigSwapApprove(p0 context.Context, p1 address.Address, p2 address.Address, p3 uint64, p4 address.Address, p5 address.Address, p6 address.Address) (cid.Cid, error) { +func (s *FullNodeStruct) MsigSwapApprove(p0 context.Context, p1 address.Address, p2 address.Address, p3 uint64, p4 address.Address, p5 address.Address, p6 address.Address) (*MessagePrototype, error) { return s.Internal.MsigSwapApprove(p0, p1, p2, p3, p4, p5, p6) } -func (s *FullNodeStub) MsigSwapApprove(p0 context.Context, p1 address.Address, p2 address.Address, p3 uint64, p4 address.Address, p5 address.Address, p6 address.Address) (cid.Cid, error) { - return *new(cid.Cid), xerrors.New("method not supported") +func (s *FullNodeStub) MsigSwapApprove(p0 context.Context, p1 address.Address, p2 address.Address, p3 uint64, p4 address.Address, p5 address.Address, p6 address.Address) (*MessagePrototype, error) { + return nil, xerrors.New("method not supported") } -func (s *FullNodeStruct) MsigSwapCancel(p0 context.Context, p1 address.Address, p2 address.Address, p3 uint64, p4 address.Address, p5 
address.Address) (cid.Cid, error) { +func (s *FullNodeStruct) MsigSwapCancel(p0 context.Context, p1 address.Address, p2 address.Address, p3 uint64, p4 address.Address, p5 address.Address) (*MessagePrototype, error) { return s.Internal.MsigSwapCancel(p0, p1, p2, p3, p4, p5) } -func (s *FullNodeStub) MsigSwapCancel(p0 context.Context, p1 address.Address, p2 address.Address, p3 uint64, p4 address.Address, p5 address.Address) (cid.Cid, error) { - return *new(cid.Cid), xerrors.New("method not supported") +func (s *FullNodeStub) MsigSwapCancel(p0 context.Context, p1 address.Address, p2 address.Address, p3 uint64, p4 address.Address, p5 address.Address) (*MessagePrototype, error) { + return nil, xerrors.New("method not supported") } -func (s *FullNodeStruct) MsigSwapPropose(p0 context.Context, p1 address.Address, p2 address.Address, p3 address.Address, p4 address.Address) (cid.Cid, error) { +func (s *FullNodeStruct) MsigSwapPropose(p0 context.Context, p1 address.Address, p2 address.Address, p3 address.Address, p4 address.Address) (*MessagePrototype, error) { return s.Internal.MsigSwapPropose(p0, p1, p2, p3, p4) } -func (s *FullNodeStub) MsigSwapPropose(p0 context.Context, p1 address.Address, p2 address.Address, p3 address.Address, p4 address.Address) (cid.Cid, error) { - return *new(cid.Cid), xerrors.New("method not supported") +func (s *FullNodeStub) MsigSwapPropose(p0 context.Context, p1 address.Address, p2 address.Address, p3 address.Address, p4 address.Address) (*MessagePrototype, error) { + return nil, xerrors.New("method not supported") +} + +func (s *FullNodeStruct) NodeStatus(p0 context.Context, p1 bool) (NodeStatus, error) { + return s.Internal.NodeStatus(p0, p1) +} + +func (s *FullNodeStub) NodeStatus(p0 context.Context, p1 bool) (NodeStatus, error) { + return *new(NodeStatus), xerrors.New("method not supported") } func (s *FullNodeStruct) PaychAllocateLane(p0 context.Context, p1 address.Address) (uint64, error) { @@ -2612,6 +2695,14 @@ func (s *GatewayStub) 
StateWaitMsg(p0 context.Context, p1 cid.Cid, p2 uint64, p3 return nil, xerrors.New("method not supported") } +func (s *GatewayStruct) Version(p0 context.Context) (APIVersion, error) { + return s.Internal.Version(p0) +} + +func (s *GatewayStub) Version(p0 context.Context) (APIVersion, error) { + return *new(APIVersion), xerrors.New("method not supported") +} + func (s *GatewayStruct) WalletBalance(p0 context.Context, p1 address.Address) (types.BigInt, error) { return s.Internal.WalletBalance(p0, p1) } @@ -3076,6 +3167,14 @@ func (s *StorageMinerStub) SealingSchedDiag(p0 context.Context, p1 bool) (interf return nil, xerrors.New("method not supported") } +func (s *StorageMinerStruct) SectorAddPieceToAny(p0 context.Context, p1 abi.UnpaddedPieceSize, p2 storage.Data, p3 PieceDealInfo) (SectorOffset, error) { + return s.Internal.SectorAddPieceToAny(p0, p1, p2, p3) +} + +func (s *StorageMinerStub) SectorAddPieceToAny(p0 context.Context, p1 abi.UnpaddedPieceSize, p2 storage.Data, p3 PieceDealInfo) (SectorOffset, error) { + return *new(SectorOffset), xerrors.New("method not supported") +} + func (s *StorageMinerStruct) SectorCommitFlush(p0 context.Context) ([]sealiface.CommitBatchRes, error) { return s.Internal.SectorCommitFlush(p0) } @@ -3228,6 +3327,14 @@ func (s *StorageMinerStub) SectorsSummary(p0 context.Context) (map[SectorState]i return *new(map[SectorState]int), xerrors.New("method not supported") } +func (s *StorageMinerStruct) SectorsUnsealPiece(p0 context.Context, p1 storage.SectorRef, p2 storiface.UnpaddedByteIndex, p3 abi.UnpaddedPieceSize, p4 abi.SealRandomness, p5 *cid.Cid) error { + return s.Internal.SectorsUnsealPiece(p0, p1, p2, p3, p4, p5) +} + +func (s *StorageMinerStub) SectorsUnsealPiece(p0 context.Context, p1 storage.SectorRef, p2 storiface.UnpaddedByteIndex, p3 abi.UnpaddedPieceSize, p4 abi.SealRandomness, p5 *cid.Cid) error { + return xerrors.New("method not supported") +} + func (s *StorageMinerStruct) SectorsUpdate(p0 context.Context, p1 
abi.SectorNumber, p2 SectorState) error { return s.Internal.SectorsUpdate(p0, p1, p2) } @@ -3484,14 +3591,6 @@ func (s *WorkerStub) ProcessSession(p0 context.Context) (uuid.UUID, error) { return *new(uuid.UUID), xerrors.New("method not supported") } -func (s *WorkerStruct) ReadPiece(p0 context.Context, p1 io.Writer, p2 storage.SectorRef, p3 storiface.UnpaddedByteIndex, p4 abi.UnpaddedPieceSize) (storiface.CallID, error) { - return s.Internal.ReadPiece(p0, p1, p2, p3, p4) -} - -func (s *WorkerStub) ReadPiece(p0 context.Context, p1 io.Writer, p2 storage.SectorRef, p3 storiface.UnpaddedByteIndex, p4 abi.UnpaddedPieceSize) (storiface.CallID, error) { - return *new(storiface.CallID), xerrors.New("method not supported") -} - func (s *WorkerStruct) ReleaseUnsealed(p0 context.Context, p1 storage.SectorRef, p2 []storage.Range) (storiface.CallID, error) { return s.Internal.ReleaseUnsealed(p0, p1, p2) } diff --git a/api/test/blockminer.go b/api/test/blockminer.go deleted file mode 100644 index 23af94a36..000000000 --- a/api/test/blockminer.go +++ /dev/null @@ -1,61 +0,0 @@ -package test - -import ( - "context" - "fmt" - "sync/atomic" - "testing" - "time" - - "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/lotus/miner" -) - -type BlockMiner struct { - ctx context.Context - t *testing.T - miner TestStorageNode - blocktime time.Duration - mine int64 - nulls int64 - done chan struct{} -} - -func NewBlockMiner(ctx context.Context, t *testing.T, miner TestStorageNode, blocktime time.Duration) *BlockMiner { - return &BlockMiner{ - ctx: ctx, - t: t, - miner: miner, - blocktime: blocktime, - mine: int64(1), - done: make(chan struct{}), - } -} - -func (bm *BlockMiner) MineBlocks() { - time.Sleep(time.Second) - go func() { - defer close(bm.done) - for atomic.LoadInt64(&bm.mine) == 1 { - select { - case <-bm.ctx.Done(): - return - case <-time.After(bm.blocktime): - } - - nulls := atomic.SwapInt64(&bm.nulls, 0) - if err := bm.miner.MineOne(bm.ctx, 
miner.MineReq{ - InjectNulls: abi.ChainEpoch(nulls), - Done: func(bool, abi.ChainEpoch, error) {}, - }); err != nil { - bm.t.Error(err) - } - } - }() -} - -func (bm *BlockMiner) Stop() { - atomic.AddInt64(&bm.mine, -1) - fmt.Println("shutting down mining") - <-bm.done -} diff --git a/api/test/ccupgrade.go b/api/test/ccupgrade.go deleted file mode 100644 index 283c8f610..000000000 --- a/api/test/ccupgrade.go +++ /dev/null @@ -1,127 +0,0 @@ -package test - -import ( - "context" - "fmt" - "sync/atomic" - "testing" - "time" - - "github.com/stretchr/testify/require" - - "github.com/filecoin-project/go-state-types/abi" - - "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/node/impl" -) - -func TestCCUpgrade(t *testing.T, b APIBuilder, blocktime time.Duration) { - for _, height := range []abi.ChainEpoch{ - -1, // before - 162, // while sealing - 530, // after upgrade deal - 5000, // after - } { - height := height // make linters happy by copying - t.Run(fmt.Sprintf("upgrade-%d", height), func(t *testing.T) { - testCCUpgrade(t, b, blocktime, height) - }) - } -} - -func testCCUpgrade(t *testing.T, b APIBuilder, blocktime time.Duration, upgradeHeight abi.ChainEpoch) { - ctx := context.Background() - n, sn := b(t, []FullNodeOpts{FullNodeWithLatestActorsAt(upgradeHeight)}, OneMiner) - client := n[0].FullNode.(*impl.FullNodeAPI) - miner := sn[0] - - addrinfo, err := client.NetAddrsListen(ctx) - if err != nil { - t.Fatal(err) - } - - if err := miner.NetConnect(ctx, addrinfo); err != nil { - t.Fatal(err) - } - time.Sleep(time.Second) - - mine := int64(1) - done := make(chan struct{}) - go func() { - defer close(done) - for atomic.LoadInt64(&mine) == 1 { - time.Sleep(blocktime) - if err := sn[0].MineOne(ctx, MineNext); err != nil { - t.Error(err) - } - } - }() - - maddr, err := miner.ActorAddress(ctx) - if err != nil { - t.Fatal(err) - } - - CC := abi.SectorNumber(GenesisPreseals + 1) - Upgraded := CC + 1 - - pledgeSectors(t, ctx, miner, 1, 0, 
nil) - - sl, err := miner.SectorsList(ctx) - if err != nil { - t.Fatal(err) - } - if len(sl) != 1 { - t.Fatal("expected 1 sector") - } - - if sl[0] != CC { - t.Fatal("bad") - } - - { - si, err := client.StateSectorGetInfo(ctx, maddr, CC, types.EmptyTSK) - require.NoError(t, err) - require.Less(t, 50000, int(si.Expiration)) - } - - if err := miner.SectorMarkForUpgrade(ctx, sl[0]); err != nil { - t.Fatal(err) - } - - MakeDeal(t, ctx, 6, client, miner, false, false, 0) - - // Validate upgrade - - { - exp, err := client.StateSectorExpiration(ctx, maddr, CC, types.EmptyTSK) - require.NoError(t, err) - require.NotNil(t, exp) - require.Greater(t, 50000, int(exp.OnTime)) - } - { - exp, err := client.StateSectorExpiration(ctx, maddr, Upgraded, types.EmptyTSK) - require.NoError(t, err) - require.Less(t, 50000, int(exp.OnTime)) - } - - dlInfo, err := client.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK) - require.NoError(t, err) - - // Sector should expire. - for { - // Wait for the sector to expire. - status, err := miner.SectorsStatus(ctx, CC, true) - require.NoError(t, err) - if status.OnTime == 0 && status.Early == 0 { - break - } - t.Log("waiting for sector to expire") - // wait one deadline per loop. 
- time.Sleep(time.Duration(dlInfo.WPoStChallengeWindow) * blocktime) - } - - fmt.Println("shutting down mining") - atomic.AddInt64(&mine, -1) - <-done -} diff --git a/api/test/deals.go b/api/test/deals.go deleted file mode 100644 index aa7e23bcc..000000000 --- a/api/test/deals.go +++ /dev/null @@ -1,621 +0,0 @@ -package test - -import ( - "bytes" - "context" - "fmt" - "io/ioutil" - "math/rand" - "os" - "path/filepath" - "sort" - "testing" - "time" - - "github.com/ipfs/go-cid" - files "github.com/ipfs/go-ipfs-files" - "github.com/ipld/go-car" - "github.com/stretchr/testify/require" - - "github.com/filecoin-project/go-fil-markets/storagemarket" - "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/go-state-types/big" - "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/build" - "github.com/filecoin-project/lotus/chain/actors/builtin/market" - "github.com/filecoin-project/lotus/chain/types" - sealing "github.com/filecoin-project/lotus/extern/storage-sealing" - "github.com/filecoin-project/lotus/extern/storage-sealing/sealiface" - "github.com/filecoin-project/lotus/markets/storageadapter" - "github.com/filecoin-project/lotus/node" - "github.com/filecoin-project/lotus/node/impl" - "github.com/filecoin-project/lotus/node/modules/dtypes" - market2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/market" - ipld "github.com/ipfs/go-ipld-format" - dag "github.com/ipfs/go-merkledag" - dstest "github.com/ipfs/go-merkledag/test" - unixfile "github.com/ipfs/go-unixfs/file" -) - -func TestDealFlow(t *testing.T, b APIBuilder, blocktime time.Duration, carExport, fastRet bool, startEpoch abi.ChainEpoch) { - s := setupOneClientOneMiner(t, b, blocktime) - defer s.blockMiner.Stop() - - MakeDeal(t, s.ctx, 6, s.client, s.miner, carExport, fastRet, startEpoch) -} - -func TestDoubleDealFlow(t *testing.T, b APIBuilder, blocktime time.Duration, startEpoch abi.ChainEpoch) { - s := setupOneClientOneMiner(t, b, blocktime) - defer 
s.blockMiner.Stop() - - MakeDeal(t, s.ctx, 6, s.client, s.miner, false, false, startEpoch) - MakeDeal(t, s.ctx, 7, s.client, s.miner, false, false, startEpoch) -} - -func MakeDeal(t *testing.T, ctx context.Context, rseed int, client api.FullNode, miner TestStorageNode, carExport, fastRet bool, startEpoch abi.ChainEpoch) { - res, data, err := CreateClientFile(ctx, client, rseed, 0) - if err != nil { - t.Fatal(err) - } - - fcid := res.Root - fmt.Println("FILE CID: ", fcid) - - deal := startDeal(t, ctx, miner, client, fcid, fastRet, startEpoch) - - // TODO: this sleep is only necessary because deals don't immediately get logged in the dealstore, we should fix this - time.Sleep(time.Second) - waitDealSealed(t, ctx, miner, client, deal, false, false, nil) - - // Retrieval - info, err := client.ClientGetDealInfo(ctx, *deal) - require.NoError(t, err) - - testRetrieval(t, ctx, client, fcid, &info.PieceCID, carExport, data) -} - -func CreateClientFile(ctx context.Context, client api.FullNode, rseed, size int) (*api.ImportRes, []byte, error) { - if size == 0 { - size = 1600 - } - data := make([]byte, size) - rand.New(rand.NewSource(int64(rseed))).Read(data) - - dir, err := ioutil.TempDir(os.TempDir(), "test-make-deal-") - if err != nil { - return nil, nil, err - } - - path := filepath.Join(dir, "sourcefile.dat") - err = ioutil.WriteFile(path, data, 0644) - if err != nil { - return nil, nil, err - } - - res, err := client.ClientImport(ctx, api.FileRef{Path: path}) - if err != nil { - return nil, nil, err - } - return res, data, nil -} - -func TestPublishDealsBatching(t *testing.T, b APIBuilder, blocktime time.Duration, startEpoch abi.ChainEpoch) { - publishPeriod := 10 * time.Second - maxDealsPerMsg := uint64(2) - - // Set max deals per publish deals message to 2 - minerDef := []StorageMiner{{ - Full: 0, - Opts: node.Override( - new(*storageadapter.DealPublisher), - storageadapter.NewDealPublisher(nil, storageadapter.PublishMsgConfig{ - Period: publishPeriod, - 
MaxDealsPerMsg: maxDealsPerMsg, - })), - Preseal: PresealGenesis, - }} - - // Create a connect client and miner node - n, sn := b(t, OneFull, minerDef) - client := n[0].FullNode.(*impl.FullNodeAPI) - miner := sn[0] - s := connectAndStartMining(t, b, blocktime, client, miner) - defer s.blockMiner.Stop() - - // Starts a deal and waits until it's published - runDealTillPublish := func(rseed int) { - res, _, err := CreateClientFile(s.ctx, s.client, rseed, 0) - require.NoError(t, err) - - upds, err := client.ClientGetDealUpdates(s.ctx) - require.NoError(t, err) - - startDeal(t, s.ctx, s.miner, s.client, res.Root, false, startEpoch) - - // TODO: this sleep is only necessary because deals don't immediately get logged in the dealstore, we should fix this - time.Sleep(time.Second) - - done := make(chan struct{}) - go func() { - for upd := range upds { - if upd.DataRef.Root == res.Root && upd.State == storagemarket.StorageDealAwaitingPreCommit { - done <- struct{}{} - } - } - }() - <-done - } - - // Run three deals in parallel - done := make(chan struct{}, maxDealsPerMsg+1) - for rseed := 1; rseed <= 3; rseed++ { - rseed := rseed - go func() { - runDealTillPublish(rseed) - done <- struct{}{} - }() - } - - // Wait for two of the deals to be published - for i := 0; i < int(maxDealsPerMsg); i++ { - <-done - } - - // Expect a single PublishStorageDeals message that includes the first two deals - msgCids, err := s.client.StateListMessages(s.ctx, &api.MessageMatch{To: market.Address}, types.EmptyTSK, 1) - require.NoError(t, err) - count := 0 - for _, msgCid := range msgCids { - msg, err := s.client.ChainGetMessage(s.ctx, msgCid) - require.NoError(t, err) - - if msg.Method == market.Methods.PublishStorageDeals { - count++ - var pubDealsParams market2.PublishStorageDealsParams - err = pubDealsParams.UnmarshalCBOR(bytes.NewReader(msg.Params)) - require.NoError(t, err) - require.Len(t, pubDealsParams.Deals, int(maxDealsPerMsg)) - } - } - require.Equal(t, 1, count) - - // The third 
deal should be published once the publish period expires. - // Allow a little padding as it takes a moment for the state change to - // be noticed by the client. - padding := 10 * time.Second - select { - case <-time.After(publishPeriod + padding): - require.Fail(t, "Expected 3rd deal to be published once publish period elapsed") - case <-done: // Success - } -} - -func TestBatchDealInput(t *testing.T, b APIBuilder, blocktime time.Duration, startEpoch abi.ChainEpoch) { - run := func(piece, deals, expectSectors int) func(t *testing.T) { - return func(t *testing.T) { - publishPeriod := 10 * time.Second - maxDealsPerMsg := uint64(deals) - - // Set max deals per publish deals message to maxDealsPerMsg - minerDef := []StorageMiner{{ - Full: 0, - Opts: node.Options( - node.Override( - new(*storageadapter.DealPublisher), - storageadapter.NewDealPublisher(nil, storageadapter.PublishMsgConfig{ - Period: publishPeriod, - MaxDealsPerMsg: maxDealsPerMsg, - })), - node.Override(new(dtypes.GetSealingConfigFunc), func() (dtypes.GetSealingConfigFunc, error) { - return func() (sealiface.Config, error) { - return sealiface.Config{ - MaxWaitDealsSectors: 2, - MaxSealingSectors: 1, - MaxSealingSectorsForDeals: 3, - AlwaysKeepUnsealedCopy: true, - WaitDealsDelay: time.Hour, - }, nil - }, nil - }), - ), - Preseal: PresealGenesis, - }} - - // Create a connect client and miner node - n, sn := b(t, OneFull, minerDef) - client := n[0].FullNode.(*impl.FullNodeAPI) - miner := sn[0] - s := connectAndStartMining(t, b, blocktime, client, miner) - defer s.blockMiner.Stop() - - err := miner.MarketSetAsk(s.ctx, big.Zero(), big.Zero(), 200, 128, 32<<30) - require.NoError(t, err) - - checkNoPadding := func() { - sl, err := sn[0].SectorsList(s.ctx) - require.NoError(t, err) - - sort.Slice(sl, func(i, j int) bool { - return sl[i] < sl[j] - }) - - for _, snum := range sl { - si, err := sn[0].SectorsStatus(s.ctx, snum, false) - require.NoError(t, err) - - // fmt.Printf("S %d: %+v %s\n", snum, si.Deals, 
si.State) - - for _, deal := range si.Deals { - if deal == 0 { - fmt.Printf("sector %d had a padding piece!\n", snum) - } - } - } - } - - // Starts a deal and waits until it's published - runDealTillSeal := func(rseed int) { - res, _, err := CreateClientFile(s.ctx, s.client, rseed, piece) - require.NoError(t, err) - - dc := startDeal(t, s.ctx, s.miner, s.client, res.Root, false, startEpoch) - waitDealSealed(t, s.ctx, s.miner, s.client, dc, false, true, checkNoPadding) - } - - // Run maxDealsPerMsg deals in parallel - done := make(chan struct{}, maxDealsPerMsg) - for rseed := 0; rseed < int(maxDealsPerMsg); rseed++ { - rseed := rseed - go func() { - runDealTillSeal(rseed) - done <- struct{}{} - }() - } - - // Wait for maxDealsPerMsg of the deals to be published - for i := 0; i < int(maxDealsPerMsg); i++ { - <-done - } - - checkNoPadding() - - sl, err := sn[0].SectorsList(s.ctx) - require.NoError(t, err) - require.Equal(t, len(sl), expectSectors) - } - } - - t.Run("4-p1600B", run(1600, 4, 4)) - t.Run("4-p513B", run(513, 4, 2)) - if !testing.Short() { - t.Run("32-p257B", run(257, 32, 8)) - t.Run("32-p10B", run(10, 32, 2)) - - // fixme: this appears to break data-transfer / markets in some really creative ways - //t.Run("128-p10B", run(10, 128, 8)) - } -} - -func TestFastRetrievalDealFlow(t *testing.T, b APIBuilder, blocktime time.Duration, startEpoch abi.ChainEpoch) { - s := setupOneClientOneMiner(t, b, blocktime) - defer s.blockMiner.Stop() - - data := make([]byte, 1600) - rand.New(rand.NewSource(int64(8))).Read(data) - - r := bytes.NewReader(data) - fcid, err := s.client.ClientImportLocal(s.ctx, r) - if err != nil { - t.Fatal(err) - } - - fmt.Println("FILE CID: ", fcid) - - deal := startDeal(t, s.ctx, s.miner, s.client, fcid, true, startEpoch) - - waitDealPublished(t, s.ctx, s.miner, deal) - fmt.Println("deal published, retrieving") - // Retrieval - info, err := s.client.ClientGetDealInfo(s.ctx, *deal) - require.NoError(t, err) - - testRetrieval(t, s.ctx, s.client, 
fcid, &info.PieceCID, false, data) -} - -func TestSecondDealRetrieval(t *testing.T, b APIBuilder, blocktime time.Duration) { - s := setupOneClientOneMiner(t, b, blocktime) - defer s.blockMiner.Stop() - - { - data1 := make([]byte, 800) - rand.New(rand.NewSource(int64(3))).Read(data1) - r := bytes.NewReader(data1) - - fcid1, err := s.client.ClientImportLocal(s.ctx, r) - if err != nil { - t.Fatal(err) - } - - data2 := make([]byte, 800) - rand.New(rand.NewSource(int64(9))).Read(data2) - r2 := bytes.NewReader(data2) - - fcid2, err := s.client.ClientImportLocal(s.ctx, r2) - if err != nil { - t.Fatal(err) - } - - deal1 := startDeal(t, s.ctx, s.miner, s.client, fcid1, true, 0) - - // TODO: this sleep is only necessary because deals don't immediately get logged in the dealstore, we should fix this - time.Sleep(time.Second) - waitDealSealed(t, s.ctx, s.miner, s.client, deal1, true, false, nil) - - deal2 := startDeal(t, s.ctx, s.miner, s.client, fcid2, true, 0) - - time.Sleep(time.Second) - waitDealSealed(t, s.ctx, s.miner, s.client, deal2, false, false, nil) - - // Retrieval - info, err := s.client.ClientGetDealInfo(s.ctx, *deal2) - require.NoError(t, err) - - rf, _ := s.miner.SectorsRefs(s.ctx) - fmt.Printf("refs: %+v\n", rf) - - testRetrieval(t, s.ctx, s.client, fcid2, &info.PieceCID, false, data2) - } -} - -func TestZeroPricePerByteRetrievalDealFlow(t *testing.T, b APIBuilder, blocktime time.Duration, startEpoch abi.ChainEpoch) { - s := setupOneClientOneMiner(t, b, blocktime) - defer s.blockMiner.Stop() - - // Set price-per-byte to zero - ask, err := s.miner.MarketGetRetrievalAsk(s.ctx) - require.NoError(t, err) - - ask.PricePerByte = abi.NewTokenAmount(0) - err = s.miner.MarketSetRetrievalAsk(s.ctx, ask) - require.NoError(t, err) - - MakeDeal(t, s.ctx, 6, s.client, s.miner, false, false, startEpoch) -} - -func startDeal(t *testing.T, ctx context.Context, miner TestStorageNode, client api.FullNode, fcid cid.Cid, fastRet bool, startEpoch abi.ChainEpoch) *cid.Cid { - maddr, 
err := miner.ActorAddress(ctx) - if err != nil { - t.Fatal(err) - } - - addr, err := client.WalletDefaultAddress(ctx) - if err != nil { - t.Fatal(err) - } - deal, err := client.ClientStartDeal(ctx, &api.StartDealParams{ - Data: &storagemarket.DataRef{ - TransferType: storagemarket.TTGraphsync, - Root: fcid, - }, - Wallet: addr, - Miner: maddr, - EpochPrice: types.NewInt(1000000), - DealStartEpoch: startEpoch, - MinBlocksDuration: uint64(build.MinDealDuration), - FastRetrieval: fastRet, - }) - if err != nil { - t.Fatalf("%+v", err) - } - return deal -} - -func waitDealSealed(t *testing.T, ctx context.Context, miner TestStorageNode, client api.FullNode, deal *cid.Cid, noseal, noSealStart bool, cb func()) { -loop: - for { - di, err := client.ClientGetDealInfo(ctx, *deal) - if err != nil { - t.Fatal(err) - } - switch di.State { - case storagemarket.StorageDealAwaitingPreCommit, storagemarket.StorageDealSealing: - if noseal { - return - } - if !noSealStart { - startSealingWaiting(t, ctx, miner) - } - case storagemarket.StorageDealProposalRejected: - t.Fatal("deal rejected") - case storagemarket.StorageDealFailing: - t.Fatal("deal failed") - case storagemarket.StorageDealError: - t.Fatal("deal errored", di.Message) - case storagemarket.StorageDealActive: - fmt.Println("COMPLETE", di) - break loop - } - - mds, err := miner.MarketListIncompleteDeals(ctx) - if err != nil { - t.Fatal(err) - } - - var minerState storagemarket.StorageDealStatus - for _, md := range mds { - if md.DealID == di.DealID { - minerState = md.State - break - } - } - - fmt.Printf("Deal %d state: client:%s provider:%s\n", di.DealID, storagemarket.DealStates[di.State], storagemarket.DealStates[minerState]) - time.Sleep(time.Second / 2) - if cb != nil { - cb() - } - } -} - -func waitDealPublished(t *testing.T, ctx context.Context, miner TestStorageNode, deal *cid.Cid) { - subCtx, cancel := context.WithCancel(ctx) - defer cancel() - updates, err := miner.MarketGetDealUpdates(subCtx) - if err != nil { - 
t.Fatal(err) - } - for { - select { - case <-ctx.Done(): - t.Fatal("context timeout") - case di := <-updates: - if deal.Equals(di.ProposalCid) { - switch di.State { - case storagemarket.StorageDealProposalRejected: - t.Fatal("deal rejected") - case storagemarket.StorageDealFailing: - t.Fatal("deal failed") - case storagemarket.StorageDealError: - t.Fatal("deal errored", di.Message) - case storagemarket.StorageDealFinalizing, storagemarket.StorageDealAwaitingPreCommit, storagemarket.StorageDealSealing, storagemarket.StorageDealActive: - fmt.Println("COMPLETE", di) - return - } - fmt.Println("Deal state: ", storagemarket.DealStates[di.State]) - } - } - } -} - -func startSealingWaiting(t *testing.T, ctx context.Context, miner TestStorageNode) { - snums, err := miner.SectorsList(ctx) - require.NoError(t, err) - - for _, snum := range snums { - si, err := miner.SectorsStatus(ctx, snum, false) - require.NoError(t, err) - - t.Logf("Sector %d state: %s", snum, si.State) - if si.State == api.SectorState(sealing.WaitDeals) { - require.NoError(t, miner.SectorStartSealing(ctx, snum)) - } - } - - flushSealingBatches(t, ctx, miner) -} - -func testRetrieval(t *testing.T, ctx context.Context, client api.FullNode, fcid cid.Cid, piece *cid.Cid, carExport bool, data []byte) { - offers, err := client.ClientFindData(ctx, fcid, piece) - if err != nil { - t.Fatal(err) - } - - if len(offers) < 1 { - t.Fatal("no offers") - } - - rpath, err := ioutil.TempDir("", "lotus-retrieve-test-") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(rpath) //nolint:errcheck - - caddr, err := client.WalletDefaultAddress(ctx) - if err != nil { - t.Fatal(err) - } - - ref := &api.FileRef{ - Path: filepath.Join(rpath, "ret"), - IsCAR: carExport, - } - updates, err := client.ClientRetrieveWithEvents(ctx, offers[0].Order(caddr), ref) - if err != nil { - t.Fatal(err) - } - for update := range updates { - if update.Err != "" { - t.Fatalf("retrieval failed: %s", update.Err) - } - } - - rdata, err := 
ioutil.ReadFile(filepath.Join(rpath, "ret")) - if err != nil { - t.Fatal(err) - } - - if carExport { - rdata = extractCarData(t, ctx, rdata, rpath) - } - - if !bytes.Equal(rdata, data) { - t.Fatal("wrong data retrieved") - } -} - -func extractCarData(t *testing.T, ctx context.Context, rdata []byte, rpath string) []byte { - bserv := dstest.Bserv() - ch, err := car.LoadCar(bserv.Blockstore(), bytes.NewReader(rdata)) - if err != nil { - t.Fatal(err) - } - b, err := bserv.GetBlock(ctx, ch.Roots[0]) - if err != nil { - t.Fatal(err) - } - nd, err := ipld.Decode(b) - if err != nil { - t.Fatal(err) - } - dserv := dag.NewDAGService(bserv) - fil, err := unixfile.NewUnixfsFile(ctx, dserv, nd) - if err != nil { - t.Fatal(err) - } - outPath := filepath.Join(rpath, "retLoadedCAR") - if err := files.WriteTo(fil, outPath); err != nil { - t.Fatal(err) - } - rdata, err = ioutil.ReadFile(outPath) - if err != nil { - t.Fatal(err) - } - return rdata -} - -type dealsScaffold struct { - ctx context.Context - client *impl.FullNodeAPI - miner TestStorageNode - blockMiner *BlockMiner -} - -func setupOneClientOneMiner(t *testing.T, b APIBuilder, blocktime time.Duration) *dealsScaffold { - n, sn := b(t, OneFull, OneMiner) - client := n[0].FullNode.(*impl.FullNodeAPI) - miner := sn[0] - return connectAndStartMining(t, b, blocktime, client, miner) -} - -func connectAndStartMining(t *testing.T, b APIBuilder, blocktime time.Duration, client *impl.FullNodeAPI, miner TestStorageNode) *dealsScaffold { - ctx := context.Background() - addrinfo, err := client.NetAddrsListen(ctx) - if err != nil { - t.Fatal(err) - } - - if err := miner.NetConnect(ctx, addrinfo); err != nil { - t.Fatal(err) - } - time.Sleep(time.Second) - - blockMiner := NewBlockMiner(ctx, t, miner, blocktime) - blockMiner.MineBlocks() - - return &dealsScaffold{ - ctx: ctx, - client: client, - miner: miner, - blockMiner: blockMiner, - } -} diff --git a/api/test/mining.go b/api/test/mining.go deleted file mode 100644 index 
d6dea9abf..000000000 --- a/api/test/mining.go +++ /dev/null @@ -1,240 +0,0 @@ -package test - -import ( - "bytes" - "context" - "fmt" - "math/rand" - "sync/atomic" - "testing" - "time" - - logging "github.com/ipfs/go-log/v2" - - "github.com/stretchr/testify/require" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-state-types/abi" - - "github.com/filecoin-project/lotus/build" - "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/miner" - "github.com/filecoin-project/lotus/node/impl" -) - -//nolint:deadcode,varcheck -var log = logging.Logger("apitest") - -func (ts *testSuite) testMining(t *testing.T) { - ctx := context.Background() - apis, sn := ts.makeNodes(t, OneFull, OneMiner) - api := apis[0] - - newHeads, err := api.ChainNotify(ctx) - require.NoError(t, err) - initHead := (<-newHeads)[0] - baseHeight := initHead.Val.Height() - - h1, err := api.ChainHead(ctx) - require.NoError(t, err) - require.Equal(t, int64(h1.Height()), int64(baseHeight)) - - MineUntilBlock(ctx, t, apis[0], sn[0], nil) - require.NoError(t, err) - - <-newHeads - - h2, err := api.ChainHead(ctx) - require.NoError(t, err) - require.Greater(t, int64(h2.Height()), int64(h1.Height())) -} - -func (ts *testSuite) testMiningReal(t *testing.T) { - build.InsecurePoStValidation = false - defer func() { - build.InsecurePoStValidation = true - }() - - ctx := context.Background() - apis, sn := ts.makeNodes(t, OneFull, OneMiner) - api := apis[0] - - newHeads, err := api.ChainNotify(ctx) - require.NoError(t, err) - at := (<-newHeads)[0].Val.Height() - - h1, err := api.ChainHead(ctx) - require.NoError(t, err) - require.Equal(t, int64(at), int64(h1.Height())) - - MineUntilBlock(ctx, t, apis[0], sn[0], nil) - require.NoError(t, err) - - <-newHeads - - h2, err := api.ChainHead(ctx) - require.NoError(t, err) - require.Greater(t, int64(h2.Height()), int64(h1.Height())) - - MineUntilBlock(ctx, t, apis[0], sn[0], nil) - require.NoError(t, err) - - 
<-newHeads - - h3, err := api.ChainHead(ctx) - require.NoError(t, err) - require.Greater(t, int64(h3.Height()), int64(h2.Height())) -} - -func TestDealMining(t *testing.T, b APIBuilder, blocktime time.Duration, carExport bool) { - // test making a deal with a fresh miner, and see if it starts to mine - - ctx := context.Background() - n, sn := b(t, OneFull, []StorageMiner{ - {Full: 0, Preseal: PresealGenesis}, - {Full: 0, Preseal: 0}, // TODO: Add support for miners on non-first full node - }) - client := n[0].FullNode.(*impl.FullNodeAPI) - provider := sn[1] - genesisMiner := sn[0] - - addrinfo, err := client.NetAddrsListen(ctx) - if err != nil { - t.Fatal(err) - } - - if err := provider.NetConnect(ctx, addrinfo); err != nil { - t.Fatal(err) - } - - if err := genesisMiner.NetConnect(ctx, addrinfo); err != nil { - t.Fatal(err) - } - - time.Sleep(time.Second) - - data := make([]byte, 600) - rand.New(rand.NewSource(5)).Read(data) - - r := bytes.NewReader(data) - fcid, err := client.ClientImportLocal(ctx, r) - if err != nil { - t.Fatal(err) - } - - fmt.Println("FILE CID: ", fcid) - - var mine int32 = 1 - done := make(chan struct{}) - minedTwo := make(chan struct{}) - - m2addr, err := sn[1].ActorAddress(context.TODO()) - if err != nil { - t.Fatal(err) - } - - go func() { - defer close(done) - - complChan := minedTwo - for atomic.LoadInt32(&mine) != 0 { - wait := make(chan int) - mdone := func(mined bool, _ abi.ChainEpoch, err error) { - n := 0 - if mined { - n = 1 - } - wait <- n - } - - if err := sn[0].MineOne(ctx, miner.MineReq{Done: mdone}); err != nil { - t.Error(err) - } - - if err := sn[1].MineOne(ctx, miner.MineReq{Done: mdone}); err != nil { - t.Error(err) - } - - expect := <-wait - expect += <-wait - - time.Sleep(blocktime) - if expect == 0 { - // null block - continue - } - - var nodeOneMined bool - for _, node := range sn { - mb, err := node.MiningBase(ctx) - if err != nil { - t.Error(err) - return - } - - for _, b := range mb.Blocks() { - if b.Miner == m2addr 
{ - nodeOneMined = true - break - } - } - - } - - if nodeOneMined && complChan != nil { - close(complChan) - complChan = nil - } - - } - }() - - deal := startDeal(t, ctx, provider, client, fcid, false, 0) - - // TODO: this sleep is only necessary because deals don't immediately get logged in the dealstore, we should fix this - time.Sleep(time.Second) - - waitDealSealed(t, ctx, provider, client, deal, false, false, nil) - - <-minedTwo - - atomic.StoreInt32(&mine, 0) - fmt.Println("shutting down mining") - <-done -} - -func (ts *testSuite) testNonGenesisMiner(t *testing.T) { - ctx := context.Background() - n, sn := ts.makeNodes(t, []FullNodeOpts{ - FullNodeWithLatestActorsAt(-1), - }, []StorageMiner{ - {Full: 0, Preseal: PresealGenesis}, - }) - - full, ok := n[0].FullNode.(*impl.FullNodeAPI) - if !ok { - t.Skip("not testing with a full node") - return - } - genesisMiner := sn[0] - - bm := NewBlockMiner(ctx, t, genesisMiner, 4*time.Millisecond) - bm.MineBlocks() - t.Cleanup(bm.Stop) - - gaa, err := genesisMiner.ActorAddress(ctx) - require.NoError(t, err) - - gmi, err := full.StateMinerInfo(ctx, gaa, types.EmptyTSK) - require.NoError(t, err) - - testm := n[0].Stb(ctx, t, TestSpt, gmi.Owner) - - ta, err := testm.ActorAddress(ctx) - require.NoError(t, err) - - tid, err := address.IDFromAddress(ta) - require.NoError(t, err) - - require.Equal(t, uint64(1001), tid) -} diff --git a/api/test/pledge.go b/api/test/pledge.go deleted file mode 100644 index 08548dc60..000000000 --- a/api/test/pledge.go +++ /dev/null @@ -1,389 +0,0 @@ -package test - -import ( - "context" - "fmt" - "sort" - "strings" - "sync/atomic" - "testing" - "time" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/go-state-types/network" - - "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/build" - "github.com/filecoin-project/lotus/chain/stmgr" - sealing 
"github.com/filecoin-project/lotus/extern/storage-sealing" - bminer "github.com/filecoin-project/lotus/miner" - "github.com/filecoin-project/lotus/node" - "github.com/filecoin-project/lotus/node/impl" -) - -func TestSDRUpgrade(t *testing.T, b APIBuilder, blocktime time.Duration) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - n, sn := b(t, []FullNodeOpts{FullNodeWithSDRAt(500, 1000)}, OneMiner) - client := n[0].FullNode.(*impl.FullNodeAPI) - miner := sn[0] - - addrinfo, err := client.NetAddrsListen(ctx) - if err != nil { - t.Fatal(err) - } - - if err := miner.NetConnect(ctx, addrinfo); err != nil { - t.Fatal(err) - } - build.Clock.Sleep(time.Second) - - pledge := make(chan struct{}) - mine := int64(1) - done := make(chan struct{}) - go func() { - defer close(done) - round := 0 - for atomic.LoadInt64(&mine) != 0 { - build.Clock.Sleep(blocktime) - if err := sn[0].MineOne(ctx, bminer.MineReq{Done: func(bool, abi.ChainEpoch, error) { - - }}); err != nil { - t.Error(err) - } - - // 3 sealing rounds: before, during after. - if round >= 3 { - continue - } - - head, err := client.ChainHead(ctx) - assert.NoError(t, err) - - // rounds happen every 100 blocks, with a 50 block offset. - if head.Height() >= abi.ChainEpoch(round*500+50) { - round++ - pledge <- struct{}{} - - ver, err := client.StateNetworkVersion(ctx, head.Key()) - assert.NoError(t, err) - switch round { - case 1: - assert.Equal(t, network.Version6, ver) - case 2: - assert.Equal(t, network.Version7, ver) - case 3: - assert.Equal(t, network.Version8, ver) - } - } - - } - }() - - // before. 
- pledgeSectors(t, ctx, miner, 9, 0, pledge) - - s, err := miner.SectorsList(ctx) - require.NoError(t, err) - sort.Slice(s, func(i, j int) bool { - return s[i] < s[j] - }) - - for i, id := range s { - info, err := miner.SectorsStatus(ctx, id, true) - require.NoError(t, err) - expectProof := abi.RegisteredSealProof_StackedDrg2KiBV1 - if i >= 3 { - // after - expectProof = abi.RegisteredSealProof_StackedDrg2KiBV1_1 - } - assert.Equal(t, expectProof, info.SealProof, "sector %d, id %d", i, id) - } - - atomic.StoreInt64(&mine, 0) - <-done -} - -func TestPledgeBatching(t *testing.T, b APIBuilder, blocktime time.Duration, nSectors int) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - n, sn := b(t, []FullNodeOpts{FullNodeWithLatestActorsAt(-1)}, OneMiner) - client := n[0].FullNode.(*impl.FullNodeAPI) - miner := sn[0] - - addrinfo, err := client.NetAddrsListen(ctx) - if err != nil { - t.Fatal(err) - } - - if err := miner.NetConnect(ctx, addrinfo); err != nil { - t.Fatal(err) - } - build.Clock.Sleep(time.Second) - - mine := int64(1) - done := make(chan struct{}) - go func() { - defer close(done) - for atomic.LoadInt64(&mine) != 0 { - build.Clock.Sleep(blocktime) - if err := sn[0].MineOne(ctx, bminer.MineReq{Done: func(bool, abi.ChainEpoch, error) { - - }}); err != nil { - t.Error(err) - } - } - }() - - for { - h, err := client.ChainHead(ctx) - require.NoError(t, err) - if h.Height() > 10 { - break - } - } - - toCheck := startPledge(t, ctx, miner, nSectors, 0, nil) - - for len(toCheck) > 0 { - states := map[api.SectorState]int{} - - for n := range toCheck { - st, err := miner.SectorsStatus(ctx, n, false) - require.NoError(t, err) - states[st.State]++ - if st.State == api.SectorState(sealing.Proving) { - delete(toCheck, n) - } - if strings.Contains(string(st.State), "Fail") { - t.Fatal("sector in a failed state", st.State) - } - } - if states[api.SectorState(sealing.SubmitPreCommitBatch)] == nSectors || - 
(states[api.SectorState(sealing.SubmitPreCommitBatch)] > 0 && states[api.SectorState(sealing.PreCommit1)] == 0 && states[api.SectorState(sealing.PreCommit2)] == 0) { - pcb, err := miner.SectorPreCommitFlush(ctx) - require.NoError(t, err) - if pcb != nil { - fmt.Printf("PRECOMMIT BATCH: %+v\n", pcb) - } - } - - if states[api.SectorState(sealing.SubmitCommitAggregate)] == nSectors || - (states[api.SectorState(sealing.SubmitCommitAggregate)] > 0 && states[api.SectorState(sealing.WaitSeed)] == 0 && states[api.SectorState(sealing.Committing)] == 0) { - cb, err := miner.SectorCommitFlush(ctx) - require.NoError(t, err) - if cb != nil { - fmt.Printf("COMMIT BATCH: %+v\n", cb) - } - } - - build.Clock.Sleep(100 * time.Millisecond) - fmt.Printf("WaitSeal: %d %+v\n", len(toCheck), states) - } - - atomic.StoreInt64(&mine, 0) - <-done -} - -func TestPledgeBeforeNv13(t *testing.T, b APIBuilder, blocktime time.Duration, nSectors int) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - n, sn := b(t, []FullNodeOpts{ - { - Opts: func(nodes []TestNode) node.Option { - return node.Override(new(stmgr.UpgradeSchedule), stmgr.UpgradeSchedule{{ - Network: network.Version9, - Height: 1, - Migration: stmgr.UpgradeActorsV2, - }, { - Network: network.Version10, - Height: 2, - Migration: stmgr.UpgradeActorsV3, - }, { - Network: network.Version12, - Height: 3, - Migration: stmgr.UpgradeActorsV4, - }, { - Network: network.Version13, - Height: 1000000000, - Migration: stmgr.UpgradeActorsV5, - }}) - }, - }, - }, OneMiner) - client := n[0].FullNode.(*impl.FullNodeAPI) - miner := sn[0] - - addrinfo, err := client.NetAddrsListen(ctx) - if err != nil { - t.Fatal(err) - } - - if err := miner.NetConnect(ctx, addrinfo); err != nil { - t.Fatal(err) - } - build.Clock.Sleep(time.Second) - - mine := int64(1) - done := make(chan struct{}) - go func() { - defer close(done) - for atomic.LoadInt64(&mine) != 0 { - build.Clock.Sleep(blocktime) - if err := sn[0].MineOne(ctx, 
bminer.MineReq{Done: func(bool, abi.ChainEpoch, error) { - - }}); err != nil { - t.Error(err) - } - } - }() - - for { - h, err := client.ChainHead(ctx) - require.NoError(t, err) - if h.Height() > 10 { - break - } - } - - toCheck := startPledge(t, ctx, miner, nSectors, 0, nil) - - for len(toCheck) > 0 { - states := map[api.SectorState]int{} - - for n := range toCheck { - st, err := miner.SectorsStatus(ctx, n, false) - require.NoError(t, err) - states[st.State]++ - if st.State == api.SectorState(sealing.Proving) { - delete(toCheck, n) - } - if strings.Contains(string(st.State), "Fail") { - t.Fatal("sector in a failed state", st.State) - } - } - - build.Clock.Sleep(100 * time.Millisecond) - fmt.Printf("WaitSeal: %d %+v\n", len(toCheck), states) - } - - atomic.StoreInt64(&mine, 0) - <-done -} - -func TestPledgeSector(t *testing.T, b APIBuilder, blocktime time.Duration, nSectors int) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - n, sn := b(t, OneFull, OneMiner) - client := n[0].FullNode.(*impl.FullNodeAPI) - miner := sn[0] - - addrinfo, err := client.NetAddrsListen(ctx) - if err != nil { - t.Fatal(err) - } - - if err := miner.NetConnect(ctx, addrinfo); err != nil { - t.Fatal(err) - } - build.Clock.Sleep(time.Second) - - mine := int64(1) - done := make(chan struct{}) - go func() { - defer close(done) - for atomic.LoadInt64(&mine) != 0 { - build.Clock.Sleep(blocktime) - if err := sn[0].MineOne(ctx, bminer.MineReq{Done: func(bool, abi.ChainEpoch, error) { - - }}); err != nil { - t.Error(err) - } - } - }() - - pledgeSectors(t, ctx, miner, nSectors, 0, nil) - - atomic.StoreInt64(&mine, 0) - <-done -} - -func flushSealingBatches(t *testing.T, ctx context.Context, miner TestStorageNode) { - pcb, err := miner.SectorPreCommitFlush(ctx) - require.NoError(t, err) - if pcb != nil { - fmt.Printf("PRECOMMIT BATCH: %+v\n", pcb) - } - - cb, err := miner.SectorCommitFlush(ctx) - require.NoError(t, err) - if cb != nil { - fmt.Printf("COMMIT BATCH: 
%+v\n", cb) - } -} - -func startPledge(t *testing.T, ctx context.Context, miner TestStorageNode, n, existing int, blockNotif <-chan struct{}) map[abi.SectorNumber]struct{} { - for i := 0; i < n; i++ { - if i%3 == 0 && blockNotif != nil { - <-blockNotif - log.Errorf("WAIT") - } - log.Errorf("PLEDGING %d", i) - _, err := miner.PledgeSector(ctx) - require.NoError(t, err) - } - - for { - s, err := miner.SectorsList(ctx) // Note - the test builder doesn't import genesis sectors into FSM - require.NoError(t, err) - fmt.Printf("Sectors: %d\n", len(s)) - if len(s) >= n+existing { - break - } - - build.Clock.Sleep(100 * time.Millisecond) - } - - fmt.Printf("All sectors is fsm\n") - - s, err := miner.SectorsList(ctx) - require.NoError(t, err) - - toCheck := map[abi.SectorNumber]struct{}{} - for _, number := range s { - toCheck[number] = struct{}{} - } - - return toCheck -} - -func pledgeSectors(t *testing.T, ctx context.Context, miner TestStorageNode, n, existing int, blockNotif <-chan struct{}) { - toCheck := startPledge(t, ctx, miner, n, existing, blockNotif) - - for len(toCheck) > 0 { - flushSealingBatches(t, ctx, miner) - - states := map[api.SectorState]int{} - for n := range toCheck { - st, err := miner.SectorsStatus(ctx, n, false) - require.NoError(t, err) - states[st.State]++ - if st.State == api.SectorState(sealing.Proving) { - delete(toCheck, n) - } - if strings.Contains(string(st.State), "Fail") { - t.Fatal("sector in a failed state", st.State) - } - } - - build.Clock.Sleep(100 * time.Millisecond) - fmt.Printf("WaitSeal: %d %+v\n", len(toCheck), states) - } -} diff --git a/api/test/test.go b/api/test/test.go deleted file mode 100644 index 8f8d95100..000000000 --- a/api/test/test.go +++ /dev/null @@ -1,323 +0,0 @@ -package test - -import ( - "context" - "fmt" - "os" - "strings" - "testing" - "time" - - logging "github.com/ipfs/go-log/v2" - "github.com/multiformats/go-multiaddr" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - 
"github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/go-state-types/big" - "github.com/filecoin-project/go-state-types/network" - - lapi "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/api/v1api" - "github.com/filecoin-project/lotus/build" - "github.com/filecoin-project/lotus/chain/stmgr" - "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/miner" - "github.com/filecoin-project/lotus/node" -) - -func init() { - logging.SetAllLoggers(logging.LevelInfo) - err := os.Setenv("BELLMAN_NO_GPU", "1") - if err != nil { - panic(fmt.Sprintf("failed to set BELLMAN_NO_GPU env variable: %s", err)) - } - build.InsecurePoStValidation = true -} - -type StorageBuilder func(context.Context, *testing.T, abi.RegisteredSealProof, address.Address) TestStorageNode - -type TestNode struct { - v1api.FullNode - // ListenAddr is the address on which an API server is listening, if an - // API server is created for this Node - ListenAddr multiaddr.Multiaddr - - Stb StorageBuilder -} - -type TestStorageNode struct { - lapi.StorageMiner - // ListenAddr is the address on which an API server is listening, if an - // API server is created for this Node - ListenAddr multiaddr.Multiaddr - - MineOne func(context.Context, miner.MineReq) error - Stop func(context.Context) error -} - -var PresealGenesis = -1 - -const GenesisPreseals = 2 - -const TestSpt = abi.RegisteredSealProof_StackedDrg2KiBV1_1 - -// Options for setting up a mock storage miner -type StorageMiner struct { - Full int - Opts node.Option - Preseal int -} - -type OptionGenerator func([]TestNode) node.Option - -// Options for setting up a mock full node -type FullNodeOpts struct { - Lite bool // run node in "lite" mode - Opts OptionGenerator // generate dependency injection options -} - -// APIBuilder is a function which is invoked in test suite to provide -// test nodes and networks -// -// fullOpts array 
defines options for each full node -// storage array defines storage nodes, numbers in the array specify full node -// index the storage node 'belongs' to -type APIBuilder func(t *testing.T, full []FullNodeOpts, storage []StorageMiner) ([]TestNode, []TestStorageNode) -type testSuite struct { - makeNodes APIBuilder -} - -// TestApis is the entry point to API test suite -func TestApis(t *testing.T, b APIBuilder) { - ts := testSuite{ - makeNodes: b, - } - - t.Run("version", ts.testVersion) - t.Run("id", ts.testID) - t.Run("testConnectTwo", ts.testConnectTwo) - t.Run("testMining", ts.testMining) - t.Run("testMiningReal", ts.testMiningReal) - t.Run("testSearchMsg", ts.testSearchMsg) - t.Run("testNonGenesisMiner", ts.testNonGenesisMiner) -} - -func DefaultFullOpts(nFull int) []FullNodeOpts { - full := make([]FullNodeOpts, nFull) - for i := range full { - full[i] = FullNodeOpts{ - Opts: func(nodes []TestNode) node.Option { - return node.Options() - }, - } - } - return full -} - -var OneMiner = []StorageMiner{{Full: 0, Preseal: PresealGenesis}} -var OneFull = DefaultFullOpts(1) -var TwoFull = DefaultFullOpts(2) - -var FullNodeWithLatestActorsAt = func(upgradeHeight abi.ChainEpoch) FullNodeOpts { - if upgradeHeight == -1 { - // Attention: Update this when introducing new actor versions or your tests will be sad - upgradeHeight = 4 - } - - return FullNodeOpts{ - Opts: func(nodes []TestNode) node.Option { - return node.Override(new(stmgr.UpgradeSchedule), stmgr.UpgradeSchedule{{ - // prepare for upgrade. 
- Network: network.Version9, - Height: 1, - Migration: stmgr.UpgradeActorsV2, - }, { - Network: network.Version10, - Height: 2, - Migration: stmgr.UpgradeActorsV3, - }, { - Network: network.Version12, - Height: 3, - Migration: stmgr.UpgradeActorsV4, - }, { - Network: network.Version13, - Height: upgradeHeight, - Migration: stmgr.UpgradeActorsV5, - }}) - }, - } -} - -var FullNodeWithSDRAt = func(calico, persian abi.ChainEpoch) FullNodeOpts { - return FullNodeOpts{ - Opts: func(nodes []TestNode) node.Option { - return node.Override(new(stmgr.UpgradeSchedule), stmgr.UpgradeSchedule{{ - Network: network.Version6, - Height: 1, - Migration: stmgr.UpgradeActorsV2, - }, { - Network: network.Version7, - Height: calico, - Migration: stmgr.UpgradeCalico, - }, { - Network: network.Version8, - Height: persian, - }}) - }, - } -} - -var FullNodeWithV4ActorsAt = func(upgradeHeight abi.ChainEpoch) FullNodeOpts { - if upgradeHeight == -1 { - upgradeHeight = 3 - } - - return FullNodeOpts{ - Opts: func(nodes []TestNode) node.Option { - return node.Override(new(stmgr.UpgradeSchedule), stmgr.UpgradeSchedule{{ - // prepare for upgrade. 
- Network: network.Version9, - Height: 1, - Migration: stmgr.UpgradeActorsV2, - }, { - Network: network.Version10, - Height: 2, - Migration: stmgr.UpgradeActorsV3, - }, { - Network: network.Version12, - Height: upgradeHeight, - Migration: stmgr.UpgradeActorsV4, - }}) - }, - } -} - -var MineNext = miner.MineReq{ - InjectNulls: 0, - Done: func(bool, abi.ChainEpoch, error) {}, -} - -func (ts *testSuite) testVersion(t *testing.T) { - lapi.RunningNodeType = lapi.NodeFull - t.Cleanup(func() { - lapi.RunningNodeType = lapi.NodeUnknown - }) - - ctx := context.Background() - apis, _ := ts.makeNodes(t, OneFull, OneMiner) - napi := apis[0] - - v, err := napi.Version(ctx) - if err != nil { - t.Fatal(err) - } - versions := strings.Split(v.Version, "+") - if len(versions) <= 0 { - t.Fatal("empty version") - } - require.Equal(t, versions[0], build.BuildVersion) -} - -func (ts *testSuite) testSearchMsg(t *testing.T) { - apis, miners := ts.makeNodes(t, OneFull, OneMiner) - - api := apis[0] - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - senderAddr, err := api.WalletDefaultAddress(ctx) - if err != nil { - t.Fatal(err) - } - - msg := &types.Message{ - From: senderAddr, - To: senderAddr, - Value: big.Zero(), - } - bm := NewBlockMiner(ctx, t, miners[0], 100*time.Millisecond) - bm.MineBlocks() - defer bm.Stop() - - sm, err := api.MpoolPushMessage(ctx, msg, nil) - if err != nil { - t.Fatal(err) - } - res, err := api.StateWaitMsg(ctx, sm.Cid(), 1, lapi.LookbackNoLimit, true) - if err != nil { - t.Fatal(err) - } - if res.Receipt.ExitCode != 0 { - t.Fatal("did not successfully send message") - } - - searchRes, err := api.StateSearchMsg(ctx, types.EmptyTSK, sm.Cid(), lapi.LookbackNoLimit, true) - if err != nil { - t.Fatal(err) - } - - if searchRes.TipSet != res.TipSet { - t.Fatalf("search ts: %s, different from wait ts: %s", searchRes.TipSet, res.TipSet) - } - -} - -func (ts *testSuite) testID(t *testing.T) { - ctx := context.Background() - apis, _ := 
ts.makeNodes(t, OneFull, OneMiner) - api := apis[0] - - id, err := api.ID(ctx) - if err != nil { - t.Fatal(err) - } - assert.Regexp(t, "^12", id.Pretty()) -} - -func (ts *testSuite) testConnectTwo(t *testing.T) { - ctx := context.Background() - apis, _ := ts.makeNodes(t, TwoFull, OneMiner) - - p, err := apis[0].NetPeers(ctx) - if err != nil { - t.Fatal(err) - } - if len(p) != 0 { - t.Error("Node 0 has a peer") - } - - p, err = apis[1].NetPeers(ctx) - if err != nil { - t.Fatal(err) - } - if len(p) != 0 { - t.Error("Node 1 has a peer") - } - - addrs, err := apis[1].NetAddrsListen(ctx) - if err != nil { - t.Fatal(err) - } - - if err := apis[0].NetConnect(ctx, addrs); err != nil { - t.Fatal(err) - } - - p, err = apis[0].NetPeers(ctx) - if err != nil { - t.Fatal(err) - } - if len(p) != 1 { - t.Error("Node 0 doesn't have 1 peer") - } - - p, err = apis[1].NetPeers(ctx) - if err != nil { - t.Fatal(err) - } - if len(p) != 1 { - t.Error("Node 0 doesn't have 1 peer") - } -} diff --git a/api/test/util.go b/api/test/util.go deleted file mode 100644 index f571b48da..000000000 --- a/api/test/util.go +++ /dev/null @@ -1,87 +0,0 @@ -package test - -import ( - "context" - "testing" - "time" - - "github.com/filecoin-project/go-state-types/abi" - - "github.com/filecoin-project/go-address" - lapi "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/miner" -) - -func SendFunds(ctx context.Context, t *testing.T, sender TestNode, addr address.Address, amount abi.TokenAmount) { - senderAddr, err := sender.WalletDefaultAddress(ctx) - if err != nil { - t.Fatal(err) - } - - msg := &types.Message{ - From: senderAddr, - To: addr, - Value: amount, - } - - sm, err := sender.MpoolPushMessage(ctx, msg, nil) - if err != nil { - t.Fatal(err) - } - res, err := sender.StateWaitMsg(ctx, sm.Cid(), 1, lapi.LookbackNoLimit, true) - if err != nil { - t.Fatal(err) - } - if res.Receipt.ExitCode != 0 { - t.Fatal("did not successfully 
send money") - } -} - -func MineUntilBlock(ctx context.Context, t *testing.T, fn TestNode, sn TestStorageNode, cb func(abi.ChainEpoch)) { - for i := 0; i < 1000; i++ { - var success bool - var err error - var epoch abi.ChainEpoch - wait := make(chan struct{}) - mineErr := sn.MineOne(ctx, miner.MineReq{ - Done: func(win bool, ep abi.ChainEpoch, e error) { - success = win - err = e - epoch = ep - wait <- struct{}{} - }, - }) - if mineErr != nil { - t.Fatal(mineErr) - } - <-wait - if err != nil { - t.Fatal(err) - } - if success { - // Wait until it shows up on the given full nodes ChainHead - nloops := 50 - for i := 0; i < nloops; i++ { - ts, err := fn.ChainHead(ctx) - if err != nil { - t.Fatal(err) - } - if ts.Height() == epoch { - break - } - if i == nloops-1 { - t.Fatal("block never managed to sync to node") - } - time.Sleep(time.Millisecond * 10) - } - - if cb != nil { - cb(epoch) - } - return - } - t.Log("did not mine block, trying again", i) - } - t.Fatal("failed to mine 1000 times in a row...") -} diff --git a/api/test/window_post.go b/api/test/window_post.go deleted file mode 100644 index 555195365..000000000 --- a/api/test/window_post.go +++ /dev/null @@ -1,1002 +0,0 @@ -package test - -import ( - "context" - "fmt" - "testing" - "time" - - "github.com/filecoin-project/go-state-types/big" - - "github.com/stretchr/testify/require" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-bitfield" - "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/go-state-types/crypto" - "github.com/filecoin-project/go-state-types/dline" - "github.com/filecoin-project/lotus/extern/sector-storage/mock" - sealing "github.com/filecoin-project/lotus/extern/storage-sealing" - proof3 "github.com/filecoin-project/specs-actors/v3/actors/runtime/proof" - "github.com/filecoin-project/specs-storage/storage" - - "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/build" - 
"github.com/filecoin-project/lotus/chain/actors" - minerActor "github.com/filecoin-project/lotus/chain/actors/builtin/miner" - "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/node/impl" -) - -func TestWindowPost(t *testing.T, b APIBuilder, blocktime time.Duration, nSectors int) { - for _, height := range []abi.ChainEpoch{ - -1, // before - 162, // while sealing - 5000, // while proving - } { - height := height // copy to satisfy lints - t.Run(fmt.Sprintf("upgrade-%d", height), func(t *testing.T) { - testWindowPostUpgrade(t, b, blocktime, nSectors, height) - }) - } - -} - -func testWindowPostUpgrade(t *testing.T, b APIBuilder, blocktime time.Duration, nSectors int, - upgradeHeight abi.ChainEpoch) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - n, sn := b(t, []FullNodeOpts{FullNodeWithLatestActorsAt(upgradeHeight)}, OneMiner) - - client := n[0].FullNode.(*impl.FullNodeAPI) - miner := sn[0] - - addrinfo, err := client.NetAddrsListen(ctx) - if err != nil { - t.Fatal(err) - } - - if err := miner.NetConnect(ctx, addrinfo); err != nil { - t.Fatal(err) - } - build.Clock.Sleep(time.Second) - - done := make(chan struct{}) - go func() { - defer close(done) - for ctx.Err() == nil { - build.Clock.Sleep(blocktime) - if err := sn[0].MineOne(ctx, MineNext); err != nil { - if ctx.Err() != nil { - // context was canceled, ignore the error. 
- return - } - t.Error(err) - } - } - }() - defer func() { - cancel() - <-done - }() - - pledgeSectors(t, ctx, miner, nSectors, 0, nil) - - maddr, err := miner.ActorAddress(ctx) - require.NoError(t, err) - - di, err := client.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK) - require.NoError(t, err) - - mid, err := address.IDFromAddress(maddr) - require.NoError(t, err) - - fmt.Printf("Running one proving period\n") - fmt.Printf("End for head.Height > %d\n", di.PeriodStart+di.WPoStProvingPeriod+2) - - for { - head, err := client.ChainHead(ctx) - require.NoError(t, err) - - if head.Height() > di.PeriodStart+di.WPoStProvingPeriod+2 { - fmt.Printf("Now head.Height = %d\n", head.Height()) - break - } - build.Clock.Sleep(blocktime) - } - - p, err := client.StateMinerPower(ctx, maddr, types.EmptyTSK) - require.NoError(t, err) - - ssz, err := miner.ActorSectorSize(ctx, maddr) - require.NoError(t, err) - - require.Equal(t, p.MinerPower, p.TotalPower) - require.Equal(t, p.MinerPower.RawBytePower, types.NewInt(uint64(ssz)*uint64(nSectors+GenesisPreseals))) - - fmt.Printf("Drop some sectors\n") - - // Drop 2 sectors from deadline 2 partition 0 (full partition / deadline) - { - parts, err := client.StateMinerPartitions(ctx, maddr, 2, types.EmptyTSK) - require.NoError(t, err) - require.Greater(t, len(parts), 0) - - secs := parts[0].AllSectors - n, err := secs.Count() - require.NoError(t, err) - require.Equal(t, uint64(2), n) - - // Drop the partition - err = secs.ForEach(func(sid uint64) error { - return miner.StorageMiner.(*impl.StorageMinerAPI).IStorageMgr.(*mock.SectorMgr).MarkCorrupted(storage.SectorRef{ - ID: abi.SectorID{ - Miner: abi.ActorID(mid), - Number: abi.SectorNumber(sid), - }, - }, true) - }) - require.NoError(t, err) - } - - var s storage.SectorRef - - // Drop 1 sectors from deadline 3 partition 0 - { - parts, err := client.StateMinerPartitions(ctx, maddr, 3, types.EmptyTSK) - require.NoError(t, err) - require.Greater(t, len(parts), 0) - - secs := 
parts[0].AllSectors - n, err := secs.Count() - require.NoError(t, err) - require.Equal(t, uint64(2), n) - - // Drop the sector - sn, err := secs.First() - require.NoError(t, err) - - all, err := secs.All(2) - require.NoError(t, err) - fmt.Println("the sectors", all) - - s = storage.SectorRef{ - ID: abi.SectorID{ - Miner: abi.ActorID(mid), - Number: abi.SectorNumber(sn), - }, - } - - err = miner.StorageMiner.(*impl.StorageMinerAPI).IStorageMgr.(*mock.SectorMgr).MarkFailed(s, true) - require.NoError(t, err) - } - - di, err = client.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK) - require.NoError(t, err) - - fmt.Printf("Go through another PP, wait for sectors to become faulty\n") - fmt.Printf("End for head.Height > %d\n", di.PeriodStart+di.WPoStProvingPeriod+2) - - for { - head, err := client.ChainHead(ctx) - require.NoError(t, err) - - if head.Height() > di.PeriodStart+(di.WPoStProvingPeriod)+2 { - fmt.Printf("Now head.Height = %d\n", head.Height()) - break - } - - build.Clock.Sleep(blocktime) - } - - p, err = client.StateMinerPower(ctx, maddr, types.EmptyTSK) - require.NoError(t, err) - - require.Equal(t, p.MinerPower, p.TotalPower) - - sectors := p.MinerPower.RawBytePower.Uint64() / uint64(ssz) - require.Equal(t, nSectors+GenesisPreseals-3, int(sectors)) // -3 just removed sectors - - fmt.Printf("Recover one sector\n") - - err = miner.StorageMiner.(*impl.StorageMinerAPI).IStorageMgr.(*mock.SectorMgr).MarkFailed(s, false) - require.NoError(t, err) - - di, err = client.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK) - require.NoError(t, err) - - fmt.Printf("End for head.Height > %d\n", di.PeriodStart+di.WPoStProvingPeriod+2) - - for { - head, err := client.ChainHead(ctx) - require.NoError(t, err) - - if head.Height() > di.PeriodStart+di.WPoStProvingPeriod+2 { - fmt.Printf("Now head.Height = %d\n", head.Height()) - break - } - - build.Clock.Sleep(blocktime) - } - - p, err = client.StateMinerPower(ctx, maddr, types.EmptyTSK) - require.NoError(t, err) - - 
require.Equal(t, p.MinerPower, p.TotalPower) - - sectors = p.MinerPower.RawBytePower.Uint64() / uint64(ssz) - require.Equal(t, nSectors+GenesisPreseals-2, int(sectors)) // -2 not recovered sectors - - // pledge a sector after recovery - - pledgeSectors(t, ctx, miner, 1, nSectors, nil) - - { - // Wait until proven. - di, err = client.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK) - require.NoError(t, err) - - waitUntil := di.PeriodStart + di.WPoStProvingPeriod + 2 - fmt.Printf("End for head.Height > %d\n", waitUntil) - - for { - head, err := client.ChainHead(ctx) - require.NoError(t, err) - - if head.Height() > waitUntil { - fmt.Printf("Now head.Height = %d\n", head.Height()) - break - } - } - } - - p, err = client.StateMinerPower(ctx, maddr, types.EmptyTSK) - require.NoError(t, err) - - require.Equal(t, p.MinerPower, p.TotalPower) - - sectors = p.MinerPower.RawBytePower.Uint64() / uint64(ssz) - require.Equal(t, nSectors+GenesisPreseals-2+1, int(sectors)) // -2 not recovered sectors + 1 just pledged -} - -func TestTerminate(t *testing.T, b APIBuilder, blocktime time.Duration) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - nSectors := uint64(2) - - n, sn := b(t, []FullNodeOpts{FullNodeWithLatestActorsAt(-1)}, []StorageMiner{{Full: 0, Preseal: int(nSectors)}}) - - client := n[0].FullNode.(*impl.FullNodeAPI) - miner := sn[0] - - addrinfo, err := client.NetAddrsListen(ctx) - if err != nil { - t.Fatal(err) - } - - if err := miner.NetConnect(ctx, addrinfo); err != nil { - t.Fatal(err) - } - build.Clock.Sleep(time.Second) - - done := make(chan struct{}) - go func() { - defer close(done) - for ctx.Err() == nil { - build.Clock.Sleep(blocktime) - if err := sn[0].MineOne(ctx, MineNext); err != nil { - if ctx.Err() != nil { - // context was canceled, ignore the error. 
- return - } - t.Error(err) - } - } - }() - defer func() { - cancel() - <-done - }() - - maddr, err := miner.ActorAddress(ctx) - require.NoError(t, err) - - ssz, err := miner.ActorSectorSize(ctx, maddr) - require.NoError(t, err) - - p, err := client.StateMinerPower(ctx, maddr, types.EmptyTSK) - require.NoError(t, err) - require.Equal(t, p.MinerPower, p.TotalPower) - require.Equal(t, p.MinerPower.RawBytePower, types.NewInt(uint64(ssz)*nSectors)) - - fmt.Printf("Seal a sector\n") - - pledgeSectors(t, ctx, miner, 1, 0, nil) - - fmt.Printf("wait for power\n") - - { - // Wait until proven. - di, err := client.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK) - require.NoError(t, err) - - waitUntil := di.PeriodStart + di.WPoStProvingPeriod + 2 - fmt.Printf("End for head.Height > %d\n", waitUntil) - - for { - head, err := client.ChainHead(ctx) - require.NoError(t, err) - - if head.Height() > waitUntil { - fmt.Printf("Now head.Height = %d\n", head.Height()) - break - } - } - } - - nSectors++ - - p, err = client.StateMinerPower(ctx, maddr, types.EmptyTSK) - require.NoError(t, err) - require.Equal(t, p.MinerPower, p.TotalPower) - require.Equal(t, p.MinerPower.RawBytePower, types.NewInt(uint64(ssz)*nSectors)) - - fmt.Println("Terminate a sector") - - toTerminate := abi.SectorNumber(3) - - err = miner.SectorTerminate(ctx, toTerminate) - require.NoError(t, err) - - msgTriggerred := false -loop: - for { - si, err := miner.SectorsStatus(ctx, toTerminate, false) - require.NoError(t, err) - - fmt.Println("state: ", si.State, msgTriggerred) - - switch sealing.SectorState(si.State) { - case sealing.Terminating: - if !msgTriggerred { - { - p, err := miner.SectorTerminatePending(ctx) - require.NoError(t, err) - require.Len(t, p, 1) - require.Equal(t, abi.SectorNumber(3), p[0].Number) - } - - c, err := miner.SectorTerminateFlush(ctx) - require.NoError(t, err) - if c != nil { - msgTriggerred = true - fmt.Println("terminate message:", c) - - { - p, err := 
miner.SectorTerminatePending(ctx) - require.NoError(t, err) - require.Len(t, p, 0) - } - } - } - case sealing.TerminateWait, sealing.TerminateFinality, sealing.Removed: - break loop - } - - time.Sleep(100 * time.Millisecond) - } - - // check power decreased - p, err = client.StateMinerPower(ctx, maddr, types.EmptyTSK) - require.NoError(t, err) - require.Equal(t, p.MinerPower, p.TotalPower) - require.Equal(t, p.MinerPower.RawBytePower, types.NewInt(uint64(ssz)*(nSectors-1))) - - // check in terminated set - { - parts, err := client.StateMinerPartitions(ctx, maddr, 1, types.EmptyTSK) - require.NoError(t, err) - require.Greater(t, len(parts), 0) - - bflen := func(b bitfield.BitField) uint64 { - l, err := b.Count() - require.NoError(t, err) - return l - } - - require.Equal(t, uint64(1), bflen(parts[0].AllSectors)) - require.Equal(t, uint64(0), bflen(parts[0].LiveSectors)) - } - - di, err := client.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK) - require.NoError(t, err) - for { - head, err := client.ChainHead(ctx) - require.NoError(t, err) - - if head.Height() > di.PeriodStart+di.WPoStProvingPeriod+2 { - fmt.Printf("Now head.Height = %d\n", head.Height()) - break - } - build.Clock.Sleep(blocktime) - } - require.NoError(t, err) - fmt.Printf("End for head.Height > %d\n", di.PeriodStart+di.WPoStProvingPeriod+2) - - p, err = client.StateMinerPower(ctx, maddr, types.EmptyTSK) - require.NoError(t, err) - - require.Equal(t, p.MinerPower, p.TotalPower) - require.Equal(t, p.MinerPower.RawBytePower, types.NewInt(uint64(ssz)*(nSectors-1))) -} - -func TestWindowPostDispute(t *testing.T, b APIBuilder, blocktime time.Duration) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - // First, we configure two miners. After sealing, we're going to turn off the first miner so - // it doesn't submit proofs. - /// - // Then we're going to manually submit bad proofs. 
- n, sn := b(t, []FullNodeOpts{ - FullNodeWithV4ActorsAt(-1), - }, []StorageMiner{ - {Full: 0, Preseal: PresealGenesis}, - {Full: 0}, - }) - - client := n[0].FullNode.(*impl.FullNodeAPI) - chainMiner := sn[0] - evilMiner := sn[1] - - { - addrinfo, err := client.NetAddrsListen(ctx) - if err != nil { - t.Fatal(err) - } - - if err := chainMiner.NetConnect(ctx, addrinfo); err != nil { - t.Fatal(err) - } - - if err := evilMiner.NetConnect(ctx, addrinfo); err != nil { - t.Fatal(err) - } - } - - defaultFrom, err := client.WalletDefaultAddress(ctx) - require.NoError(t, err) - - build.Clock.Sleep(time.Second) - - // Mine with the _second_ node (the good one). - done := make(chan struct{}) - go func() { - defer close(done) - for ctx.Err() == nil { - build.Clock.Sleep(blocktime) - if err := chainMiner.MineOne(ctx, MineNext); err != nil { - if ctx.Err() != nil { - // context was canceled, ignore the error. - return - } - t.Error(err) - } - } - }() - defer func() { - cancel() - <-done - }() - - // Give the chain miner enough sectors to win every block. - pledgeSectors(t, ctx, chainMiner, 10, 0, nil) - // And the evil one 1 sector. No cookie for you. - pledgeSectors(t, ctx, evilMiner, 1, 0, nil) - - // Let the evil miner's sectors gain power. 
- evilMinerAddr, err := evilMiner.ActorAddress(ctx) - require.NoError(t, err) - - di, err := client.StateMinerProvingDeadline(ctx, evilMinerAddr, types.EmptyTSK) - require.NoError(t, err) - - fmt.Printf("Running one proving period\n") - fmt.Printf("End for head.Height > %d\n", di.PeriodStart+di.WPoStProvingPeriod*2) - - for { - head, err := client.ChainHead(ctx) - require.NoError(t, err) - - if head.Height() > di.PeriodStart+di.WPoStProvingPeriod*2 { - fmt.Printf("Now head.Height = %d\n", head.Height()) - break - } - build.Clock.Sleep(blocktime) - } - - p, err := client.StateMinerPower(ctx, evilMinerAddr, types.EmptyTSK) - require.NoError(t, err) - - ssz, err := evilMiner.ActorSectorSize(ctx, evilMinerAddr) - require.NoError(t, err) - - // make sure it has gained power. - require.Equal(t, p.MinerPower.RawBytePower, types.NewInt(uint64(ssz))) - - evilSectors, err := evilMiner.SectorsList(ctx) - require.NoError(t, err) - evilSectorNo := evilSectors[0] // only one. - evilSectorLoc, err := client.StateSectorPartition(ctx, evilMinerAddr, evilSectorNo, types.EmptyTSK) - require.NoError(t, err) - - fmt.Println("evil miner stopping") - - // Now stop the evil miner, and start manually submitting bad proofs. - require.NoError(t, evilMiner.Stop(ctx)) - - fmt.Println("evil miner stopped") - - // Wait until we need to prove our sector. - for { - di, err = client.StateMinerProvingDeadline(ctx, evilMinerAddr, types.EmptyTSK) - require.NoError(t, err) - if di.Index == evilSectorLoc.Deadline && di.CurrentEpoch-di.PeriodStart > 1 { - break - } - build.Clock.Sleep(blocktime) - } - - err = submitBadProof(ctx, client, evilMinerAddr, di, evilSectorLoc.Deadline, evilSectorLoc.Partition) - require.NoError(t, err, "evil proof not accepted") - - // Wait until after the proving period. 
- for { - di, err = client.StateMinerProvingDeadline(ctx, evilMinerAddr, types.EmptyTSK) - require.NoError(t, err) - if di.Index != evilSectorLoc.Deadline { - break - } - build.Clock.Sleep(blocktime) - } - - fmt.Println("accepted evil proof") - - // Make sure the evil node didn't lose any power. - p, err = client.StateMinerPower(ctx, evilMinerAddr, types.EmptyTSK) - require.NoError(t, err) - require.Equal(t, p.MinerPower.RawBytePower, types.NewInt(uint64(ssz))) - - // OBJECTION! The good miner files a DISPUTE!!!! - { - params := &minerActor.DisputeWindowedPoStParams{ - Deadline: evilSectorLoc.Deadline, - PoStIndex: 0, - } - - enc, aerr := actors.SerializeParams(params) - require.NoError(t, aerr) - - msg := &types.Message{ - To: evilMinerAddr, - Method: minerActor.Methods.DisputeWindowedPoSt, - Params: enc, - Value: types.NewInt(0), - From: defaultFrom, - } - sm, err := client.MpoolPushMessage(ctx, msg, nil) - require.NoError(t, err) - - fmt.Println("waiting dispute") - rec, err := client.StateWaitMsg(ctx, sm.Cid(), build.MessageConfidence, api.LookbackNoLimit, true) - require.NoError(t, err) - require.Zero(t, rec.Receipt.ExitCode, "dispute not accepted: %s", rec.Receipt.ExitCode.Error()) - } - - // Objection SUSTAINED! - // Make sure the evil node lost power. - p, err = client.StateMinerPower(ctx, evilMinerAddr, types.EmptyTSK) - require.NoError(t, err) - require.True(t, p.MinerPower.RawBytePower.IsZero()) - - // Now we begin the redemption arc. - require.True(t, p.MinerPower.RawBytePower.IsZero()) - - // First, recover the sector. 
- - { - minerInfo, err := client.StateMinerInfo(ctx, evilMinerAddr, types.EmptyTSK) - require.NoError(t, err) - - params := &minerActor.DeclareFaultsRecoveredParams{ - Recoveries: []minerActor.RecoveryDeclaration{{ - Deadline: evilSectorLoc.Deadline, - Partition: evilSectorLoc.Partition, - Sectors: bitfield.NewFromSet([]uint64{uint64(evilSectorNo)}), - }}, - } - - enc, aerr := actors.SerializeParams(params) - require.NoError(t, aerr) - - msg := &types.Message{ - To: evilMinerAddr, - Method: minerActor.Methods.DeclareFaultsRecovered, - Params: enc, - Value: types.FromFil(30), // repay debt. - From: minerInfo.Owner, - } - sm, err := client.MpoolPushMessage(ctx, msg, nil) - require.NoError(t, err) - - rec, err := client.StateWaitMsg(ctx, sm.Cid(), build.MessageConfidence, api.LookbackNoLimit, true) - require.NoError(t, err) - require.Zero(t, rec.Receipt.ExitCode, "recovery not accepted: %s", rec.Receipt.ExitCode.Error()) - } - - // Then wait for the deadline. - for { - di, err = client.StateMinerProvingDeadline(ctx, evilMinerAddr, types.EmptyTSK) - require.NoError(t, err) - if di.Index == evilSectorLoc.Deadline && di.CurrentEpoch-di.PeriodStart > 1 { - break - } - build.Clock.Sleep(blocktime) - } - - // Now try to be evil again - err = submitBadProof(ctx, client, evilMinerAddr, di, evilSectorLoc.Deadline, evilSectorLoc.Partition) - require.Error(t, err) - require.Contains(t, err.Error(), "message execution failed: exit 16, reason: window post failed: invalid PoSt") - - // It didn't work because we're recovering. 
-} - -func submitBadProof( - ctx context.Context, - client api.FullNode, maddr address.Address, - di *dline.Info, dlIdx, partIdx uint64, -) error { - head, err := client.ChainHead(ctx) - if err != nil { - return err - } - - from, err := client.WalletDefaultAddress(ctx) - if err != nil { - return err - } - - minerInfo, err := client.StateMinerInfo(ctx, maddr, head.Key()) - if err != nil { - return err - } - - commEpoch := di.Open - commRand, err := client.ChainGetRandomnessFromTickets( - ctx, head.Key(), crypto.DomainSeparationTag_PoStChainCommit, - commEpoch, nil, - ) - if err != nil { - return err - } - params := &minerActor.SubmitWindowedPoStParams{ - ChainCommitEpoch: commEpoch, - ChainCommitRand: commRand, - Deadline: dlIdx, - Partitions: []minerActor.PoStPartition{{Index: partIdx}}, - Proofs: []proof3.PoStProof{{ - PoStProof: minerInfo.WindowPoStProofType, - ProofBytes: []byte("I'm soooo very evil."), - }}, - } - - enc, aerr := actors.SerializeParams(params) - if aerr != nil { - return aerr - } - - msg := &types.Message{ - To: maddr, - Method: minerActor.Methods.SubmitWindowedPoSt, - Params: enc, - Value: types.NewInt(0), - From: from, - } - sm, err := client.MpoolPushMessage(ctx, msg, nil) - if err != nil { - return err - } - - rec, err := client.StateWaitMsg(ctx, sm.Cid(), build.MessageConfidence, api.LookbackNoLimit, true) - if err != nil { - return err - } - if rec.Receipt.ExitCode.IsError() { - return rec.Receipt.ExitCode - } - return nil -} - -func TestWindowPostDisputeFails(t *testing.T, b APIBuilder, blocktime time.Duration) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - n, sn := b(t, []FullNodeOpts{FullNodeWithV4ActorsAt(-1)}, OneMiner) - - client := n[0].FullNode.(*impl.FullNodeAPI) - miner := sn[0] - - { - addrinfo, err := client.NetAddrsListen(ctx) - if err != nil { - t.Fatal(err) - } - - if err := miner.NetConnect(ctx, addrinfo); err != nil { - t.Fatal(err) - } - } - - defaultFrom, err := 
client.WalletDefaultAddress(ctx) - require.NoError(t, err) - - maddr, err := miner.ActorAddress(ctx) - require.NoError(t, err) - - build.Clock.Sleep(time.Second) - - // Mine with the _second_ node (the good one). - done := make(chan struct{}) - go func() { - defer close(done) - for ctx.Err() == nil { - build.Clock.Sleep(blocktime) - if err := miner.MineOne(ctx, MineNext); err != nil { - if ctx.Err() != nil { - // context was canceled, ignore the error. - return - } - t.Error(err) - } - } - }() - defer func() { - cancel() - <-done - }() - - pledgeSectors(t, ctx, miner, 10, 0, nil) - - di, err := client.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK) - require.NoError(t, err) - - fmt.Printf("Running one proving period\n") - fmt.Printf("End for head.Height > %d\n", di.PeriodStart+di.WPoStProvingPeriod*2) - - for { - head, err := client.ChainHead(ctx) - require.NoError(t, err) - - if head.Height() > di.PeriodStart+di.WPoStProvingPeriod*2 { - fmt.Printf("Now head.Height = %d\n", head.Height()) - break - } - build.Clock.Sleep(blocktime) - } - - ssz, err := miner.ActorSectorSize(ctx, maddr) - require.NoError(t, err) - expectedPower := types.NewInt(uint64(ssz) * (GenesisPreseals + 10)) - - p, err := client.StateMinerPower(ctx, maddr, types.EmptyTSK) - require.NoError(t, err) - - // make sure it has gained power. - require.Equal(t, p.MinerPower.RawBytePower, expectedPower) - - // Wait until a proof has been submitted. - var targetDeadline uint64 -waitForProof: - for { - deadlines, err := client.StateMinerDeadlines(ctx, maddr, types.EmptyTSK) - require.NoError(t, err) - for dlIdx, dl := range deadlines { - nonEmpty, err := dl.PostSubmissions.IsEmpty() - require.NoError(t, err) - if nonEmpty { - targetDeadline = uint64(dlIdx) - break waitForProof - } - } - - build.Clock.Sleep(blocktime) - } - - for { - di, err := client.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK) - require.NoError(t, err) - // wait until the deadline finishes. 
- if di.Index == ((targetDeadline + 1) % di.WPoStPeriodDeadlines) { - break - } - - build.Clock.Sleep(blocktime) - } - - // Try to object to the proof. This should fail. - { - params := &minerActor.DisputeWindowedPoStParams{ - Deadline: targetDeadline, - PoStIndex: 0, - } - - enc, aerr := actors.SerializeParams(params) - require.NoError(t, aerr) - - msg := &types.Message{ - To: maddr, - Method: minerActor.Methods.DisputeWindowedPoSt, - Params: enc, - Value: types.NewInt(0), - From: defaultFrom, - } - _, err := client.MpoolPushMessage(ctx, msg, nil) - require.Error(t, err) - require.Contains(t, err.Error(), "failed to dispute valid post (RetCode=16)") - } -} - -func TestWindowPostBaseFeeNoBurn(t *testing.T, b APIBuilder, blocktime time.Duration) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - och := build.UpgradeClausHeight - build.UpgradeClausHeight = 10 - n, sn := b(t, DefaultFullOpts(1), OneMiner) - - client := n[0].FullNode.(*impl.FullNodeAPI) - miner := sn[0] - - { - addrinfo, err := client.NetAddrsListen(ctx) - if err != nil { - t.Fatal(err) - } - - if err := miner.NetConnect(ctx, addrinfo); err != nil { - t.Fatal(err) - } - } - - maddr, err := miner.ActorAddress(ctx) - require.NoError(t, err) - - mi, err := client.StateMinerInfo(ctx, maddr, types.EmptyTSK) - require.NoError(t, err) - - build.Clock.Sleep(time.Second) - - done := make(chan struct{}) - go func() { - defer close(done) - for ctx.Err() == nil { - build.Clock.Sleep(blocktime) - if err := miner.MineOne(ctx, MineNext); err != nil { - if ctx.Err() != nil { - // context was canceled, ignore the error. 
- return - } - t.Error(err) - } - } - }() - defer func() { - cancel() - <-done - }() - - pledgeSectors(t, ctx, miner, 10, 0, nil) - wact, err := client.StateGetActor(ctx, mi.Worker, types.EmptyTSK) - require.NoError(t, err) - en := wact.Nonce - - // wait for a new message to be sent from worker address, it will be a PoSt - -waitForProof: - for { - wact, err := client.StateGetActor(ctx, mi.Worker, types.EmptyTSK) - require.NoError(t, err) - if wact.Nonce > en { - break waitForProof - } - - build.Clock.Sleep(blocktime) - } - - slm, err := client.StateListMessages(ctx, &api.MessageMatch{To: maddr}, types.EmptyTSK, 0) - require.NoError(t, err) - - pmr, err := client.StateReplay(ctx, types.EmptyTSK, slm[0]) - require.NoError(t, err) - - require.Equal(t, pmr.GasCost.BaseFeeBurn, big.Zero()) - - build.UpgradeClausHeight = och -} - -func TestWindowPostBaseFeeBurn(t *testing.T, b APIBuilder, blocktime time.Duration) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - n, sn := b(t, []FullNodeOpts{FullNodeWithLatestActorsAt(-1)}, OneMiner) - - client := n[0].FullNode.(*impl.FullNodeAPI) - miner := sn[0] - - { - addrinfo, err := client.NetAddrsListen(ctx) - if err != nil { - t.Fatal(err) - } - - if err := miner.NetConnect(ctx, addrinfo); err != nil { - t.Fatal(err) - } - } - - maddr, err := miner.ActorAddress(ctx) - require.NoError(t, err) - - mi, err := client.StateMinerInfo(ctx, maddr, types.EmptyTSK) - require.NoError(t, err) - - build.Clock.Sleep(time.Second) - - done := make(chan struct{}) - go func() { - defer close(done) - for ctx.Err() == nil { - build.Clock.Sleep(blocktime) - if err := miner.MineOne(ctx, MineNext); err != nil { - if ctx.Err() != nil { - // context was canceled, ignore the error. 
- return - } - t.Error(err) - } - } - }() - defer func() { - cancel() - <-done - }() - - pledgeSectors(t, ctx, miner, 10, 0, nil) - wact, err := client.StateGetActor(ctx, mi.Worker, types.EmptyTSK) - require.NoError(t, err) - en := wact.Nonce - - // wait for a new message to be sent from worker address, it will be a PoSt - -waitForProof: - for { - wact, err := client.StateGetActor(ctx, mi.Worker, types.EmptyTSK) - require.NoError(t, err) - if wact.Nonce > en { - break waitForProof - } - - build.Clock.Sleep(blocktime) - } - - slm, err := client.StateListMessages(ctx, &api.MessageMatch{To: maddr}, types.EmptyTSK, 0) - require.NoError(t, err) - - pmr, err := client.StateReplay(ctx, types.EmptyTSK, slm[0]) - require.NoError(t, err) - - require.NotEqual(t, pmr.GasCost.BaseFeeBurn, big.Zero()) -} diff --git a/api/types.go b/api/types.go index 6417ce756..9d887b0a1 100644 --- a/api/types.go +++ b/api/types.go @@ -5,6 +5,9 @@ import ( "fmt" "time" + "github.com/filecoin-project/go-fil-markets/retrievalmarket" + "github.com/filecoin-project/lotus/chain/types" + datatransfer "github.com/filecoin-project/go-data-transfer" "github.com/filecoin-project/go-state-types/abi" "github.com/ipfs/go-cid" @@ -116,3 +119,79 @@ type ConnMgrInfo struct { Tags map[string]int Conns map[string]time.Time } + +type NodeStatus struct { + SyncStatus NodeSyncStatus + PeerStatus NodePeerStatus + ChainStatus NodeChainStatus +} + +type NodeSyncStatus struct { + Epoch uint64 + Behind uint64 +} + +type NodePeerStatus struct { + PeersToPublishMsgs int + PeersToPublishBlocks int +} + +type NodeChainStatus struct { + BlocksPerTipsetLast100 float64 + BlocksPerTipsetLastFinality float64 +} + +type CheckStatusCode int + +//go:generate go run golang.org/x/tools/cmd/stringer -type=CheckStatusCode -trimprefix=CheckStatus +const ( + _ CheckStatusCode = iota + // Message Checks + CheckStatusMessageSerialize + CheckStatusMessageSize + CheckStatusMessageValidity + CheckStatusMessageMinGas + 
CheckStatusMessageMinBaseFee + CheckStatusMessageBaseFee + CheckStatusMessageBaseFeeLowerBound + CheckStatusMessageBaseFeeUpperBound + CheckStatusMessageGetStateNonce + CheckStatusMessageNonce + CheckStatusMessageGetStateBalance + CheckStatusMessageBalance +) + +type CheckStatus struct { + Code CheckStatusCode + OK bool + Err string + Hint map[string]interface{} +} + +type MessageCheckStatus struct { + Cid cid.Cid + CheckStatus +} + +type MessagePrototype struct { + Message types.Message + ValidNonce bool +} + +type RetrievalInfo struct { + PayloadCID cid.Cid + ID retrievalmarket.DealID + PieceCID *cid.Cid + PricePerByte abi.TokenAmount + UnsealPrice abi.TokenAmount + + Status retrievalmarket.DealStatus + Message string // more information about deal state, particularly errors + Provider peer.ID + BytesReceived uint64 + BytesPaidFor uint64 + TotalPaid abi.TokenAmount + + TransferChannelID *datatransfer.ChannelID + DataTransfer *DataTransferChannel +} diff --git a/api/v0api/full.go b/api/v0api/full.go index 35100f032..3fad465bc 100644 --- a/api/v0api/full.go +++ b/api/v0api/full.go @@ -92,6 +92,9 @@ type FullNode interface { // specified block. ChainGetParentMessages(ctx context.Context, blockCid cid.Cid) ([]api.Message, error) //perm:read + // ChainGetMessagesInTipset returns message stores in current tipset + ChainGetMessagesInTipset(ctx context.Context, tsk types.TipSetKey) ([]api.Message, error) //perm:read + // ChainGetTipSetByHeight looks back for a tipset at the specified epoch. // If there are no blocks at the specified epoch, a tipset at an earlier epoch // will be returned. @@ -304,6 +307,8 @@ type FullNode interface { ClientRemoveImport(ctx context.Context, importID multistore.StoreID) error //perm:admin // ClientStartDeal proposes a deal with a miner. ClientStartDeal(ctx context.Context, params *api.StartDealParams) (*cid.Cid, error) //perm:admin + // ClientStatelessDeal fire-and-forget-proposes an offline deal to a miner without subsequent tracking. 
+ ClientStatelessDeal(ctx context.Context, params *api.StartDealParams) (*cid.Cid, error) //perm:write // ClientGetDealInfo returns the latest information about a given deal. ClientGetDealInfo(context.Context, cid.Cid) (*api.DealInfo, error) //perm:read // ClientListDeals returns information about the deals made by the local client. @@ -324,6 +329,10 @@ type FullNode interface { // of status updates. ClientRetrieveWithEvents(ctx context.Context, order api.RetrievalOrder, ref *api.FileRef) (<-chan marketevents.RetrievalEvent, error) //perm:admin // ClientQueryAsk returns a signed StorageAsk from the specified miner. + // ClientListRetrievals returns information about retrievals made by the local client + ClientListRetrievals(ctx context.Context) ([]api.RetrievalInfo, error) //perm:write + // ClientGetRetrievalUpdates returns status of updated retrieval deals + ClientGetRetrievalUpdates(ctx context.Context) (<-chan api.RetrievalInfo, error) //perm:write ClientQueryAsk(ctx context.Context, p peer.ID, miner address.Address) (*storagemarket.StorageAsk, error) //perm:read // ClientCalcCommP calculates the CommP and data size of the specified CID ClientDealPieceCID(ctx context.Context, root cid.Cid) (api.DataCIDSize, error) //perm:read @@ -623,7 +632,7 @@ type FullNode interface { // proposal. This method of approval can be used to ensure you only approve // exactly the transaction you think you are. 
// It takes the following params: , , , , , - // , , + // , , MsigApproveTxnHash(context.Context, address.Address, uint64, address.Address, address.Address, types.BigInt, address.Address, uint64, []byte) (cid.Cid, error) //perm:sign // MsigCancel cancels a previously-proposed multisig message diff --git a/api/v0api/gateway.go b/api/v0api/gateway.go index 8a55b4c27..18a5ec7d6 100644 --- a/api/v0api/gateway.go +++ b/api/v0api/gateway.go @@ -63,6 +63,7 @@ type Gateway interface { StateVerifiedClientStatus(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*abi.StoragePower, error) StateWaitMsg(ctx context.Context, msg cid.Cid, confidence uint64) (*api.MsgLookup, error) WalletBalance(context.Context, address.Address) (types.BigInt, error) + Version(context.Context) (api.APIVersion, error) } var _ Gateway = *new(FullNode) diff --git a/api/v0api/proxy_gen.go b/api/v0api/proxy_gen.go index 171a9e05a..9ef833f01 100644 --- a/api/v0api/proxy_gen.go +++ b/api/v0api/proxy_gen.go @@ -45,6 +45,8 @@ type FullNodeStruct struct { ChainGetMessage func(p0 context.Context, p1 cid.Cid) (*types.Message, error) `perm:"read"` + ChainGetMessagesInTipset func(p0 context.Context, p1 types.TipSetKey) ([]api.Message, error) `perm:"read"` + ChainGetNode func(p0 context.Context, p1 string) (*api.IpldObject, error) `perm:"read"` ChainGetParentMessages func(p0 context.Context, p1 cid.Cid) ([]api.Message, error) `perm:"read"` @@ -97,6 +99,8 @@ type FullNodeStruct struct { ClientGetDealUpdates func(p0 context.Context) (<-chan api.DealInfo, error) `perm:"write"` + ClientGetRetrievalUpdates func(p0 context.Context) (<-chan api.RetrievalInfo, error) `perm:"write"` + ClientHasLocal func(p0 context.Context, p1 cid.Cid) (bool, error) `perm:"write"` ClientImport func(p0 context.Context, p1 api.FileRef) (*api.ImportRes, error) `perm:"admin"` @@ -107,6 +111,8 @@ type FullNodeStruct struct { ClientListImports func(p0 context.Context) ([]api.Import, error) `perm:"write"` + ClientListRetrievals 
func(p0 context.Context) ([]api.RetrievalInfo, error) `perm:"write"` + ClientMinerQueryOffer func(p0 context.Context, p1 address.Address, p2 cid.Cid, p3 *cid.Cid) (api.QueryOffer, error) `perm:"read"` ClientQueryAsk func(p0 context.Context, p1 peer.ID, p2 address.Address) (*storagemarket.StorageAsk, error) `perm:"read"` @@ -123,6 +129,8 @@ type FullNodeStruct struct { ClientStartDeal func(p0 context.Context, p1 *api.StartDealParams) (*cid.Cid, error) `perm:"admin"` + ClientStatelessDeal func(p0 context.Context, p1 *api.StartDealParams) (*cid.Cid, error) `perm:"write"` + CreateBackup func(p0 context.Context, p1 string) error `perm:"admin"` GasEstimateFeeCap func(p0 context.Context, p1 *types.Message, p2 int64, p3 types.TipSetKey) (types.BigInt, error) `perm:"read"` @@ -443,6 +451,8 @@ type GatewayStruct struct { StateWaitMsg func(p0 context.Context, p1 cid.Cid, p2 uint64) (*api.MsgLookup, error) `` + Version func(p0 context.Context) (api.APIVersion, error) `` + WalletBalance func(p0 context.Context, p1 address.Address) (types.BigInt, error) `` } } @@ -506,6 +516,14 @@ func (s *FullNodeStub) ChainGetMessage(p0 context.Context, p1 cid.Cid) (*types.M return nil, xerrors.New("method not supported") } +func (s *FullNodeStruct) ChainGetMessagesInTipset(p0 context.Context, p1 types.TipSetKey) ([]api.Message, error) { + return s.Internal.ChainGetMessagesInTipset(p0, p1) +} + +func (s *FullNodeStub) ChainGetMessagesInTipset(p0 context.Context, p1 types.TipSetKey) ([]api.Message, error) { + return *new([]api.Message), xerrors.New("method not supported") +} + func (s *FullNodeStruct) ChainGetNode(p0 context.Context, p1 string) (*api.IpldObject, error) { return s.Internal.ChainGetNode(p0, p1) } @@ -714,6 +732,14 @@ func (s *FullNodeStub) ClientGetDealUpdates(p0 context.Context) (<-chan api.Deal return nil, xerrors.New("method not supported") } +func (s *FullNodeStruct) ClientGetRetrievalUpdates(p0 context.Context) (<-chan api.RetrievalInfo, error) { + return 
s.Internal.ClientGetRetrievalUpdates(p0) +} + +func (s *FullNodeStub) ClientGetRetrievalUpdates(p0 context.Context) (<-chan api.RetrievalInfo, error) { + return nil, xerrors.New("method not supported") +} + func (s *FullNodeStruct) ClientHasLocal(p0 context.Context, p1 cid.Cid) (bool, error) { return s.Internal.ClientHasLocal(p0, p1) } @@ -754,6 +780,14 @@ func (s *FullNodeStub) ClientListImports(p0 context.Context) ([]api.Import, erro return *new([]api.Import), xerrors.New("method not supported") } +func (s *FullNodeStruct) ClientListRetrievals(p0 context.Context) ([]api.RetrievalInfo, error) { + return s.Internal.ClientListRetrievals(p0) +} + +func (s *FullNodeStub) ClientListRetrievals(p0 context.Context) ([]api.RetrievalInfo, error) { + return *new([]api.RetrievalInfo), xerrors.New("method not supported") +} + func (s *FullNodeStruct) ClientMinerQueryOffer(p0 context.Context, p1 address.Address, p2 cid.Cid, p3 *cid.Cid) (api.QueryOffer, error) { return s.Internal.ClientMinerQueryOffer(p0, p1, p2, p3) } @@ -818,6 +852,14 @@ func (s *FullNodeStub) ClientStartDeal(p0 context.Context, p1 *api.StartDealPara return nil, xerrors.New("method not supported") } +func (s *FullNodeStruct) ClientStatelessDeal(p0 context.Context, p1 *api.StartDealParams) (*cid.Cid, error) { + return s.Internal.ClientStatelessDeal(p0, p1) +} + +func (s *FullNodeStub) ClientStatelessDeal(p0 context.Context, p1 *api.StartDealParams) (*cid.Cid, error) { + return nil, xerrors.New("method not supported") +} + func (s *FullNodeStruct) CreateBackup(p0 context.Context, p1 string) error { return s.Internal.CreateBackup(p0, p1) } @@ -2066,6 +2108,14 @@ func (s *GatewayStub) StateWaitMsg(p0 context.Context, p1 cid.Cid, p2 uint64) (* return nil, xerrors.New("method not supported") } +func (s *GatewayStruct) Version(p0 context.Context) (api.APIVersion, error) { + return s.Internal.Version(p0) +} + +func (s *GatewayStub) Version(p0 context.Context) (api.APIVersion, error) { + return *new(api.APIVersion), 
xerrors.New("method not supported") +} + func (s *GatewayStruct) WalletBalance(p0 context.Context, p1 address.Address) (types.BigInt, error) { return s.Internal.WalletBalance(p0, p1) } diff --git a/api/v0api/v0mocks/mock_full.go b/api/v0api/v0mocks/mock_full.go index 165b07165..6a4ef690e 100644 --- a/api/v0api/v0mocks/mock_full.go +++ b/api/v0api/v0mocks/mock_full.go @@ -37,30 +37,30 @@ import ( protocol "github.com/libp2p/go-libp2p-core/protocol" ) -// MockFullNode is a mock of FullNode interface +// MockFullNode is a mock of FullNode interface. type MockFullNode struct { ctrl *gomock.Controller recorder *MockFullNodeMockRecorder } -// MockFullNodeMockRecorder is the mock recorder for MockFullNode +// MockFullNodeMockRecorder is the mock recorder for MockFullNode. type MockFullNodeMockRecorder struct { mock *MockFullNode } -// NewMockFullNode creates a new mock instance +// NewMockFullNode creates a new mock instance. func NewMockFullNode(ctrl *gomock.Controller) *MockFullNode { mock := &MockFullNode{ctrl: ctrl} mock.recorder = &MockFullNodeMockRecorder{mock} return mock } -// EXPECT returns an object that allows the caller to indicate expected use +// EXPECT returns an object that allows the caller to indicate expected use. func (m *MockFullNode) EXPECT() *MockFullNodeMockRecorder { return m.recorder } -// AuthNew mocks base method +// AuthNew mocks base method. func (m *MockFullNode) AuthNew(arg0 context.Context, arg1 []auth.Permission) ([]byte, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "AuthNew", arg0, arg1) @@ -69,13 +69,13 @@ func (m *MockFullNode) AuthNew(arg0 context.Context, arg1 []auth.Permission) ([] return ret0, ret1 } -// AuthNew indicates an expected call of AuthNew +// AuthNew indicates an expected call of AuthNew. 
func (mr *MockFullNodeMockRecorder) AuthNew(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AuthNew", reflect.TypeOf((*MockFullNode)(nil).AuthNew), arg0, arg1) } -// AuthVerify mocks base method +// AuthVerify mocks base method. func (m *MockFullNode) AuthVerify(arg0 context.Context, arg1 string) ([]auth.Permission, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "AuthVerify", arg0, arg1) @@ -84,13 +84,13 @@ func (m *MockFullNode) AuthVerify(arg0 context.Context, arg1 string) ([]auth.Per return ret0, ret1 } -// AuthVerify indicates an expected call of AuthVerify +// AuthVerify indicates an expected call of AuthVerify. func (mr *MockFullNodeMockRecorder) AuthVerify(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AuthVerify", reflect.TypeOf((*MockFullNode)(nil).AuthVerify), arg0, arg1) } -// BeaconGetEntry mocks base method +// BeaconGetEntry mocks base method. func (m *MockFullNode) BeaconGetEntry(arg0 context.Context, arg1 abi.ChainEpoch) (*types.BeaconEntry, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "BeaconGetEntry", arg0, arg1) @@ -99,13 +99,13 @@ func (m *MockFullNode) BeaconGetEntry(arg0 context.Context, arg1 abi.ChainEpoch) return ret0, ret1 } -// BeaconGetEntry indicates an expected call of BeaconGetEntry +// BeaconGetEntry indicates an expected call of BeaconGetEntry. func (mr *MockFullNodeMockRecorder) BeaconGetEntry(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BeaconGetEntry", reflect.TypeOf((*MockFullNode)(nil).BeaconGetEntry), arg0, arg1) } -// ChainDeleteObj mocks base method +// ChainDeleteObj mocks base method. 
func (m *MockFullNode) ChainDeleteObj(arg0 context.Context, arg1 cid.Cid) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ChainDeleteObj", arg0, arg1) @@ -113,13 +113,13 @@ func (m *MockFullNode) ChainDeleteObj(arg0 context.Context, arg1 cid.Cid) error return ret0 } -// ChainDeleteObj indicates an expected call of ChainDeleteObj +// ChainDeleteObj indicates an expected call of ChainDeleteObj. func (mr *MockFullNodeMockRecorder) ChainDeleteObj(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainDeleteObj", reflect.TypeOf((*MockFullNode)(nil).ChainDeleteObj), arg0, arg1) } -// ChainExport mocks base method +// ChainExport mocks base method. func (m *MockFullNode) ChainExport(arg0 context.Context, arg1 abi.ChainEpoch, arg2 bool, arg3 types.TipSetKey) (<-chan []byte, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ChainExport", arg0, arg1, arg2, arg3) @@ -128,13 +128,13 @@ func (m *MockFullNode) ChainExport(arg0 context.Context, arg1 abi.ChainEpoch, ar return ret0, ret1 } -// ChainExport indicates an expected call of ChainExport +// ChainExport indicates an expected call of ChainExport. func (mr *MockFullNodeMockRecorder) ChainExport(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainExport", reflect.TypeOf((*MockFullNode)(nil).ChainExport), arg0, arg1, arg2, arg3) } -// ChainGetBlock mocks base method +// ChainGetBlock mocks base method. func (m *MockFullNode) ChainGetBlock(arg0 context.Context, arg1 cid.Cid) (*types.BlockHeader, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ChainGetBlock", arg0, arg1) @@ -143,13 +143,13 @@ func (m *MockFullNode) ChainGetBlock(arg0 context.Context, arg1 cid.Cid) (*types return ret0, ret1 } -// ChainGetBlock indicates an expected call of ChainGetBlock +// ChainGetBlock indicates an expected call of ChainGetBlock. 
func (mr *MockFullNodeMockRecorder) ChainGetBlock(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainGetBlock", reflect.TypeOf((*MockFullNode)(nil).ChainGetBlock), arg0, arg1) } -// ChainGetBlockMessages mocks base method +// ChainGetBlockMessages mocks base method. func (m *MockFullNode) ChainGetBlockMessages(arg0 context.Context, arg1 cid.Cid) (*api.BlockMessages, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ChainGetBlockMessages", arg0, arg1) @@ -158,13 +158,13 @@ func (m *MockFullNode) ChainGetBlockMessages(arg0 context.Context, arg1 cid.Cid) return ret0, ret1 } -// ChainGetBlockMessages indicates an expected call of ChainGetBlockMessages +// ChainGetBlockMessages indicates an expected call of ChainGetBlockMessages. func (mr *MockFullNodeMockRecorder) ChainGetBlockMessages(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainGetBlockMessages", reflect.TypeOf((*MockFullNode)(nil).ChainGetBlockMessages), arg0, arg1) } -// ChainGetGenesis mocks base method +// ChainGetGenesis mocks base method. func (m *MockFullNode) ChainGetGenesis(arg0 context.Context) (*types.TipSet, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ChainGetGenesis", arg0) @@ -173,13 +173,13 @@ func (m *MockFullNode) ChainGetGenesis(arg0 context.Context) (*types.TipSet, err return ret0, ret1 } -// ChainGetGenesis indicates an expected call of ChainGetGenesis +// ChainGetGenesis indicates an expected call of ChainGetGenesis. func (mr *MockFullNodeMockRecorder) ChainGetGenesis(arg0 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainGetGenesis", reflect.TypeOf((*MockFullNode)(nil).ChainGetGenesis), arg0) } -// ChainGetMessage mocks base method +// ChainGetMessage mocks base method. 
func (m *MockFullNode) ChainGetMessage(arg0 context.Context, arg1 cid.Cid) (*types.Message, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ChainGetMessage", arg0, arg1) @@ -188,13 +188,28 @@ func (m *MockFullNode) ChainGetMessage(arg0 context.Context, arg1 cid.Cid) (*typ return ret0, ret1 } -// ChainGetMessage indicates an expected call of ChainGetMessage +// ChainGetMessage indicates an expected call of ChainGetMessage. func (mr *MockFullNodeMockRecorder) ChainGetMessage(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainGetMessage", reflect.TypeOf((*MockFullNode)(nil).ChainGetMessage), arg0, arg1) } -// ChainGetNode mocks base method +// ChainGetMessagesInTipset mocks base method. +func (m *MockFullNode) ChainGetMessagesInTipset(arg0 context.Context, arg1 types.TipSetKey) ([]api.Message, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ChainGetMessagesInTipset", arg0, arg1) + ret0, _ := ret[0].([]api.Message) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ChainGetMessagesInTipset indicates an expected call of ChainGetMessagesInTipset. +func (mr *MockFullNodeMockRecorder) ChainGetMessagesInTipset(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainGetMessagesInTipset", reflect.TypeOf((*MockFullNode)(nil).ChainGetMessagesInTipset), arg0, arg1) +} + +// ChainGetNode mocks base method. func (m *MockFullNode) ChainGetNode(arg0 context.Context, arg1 string) (*api.IpldObject, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ChainGetNode", arg0, arg1) @@ -203,13 +218,13 @@ func (m *MockFullNode) ChainGetNode(arg0 context.Context, arg1 string) (*api.Ipl return ret0, ret1 } -// ChainGetNode indicates an expected call of ChainGetNode +// ChainGetNode indicates an expected call of ChainGetNode. 
func (mr *MockFullNodeMockRecorder) ChainGetNode(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainGetNode", reflect.TypeOf((*MockFullNode)(nil).ChainGetNode), arg0, arg1) } -// ChainGetParentMessages mocks base method +// ChainGetParentMessages mocks base method. func (m *MockFullNode) ChainGetParentMessages(arg0 context.Context, arg1 cid.Cid) ([]api.Message, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ChainGetParentMessages", arg0, arg1) @@ -218,13 +233,13 @@ func (m *MockFullNode) ChainGetParentMessages(arg0 context.Context, arg1 cid.Cid return ret0, ret1 } -// ChainGetParentMessages indicates an expected call of ChainGetParentMessages +// ChainGetParentMessages indicates an expected call of ChainGetParentMessages. func (mr *MockFullNodeMockRecorder) ChainGetParentMessages(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainGetParentMessages", reflect.TypeOf((*MockFullNode)(nil).ChainGetParentMessages), arg0, arg1) } -// ChainGetParentReceipts mocks base method +// ChainGetParentReceipts mocks base method. func (m *MockFullNode) ChainGetParentReceipts(arg0 context.Context, arg1 cid.Cid) ([]*types.MessageReceipt, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ChainGetParentReceipts", arg0, arg1) @@ -233,13 +248,13 @@ func (m *MockFullNode) ChainGetParentReceipts(arg0 context.Context, arg1 cid.Cid return ret0, ret1 } -// ChainGetParentReceipts indicates an expected call of ChainGetParentReceipts +// ChainGetParentReceipts indicates an expected call of ChainGetParentReceipts. 
func (mr *MockFullNodeMockRecorder) ChainGetParentReceipts(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainGetParentReceipts", reflect.TypeOf((*MockFullNode)(nil).ChainGetParentReceipts), arg0, arg1) } -// ChainGetPath mocks base method +// ChainGetPath mocks base method. func (m *MockFullNode) ChainGetPath(arg0 context.Context, arg1, arg2 types.TipSetKey) ([]*api.HeadChange, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ChainGetPath", arg0, arg1, arg2) @@ -248,13 +263,13 @@ func (m *MockFullNode) ChainGetPath(arg0 context.Context, arg1, arg2 types.TipSe return ret0, ret1 } -// ChainGetPath indicates an expected call of ChainGetPath +// ChainGetPath indicates an expected call of ChainGetPath. func (mr *MockFullNodeMockRecorder) ChainGetPath(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainGetPath", reflect.TypeOf((*MockFullNode)(nil).ChainGetPath), arg0, arg1, arg2) } -// ChainGetRandomnessFromBeacon mocks base method +// ChainGetRandomnessFromBeacon mocks base method. func (m *MockFullNode) ChainGetRandomnessFromBeacon(arg0 context.Context, arg1 types.TipSetKey, arg2 crypto.DomainSeparationTag, arg3 abi.ChainEpoch, arg4 []byte) (abi.Randomness, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ChainGetRandomnessFromBeacon", arg0, arg1, arg2, arg3, arg4) @@ -263,13 +278,13 @@ func (m *MockFullNode) ChainGetRandomnessFromBeacon(arg0 context.Context, arg1 t return ret0, ret1 } -// ChainGetRandomnessFromBeacon indicates an expected call of ChainGetRandomnessFromBeacon +// ChainGetRandomnessFromBeacon indicates an expected call of ChainGetRandomnessFromBeacon. 
func (mr *MockFullNodeMockRecorder) ChainGetRandomnessFromBeacon(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainGetRandomnessFromBeacon", reflect.TypeOf((*MockFullNode)(nil).ChainGetRandomnessFromBeacon), arg0, arg1, arg2, arg3, arg4) } -// ChainGetRandomnessFromTickets mocks base method +// ChainGetRandomnessFromTickets mocks base method. func (m *MockFullNode) ChainGetRandomnessFromTickets(arg0 context.Context, arg1 types.TipSetKey, arg2 crypto.DomainSeparationTag, arg3 abi.ChainEpoch, arg4 []byte) (abi.Randomness, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ChainGetRandomnessFromTickets", arg0, arg1, arg2, arg3, arg4) @@ -278,13 +293,13 @@ func (m *MockFullNode) ChainGetRandomnessFromTickets(arg0 context.Context, arg1 return ret0, ret1 } -// ChainGetRandomnessFromTickets indicates an expected call of ChainGetRandomnessFromTickets +// ChainGetRandomnessFromTickets indicates an expected call of ChainGetRandomnessFromTickets. func (mr *MockFullNodeMockRecorder) ChainGetRandomnessFromTickets(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainGetRandomnessFromTickets", reflect.TypeOf((*MockFullNode)(nil).ChainGetRandomnessFromTickets), arg0, arg1, arg2, arg3, arg4) } -// ChainGetTipSet mocks base method +// ChainGetTipSet mocks base method. func (m *MockFullNode) ChainGetTipSet(arg0 context.Context, arg1 types.TipSetKey) (*types.TipSet, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ChainGetTipSet", arg0, arg1) @@ -293,13 +308,13 @@ func (m *MockFullNode) ChainGetTipSet(arg0 context.Context, arg1 types.TipSetKey return ret0, ret1 } -// ChainGetTipSet indicates an expected call of ChainGetTipSet +// ChainGetTipSet indicates an expected call of ChainGetTipSet. 
func (mr *MockFullNodeMockRecorder) ChainGetTipSet(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainGetTipSet", reflect.TypeOf((*MockFullNode)(nil).ChainGetTipSet), arg0, arg1) } -// ChainGetTipSetByHeight mocks base method +// ChainGetTipSetByHeight mocks base method. func (m *MockFullNode) ChainGetTipSetByHeight(arg0 context.Context, arg1 abi.ChainEpoch, arg2 types.TipSetKey) (*types.TipSet, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ChainGetTipSetByHeight", arg0, arg1, arg2) @@ -308,13 +323,13 @@ func (m *MockFullNode) ChainGetTipSetByHeight(arg0 context.Context, arg1 abi.Cha return ret0, ret1 } -// ChainGetTipSetByHeight indicates an expected call of ChainGetTipSetByHeight +// ChainGetTipSetByHeight indicates an expected call of ChainGetTipSetByHeight. func (mr *MockFullNodeMockRecorder) ChainGetTipSetByHeight(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainGetTipSetByHeight", reflect.TypeOf((*MockFullNode)(nil).ChainGetTipSetByHeight), arg0, arg1, arg2) } -// ChainHasObj mocks base method +// ChainHasObj mocks base method. func (m *MockFullNode) ChainHasObj(arg0 context.Context, arg1 cid.Cid) (bool, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ChainHasObj", arg0, arg1) @@ -323,13 +338,13 @@ func (m *MockFullNode) ChainHasObj(arg0 context.Context, arg1 cid.Cid) (bool, er return ret0, ret1 } -// ChainHasObj indicates an expected call of ChainHasObj +// ChainHasObj indicates an expected call of ChainHasObj. func (mr *MockFullNodeMockRecorder) ChainHasObj(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainHasObj", reflect.TypeOf((*MockFullNode)(nil).ChainHasObj), arg0, arg1) } -// ChainHead mocks base method +// ChainHead mocks base method. 
func (m *MockFullNode) ChainHead(arg0 context.Context) (*types.TipSet, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ChainHead", arg0) @@ -338,13 +353,13 @@ func (m *MockFullNode) ChainHead(arg0 context.Context) (*types.TipSet, error) { return ret0, ret1 } -// ChainHead indicates an expected call of ChainHead +// ChainHead indicates an expected call of ChainHead. func (mr *MockFullNodeMockRecorder) ChainHead(arg0 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainHead", reflect.TypeOf((*MockFullNode)(nil).ChainHead), arg0) } -// ChainNotify mocks base method +// ChainNotify mocks base method. func (m *MockFullNode) ChainNotify(arg0 context.Context) (<-chan []*api.HeadChange, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ChainNotify", arg0) @@ -353,13 +368,13 @@ func (m *MockFullNode) ChainNotify(arg0 context.Context) (<-chan []*api.HeadChan return ret0, ret1 } -// ChainNotify indicates an expected call of ChainNotify +// ChainNotify indicates an expected call of ChainNotify. func (mr *MockFullNodeMockRecorder) ChainNotify(arg0 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainNotify", reflect.TypeOf((*MockFullNode)(nil).ChainNotify), arg0) } -// ChainReadObj mocks base method +// ChainReadObj mocks base method. func (m *MockFullNode) ChainReadObj(arg0 context.Context, arg1 cid.Cid) ([]byte, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ChainReadObj", arg0, arg1) @@ -368,13 +383,13 @@ func (m *MockFullNode) ChainReadObj(arg0 context.Context, arg1 cid.Cid) ([]byte, return ret0, ret1 } -// ChainReadObj indicates an expected call of ChainReadObj +// ChainReadObj indicates an expected call of ChainReadObj. 
func (mr *MockFullNodeMockRecorder) ChainReadObj(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainReadObj", reflect.TypeOf((*MockFullNode)(nil).ChainReadObj), arg0, arg1) } -// ChainSetHead mocks base method +// ChainSetHead mocks base method. func (m *MockFullNode) ChainSetHead(arg0 context.Context, arg1 types.TipSetKey) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ChainSetHead", arg0, arg1) @@ -382,13 +397,13 @@ func (m *MockFullNode) ChainSetHead(arg0 context.Context, arg1 types.TipSetKey) return ret0 } -// ChainSetHead indicates an expected call of ChainSetHead +// ChainSetHead indicates an expected call of ChainSetHead. func (mr *MockFullNodeMockRecorder) ChainSetHead(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainSetHead", reflect.TypeOf((*MockFullNode)(nil).ChainSetHead), arg0, arg1) } -// ChainStatObj mocks base method +// ChainStatObj mocks base method. func (m *MockFullNode) ChainStatObj(arg0 context.Context, arg1, arg2 cid.Cid) (api.ObjStat, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ChainStatObj", arg0, arg1, arg2) @@ -397,13 +412,13 @@ func (m *MockFullNode) ChainStatObj(arg0 context.Context, arg1, arg2 cid.Cid) (a return ret0, ret1 } -// ChainStatObj indicates an expected call of ChainStatObj +// ChainStatObj indicates an expected call of ChainStatObj. func (mr *MockFullNodeMockRecorder) ChainStatObj(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainStatObj", reflect.TypeOf((*MockFullNode)(nil).ChainStatObj), arg0, arg1, arg2) } -// ChainTipSetWeight mocks base method +// ChainTipSetWeight mocks base method. 
func (m *MockFullNode) ChainTipSetWeight(arg0 context.Context, arg1 types.TipSetKey) (big.Int, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ChainTipSetWeight", arg0, arg1) @@ -412,13 +427,13 @@ func (m *MockFullNode) ChainTipSetWeight(arg0 context.Context, arg1 types.TipSet return ret0, ret1 } -// ChainTipSetWeight indicates an expected call of ChainTipSetWeight +// ChainTipSetWeight indicates an expected call of ChainTipSetWeight. func (mr *MockFullNodeMockRecorder) ChainTipSetWeight(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainTipSetWeight", reflect.TypeOf((*MockFullNode)(nil).ChainTipSetWeight), arg0, arg1) } -// ClientCalcCommP mocks base method +// ClientCalcCommP mocks base method. func (m *MockFullNode) ClientCalcCommP(arg0 context.Context, arg1 string) (*api.CommPRet, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ClientCalcCommP", arg0, arg1) @@ -427,13 +442,13 @@ func (m *MockFullNode) ClientCalcCommP(arg0 context.Context, arg1 string) (*api. return ret0, ret1 } -// ClientCalcCommP indicates an expected call of ClientCalcCommP +// ClientCalcCommP indicates an expected call of ClientCalcCommP. func (mr *MockFullNodeMockRecorder) ClientCalcCommP(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientCalcCommP", reflect.TypeOf((*MockFullNode)(nil).ClientCalcCommP), arg0, arg1) } -// ClientCancelDataTransfer mocks base method +// ClientCancelDataTransfer mocks base method. 
func (m *MockFullNode) ClientCancelDataTransfer(arg0 context.Context, arg1 datatransfer.TransferID, arg2 peer.ID, arg3 bool) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ClientCancelDataTransfer", arg0, arg1, arg2, arg3) @@ -441,13 +456,13 @@ func (m *MockFullNode) ClientCancelDataTransfer(arg0 context.Context, arg1 datat return ret0 } -// ClientCancelDataTransfer indicates an expected call of ClientCancelDataTransfer +// ClientCancelDataTransfer indicates an expected call of ClientCancelDataTransfer. func (mr *MockFullNodeMockRecorder) ClientCancelDataTransfer(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientCancelDataTransfer", reflect.TypeOf((*MockFullNode)(nil).ClientCancelDataTransfer), arg0, arg1, arg2, arg3) } -// ClientCancelRetrievalDeal mocks base method +// ClientCancelRetrievalDeal mocks base method. func (m *MockFullNode) ClientCancelRetrievalDeal(arg0 context.Context, arg1 retrievalmarket.DealID) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ClientCancelRetrievalDeal", arg0, arg1) @@ -455,13 +470,13 @@ func (m *MockFullNode) ClientCancelRetrievalDeal(arg0 context.Context, arg1 retr return ret0 } -// ClientCancelRetrievalDeal indicates an expected call of ClientCancelRetrievalDeal +// ClientCancelRetrievalDeal indicates an expected call of ClientCancelRetrievalDeal. func (mr *MockFullNodeMockRecorder) ClientCancelRetrievalDeal(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientCancelRetrievalDeal", reflect.TypeOf((*MockFullNode)(nil).ClientCancelRetrievalDeal), arg0, arg1) } -// ClientDataTransferUpdates mocks base method +// ClientDataTransferUpdates mocks base method. 
func (m *MockFullNode) ClientDataTransferUpdates(arg0 context.Context) (<-chan api.DataTransferChannel, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ClientDataTransferUpdates", arg0) @@ -470,13 +485,13 @@ func (m *MockFullNode) ClientDataTransferUpdates(arg0 context.Context) (<-chan a return ret0, ret1 } -// ClientDataTransferUpdates indicates an expected call of ClientDataTransferUpdates +// ClientDataTransferUpdates indicates an expected call of ClientDataTransferUpdates. func (mr *MockFullNodeMockRecorder) ClientDataTransferUpdates(arg0 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientDataTransferUpdates", reflect.TypeOf((*MockFullNode)(nil).ClientDataTransferUpdates), arg0) } -// ClientDealPieceCID mocks base method +// ClientDealPieceCID mocks base method. func (m *MockFullNode) ClientDealPieceCID(arg0 context.Context, arg1 cid.Cid) (api.DataCIDSize, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ClientDealPieceCID", arg0, arg1) @@ -485,13 +500,13 @@ func (m *MockFullNode) ClientDealPieceCID(arg0 context.Context, arg1 cid.Cid) (a return ret0, ret1 } -// ClientDealPieceCID indicates an expected call of ClientDealPieceCID +// ClientDealPieceCID indicates an expected call of ClientDealPieceCID. func (mr *MockFullNodeMockRecorder) ClientDealPieceCID(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientDealPieceCID", reflect.TypeOf((*MockFullNode)(nil).ClientDealPieceCID), arg0, arg1) } -// ClientDealSize mocks base method +// ClientDealSize mocks base method. 
func (m *MockFullNode) ClientDealSize(arg0 context.Context, arg1 cid.Cid) (api.DataSize, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ClientDealSize", arg0, arg1) @@ -500,13 +515,13 @@ func (m *MockFullNode) ClientDealSize(arg0 context.Context, arg1 cid.Cid) (api.D return ret0, ret1 } -// ClientDealSize indicates an expected call of ClientDealSize +// ClientDealSize indicates an expected call of ClientDealSize. func (mr *MockFullNodeMockRecorder) ClientDealSize(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientDealSize", reflect.TypeOf((*MockFullNode)(nil).ClientDealSize), arg0, arg1) } -// ClientFindData mocks base method +// ClientFindData mocks base method. func (m *MockFullNode) ClientFindData(arg0 context.Context, arg1 cid.Cid, arg2 *cid.Cid) ([]api.QueryOffer, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ClientFindData", arg0, arg1, arg2) @@ -515,13 +530,13 @@ func (m *MockFullNode) ClientFindData(arg0 context.Context, arg1 cid.Cid, arg2 * return ret0, ret1 } -// ClientFindData indicates an expected call of ClientFindData +// ClientFindData indicates an expected call of ClientFindData. func (mr *MockFullNodeMockRecorder) ClientFindData(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientFindData", reflect.TypeOf((*MockFullNode)(nil).ClientFindData), arg0, arg1, arg2) } -// ClientGenCar mocks base method +// ClientGenCar mocks base method. func (m *MockFullNode) ClientGenCar(arg0 context.Context, arg1 api.FileRef, arg2 string) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ClientGenCar", arg0, arg1, arg2) @@ -529,13 +544,13 @@ func (m *MockFullNode) ClientGenCar(arg0 context.Context, arg1 api.FileRef, arg2 return ret0 } -// ClientGenCar indicates an expected call of ClientGenCar +// ClientGenCar indicates an expected call of ClientGenCar. 
func (mr *MockFullNodeMockRecorder) ClientGenCar(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientGenCar", reflect.TypeOf((*MockFullNode)(nil).ClientGenCar), arg0, arg1, arg2) } -// ClientGetDealInfo mocks base method +// ClientGetDealInfo mocks base method. func (m *MockFullNode) ClientGetDealInfo(arg0 context.Context, arg1 cid.Cid) (*api.DealInfo, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ClientGetDealInfo", arg0, arg1) @@ -544,13 +559,13 @@ func (m *MockFullNode) ClientGetDealInfo(arg0 context.Context, arg1 cid.Cid) (*a return ret0, ret1 } -// ClientGetDealInfo indicates an expected call of ClientGetDealInfo +// ClientGetDealInfo indicates an expected call of ClientGetDealInfo. func (mr *MockFullNodeMockRecorder) ClientGetDealInfo(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientGetDealInfo", reflect.TypeOf((*MockFullNode)(nil).ClientGetDealInfo), arg0, arg1) } -// ClientGetDealStatus mocks base method +// ClientGetDealStatus mocks base method. func (m *MockFullNode) ClientGetDealStatus(arg0 context.Context, arg1 uint64) (string, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ClientGetDealStatus", arg0, arg1) @@ -559,13 +574,13 @@ func (m *MockFullNode) ClientGetDealStatus(arg0 context.Context, arg1 uint64) (s return ret0, ret1 } -// ClientGetDealStatus indicates an expected call of ClientGetDealStatus +// ClientGetDealStatus indicates an expected call of ClientGetDealStatus. func (mr *MockFullNodeMockRecorder) ClientGetDealStatus(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientGetDealStatus", reflect.TypeOf((*MockFullNode)(nil).ClientGetDealStatus), arg0, arg1) } -// ClientGetDealUpdates mocks base method +// ClientGetDealUpdates mocks base method. 
func (m *MockFullNode) ClientGetDealUpdates(arg0 context.Context) (<-chan api.DealInfo, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ClientGetDealUpdates", arg0) @@ -574,13 +589,28 @@ func (m *MockFullNode) ClientGetDealUpdates(arg0 context.Context) (<-chan api.De return ret0, ret1 } -// ClientGetDealUpdates indicates an expected call of ClientGetDealUpdates +// ClientGetDealUpdates indicates an expected call of ClientGetDealUpdates. func (mr *MockFullNodeMockRecorder) ClientGetDealUpdates(arg0 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientGetDealUpdates", reflect.TypeOf((*MockFullNode)(nil).ClientGetDealUpdates), arg0) } -// ClientHasLocal mocks base method +// ClientGetRetrievalUpdates mocks base method. +func (m *MockFullNode) ClientGetRetrievalUpdates(arg0 context.Context) (<-chan api.RetrievalInfo, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ClientGetRetrievalUpdates", arg0) + ret0, _ := ret[0].(<-chan api.RetrievalInfo) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ClientGetRetrievalUpdates indicates an expected call of ClientGetRetrievalUpdates. +func (mr *MockFullNodeMockRecorder) ClientGetRetrievalUpdates(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientGetRetrievalUpdates", reflect.TypeOf((*MockFullNode)(nil).ClientGetRetrievalUpdates), arg0) +} + +// ClientHasLocal mocks base method. func (m *MockFullNode) ClientHasLocal(arg0 context.Context, arg1 cid.Cid) (bool, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ClientHasLocal", arg0, arg1) @@ -589,13 +619,13 @@ func (m *MockFullNode) ClientHasLocal(arg0 context.Context, arg1 cid.Cid) (bool, return ret0, ret1 } -// ClientHasLocal indicates an expected call of ClientHasLocal +// ClientHasLocal indicates an expected call of ClientHasLocal. 
func (mr *MockFullNodeMockRecorder) ClientHasLocal(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientHasLocal", reflect.TypeOf((*MockFullNode)(nil).ClientHasLocal), arg0, arg1) } -// ClientImport mocks base method +// ClientImport mocks base method. func (m *MockFullNode) ClientImport(arg0 context.Context, arg1 api.FileRef) (*api.ImportRes, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ClientImport", arg0, arg1) @@ -604,13 +634,13 @@ func (m *MockFullNode) ClientImport(arg0 context.Context, arg1 api.FileRef) (*ap return ret0, ret1 } -// ClientImport indicates an expected call of ClientImport +// ClientImport indicates an expected call of ClientImport. func (mr *MockFullNodeMockRecorder) ClientImport(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientImport", reflect.TypeOf((*MockFullNode)(nil).ClientImport), arg0, arg1) } -// ClientListDataTransfers mocks base method +// ClientListDataTransfers mocks base method. func (m *MockFullNode) ClientListDataTransfers(arg0 context.Context) ([]api.DataTransferChannel, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ClientListDataTransfers", arg0) @@ -619,13 +649,13 @@ func (m *MockFullNode) ClientListDataTransfers(arg0 context.Context) ([]api.Data return ret0, ret1 } -// ClientListDataTransfers indicates an expected call of ClientListDataTransfers +// ClientListDataTransfers indicates an expected call of ClientListDataTransfers. func (mr *MockFullNodeMockRecorder) ClientListDataTransfers(arg0 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientListDataTransfers", reflect.TypeOf((*MockFullNode)(nil).ClientListDataTransfers), arg0) } -// ClientListDeals mocks base method +// ClientListDeals mocks base method. 
func (m *MockFullNode) ClientListDeals(arg0 context.Context) ([]api.DealInfo, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ClientListDeals", arg0) @@ -634,13 +664,13 @@ func (m *MockFullNode) ClientListDeals(arg0 context.Context) ([]api.DealInfo, er return ret0, ret1 } -// ClientListDeals indicates an expected call of ClientListDeals +// ClientListDeals indicates an expected call of ClientListDeals. func (mr *MockFullNodeMockRecorder) ClientListDeals(arg0 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientListDeals", reflect.TypeOf((*MockFullNode)(nil).ClientListDeals), arg0) } -// ClientListImports mocks base method +// ClientListImports mocks base method. func (m *MockFullNode) ClientListImports(arg0 context.Context) ([]api.Import, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ClientListImports", arg0) @@ -649,13 +679,28 @@ func (m *MockFullNode) ClientListImports(arg0 context.Context) ([]api.Import, er return ret0, ret1 } -// ClientListImports indicates an expected call of ClientListImports +// ClientListImports indicates an expected call of ClientListImports. func (mr *MockFullNodeMockRecorder) ClientListImports(arg0 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientListImports", reflect.TypeOf((*MockFullNode)(nil).ClientListImports), arg0) } -// ClientMinerQueryOffer mocks base method +// ClientListRetrievals mocks base method. +func (m *MockFullNode) ClientListRetrievals(arg0 context.Context) ([]api.RetrievalInfo, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ClientListRetrievals", arg0) + ret0, _ := ret[0].([]api.RetrievalInfo) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ClientListRetrievals indicates an expected call of ClientListRetrievals. 
+func (mr *MockFullNodeMockRecorder) ClientListRetrievals(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientListRetrievals", reflect.TypeOf((*MockFullNode)(nil).ClientListRetrievals), arg0) +} + +// ClientMinerQueryOffer mocks base method. func (m *MockFullNode) ClientMinerQueryOffer(arg0 context.Context, arg1 address.Address, arg2 cid.Cid, arg3 *cid.Cid) (api.QueryOffer, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ClientMinerQueryOffer", arg0, arg1, arg2, arg3) @@ -664,13 +709,13 @@ func (m *MockFullNode) ClientMinerQueryOffer(arg0 context.Context, arg1 address. return ret0, ret1 } -// ClientMinerQueryOffer indicates an expected call of ClientMinerQueryOffer +// ClientMinerQueryOffer indicates an expected call of ClientMinerQueryOffer. func (mr *MockFullNodeMockRecorder) ClientMinerQueryOffer(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientMinerQueryOffer", reflect.TypeOf((*MockFullNode)(nil).ClientMinerQueryOffer), arg0, arg1, arg2, arg3) } -// ClientQueryAsk mocks base method +// ClientQueryAsk mocks base method. func (m *MockFullNode) ClientQueryAsk(arg0 context.Context, arg1 peer.ID, arg2 address.Address) (*storagemarket.StorageAsk, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ClientQueryAsk", arg0, arg1, arg2) @@ -679,13 +724,13 @@ func (m *MockFullNode) ClientQueryAsk(arg0 context.Context, arg1 peer.ID, arg2 a return ret0, ret1 } -// ClientQueryAsk indicates an expected call of ClientQueryAsk +// ClientQueryAsk indicates an expected call of ClientQueryAsk. 
func (mr *MockFullNodeMockRecorder) ClientQueryAsk(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientQueryAsk", reflect.TypeOf((*MockFullNode)(nil).ClientQueryAsk), arg0, arg1, arg2) } -// ClientRemoveImport mocks base method +// ClientRemoveImport mocks base method. func (m *MockFullNode) ClientRemoveImport(arg0 context.Context, arg1 multistore.StoreID) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ClientRemoveImport", arg0, arg1) @@ -693,13 +738,13 @@ func (m *MockFullNode) ClientRemoveImport(arg0 context.Context, arg1 multistore. return ret0 } -// ClientRemoveImport indicates an expected call of ClientRemoveImport +// ClientRemoveImport indicates an expected call of ClientRemoveImport. func (mr *MockFullNodeMockRecorder) ClientRemoveImport(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientRemoveImport", reflect.TypeOf((*MockFullNode)(nil).ClientRemoveImport), arg0, arg1) } -// ClientRestartDataTransfer mocks base method +// ClientRestartDataTransfer mocks base method. func (m *MockFullNode) ClientRestartDataTransfer(arg0 context.Context, arg1 datatransfer.TransferID, arg2 peer.ID, arg3 bool) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ClientRestartDataTransfer", arg0, arg1, arg2, arg3) @@ -707,13 +752,13 @@ func (m *MockFullNode) ClientRestartDataTransfer(arg0 context.Context, arg1 data return ret0 } -// ClientRestartDataTransfer indicates an expected call of ClientRestartDataTransfer +// ClientRestartDataTransfer indicates an expected call of ClientRestartDataTransfer. 
func (mr *MockFullNodeMockRecorder) ClientRestartDataTransfer(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientRestartDataTransfer", reflect.TypeOf((*MockFullNode)(nil).ClientRestartDataTransfer), arg0, arg1, arg2, arg3) } -// ClientRetrieve mocks base method +// ClientRetrieve mocks base method. func (m *MockFullNode) ClientRetrieve(arg0 context.Context, arg1 api.RetrievalOrder, arg2 *api.FileRef) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ClientRetrieve", arg0, arg1, arg2) @@ -721,13 +766,13 @@ func (m *MockFullNode) ClientRetrieve(arg0 context.Context, arg1 api.RetrievalOr return ret0 } -// ClientRetrieve indicates an expected call of ClientRetrieve +// ClientRetrieve indicates an expected call of ClientRetrieve. func (mr *MockFullNodeMockRecorder) ClientRetrieve(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientRetrieve", reflect.TypeOf((*MockFullNode)(nil).ClientRetrieve), arg0, arg1, arg2) } -// ClientRetrieveTryRestartInsufficientFunds mocks base method +// ClientRetrieveTryRestartInsufficientFunds mocks base method. func (m *MockFullNode) ClientRetrieveTryRestartInsufficientFunds(arg0 context.Context, arg1 address.Address) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ClientRetrieveTryRestartInsufficientFunds", arg0, arg1) @@ -735,13 +780,13 @@ func (m *MockFullNode) ClientRetrieveTryRestartInsufficientFunds(arg0 context.Co return ret0 } -// ClientRetrieveTryRestartInsufficientFunds indicates an expected call of ClientRetrieveTryRestartInsufficientFunds +// ClientRetrieveTryRestartInsufficientFunds indicates an expected call of ClientRetrieveTryRestartInsufficientFunds. 
func (mr *MockFullNodeMockRecorder) ClientRetrieveTryRestartInsufficientFunds(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientRetrieveTryRestartInsufficientFunds", reflect.TypeOf((*MockFullNode)(nil).ClientRetrieveTryRestartInsufficientFunds), arg0, arg1) } -// ClientRetrieveWithEvents mocks base method +// ClientRetrieveWithEvents mocks base method. func (m *MockFullNode) ClientRetrieveWithEvents(arg0 context.Context, arg1 api.RetrievalOrder, arg2 *api.FileRef) (<-chan marketevents.RetrievalEvent, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ClientRetrieveWithEvents", arg0, arg1, arg2) @@ -750,13 +795,13 @@ func (m *MockFullNode) ClientRetrieveWithEvents(arg0 context.Context, arg1 api.R return ret0, ret1 } -// ClientRetrieveWithEvents indicates an expected call of ClientRetrieveWithEvents +// ClientRetrieveWithEvents indicates an expected call of ClientRetrieveWithEvents. func (mr *MockFullNodeMockRecorder) ClientRetrieveWithEvents(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientRetrieveWithEvents", reflect.TypeOf((*MockFullNode)(nil).ClientRetrieveWithEvents), arg0, arg1, arg2) } -// ClientStartDeal mocks base method +// ClientStartDeal mocks base method. func (m *MockFullNode) ClientStartDeal(arg0 context.Context, arg1 *api.StartDealParams) (*cid.Cid, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ClientStartDeal", arg0, arg1) @@ -765,13 +810,28 @@ func (m *MockFullNode) ClientStartDeal(arg0 context.Context, arg1 *api.StartDeal return ret0, ret1 } -// ClientStartDeal indicates an expected call of ClientStartDeal +// ClientStartDeal indicates an expected call of ClientStartDeal. 
func (mr *MockFullNodeMockRecorder) ClientStartDeal(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientStartDeal", reflect.TypeOf((*MockFullNode)(nil).ClientStartDeal), arg0, arg1) } -// Closing mocks base method +// ClientStatelessDeal mocks base method. +func (m *MockFullNode) ClientStatelessDeal(arg0 context.Context, arg1 *api.StartDealParams) (*cid.Cid, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ClientStatelessDeal", arg0, arg1) + ret0, _ := ret[0].(*cid.Cid) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ClientStatelessDeal indicates an expected call of ClientStatelessDeal. +func (mr *MockFullNodeMockRecorder) ClientStatelessDeal(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientStatelessDeal", reflect.TypeOf((*MockFullNode)(nil).ClientStatelessDeal), arg0, arg1) +} + +// Closing mocks base method. func (m *MockFullNode) Closing(arg0 context.Context) (<-chan struct{}, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Closing", arg0) @@ -780,13 +840,13 @@ func (m *MockFullNode) Closing(arg0 context.Context) (<-chan struct{}, error) { return ret0, ret1 } -// Closing indicates an expected call of Closing +// Closing indicates an expected call of Closing. func (mr *MockFullNodeMockRecorder) Closing(arg0 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Closing", reflect.TypeOf((*MockFullNode)(nil).Closing), arg0) } -// CreateBackup mocks base method +// CreateBackup mocks base method. 
func (m *MockFullNode) CreateBackup(arg0 context.Context, arg1 string) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "CreateBackup", arg0, arg1) @@ -794,13 +854,13 @@ func (m *MockFullNode) CreateBackup(arg0 context.Context, arg1 string) error { return ret0 } -// CreateBackup indicates an expected call of CreateBackup +// CreateBackup indicates an expected call of CreateBackup. func (mr *MockFullNodeMockRecorder) CreateBackup(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateBackup", reflect.TypeOf((*MockFullNode)(nil).CreateBackup), arg0, arg1) } -// Discover mocks base method +// Discover mocks base method. func (m *MockFullNode) Discover(arg0 context.Context) (apitypes.OpenRPCDocument, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Discover", arg0) @@ -809,13 +869,13 @@ func (m *MockFullNode) Discover(arg0 context.Context) (apitypes.OpenRPCDocument, return ret0, ret1 } -// Discover indicates an expected call of Discover +// Discover indicates an expected call of Discover. func (mr *MockFullNodeMockRecorder) Discover(arg0 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Discover", reflect.TypeOf((*MockFullNode)(nil).Discover), arg0) } -// GasEstimateFeeCap mocks base method +// GasEstimateFeeCap mocks base method. func (m *MockFullNode) GasEstimateFeeCap(arg0 context.Context, arg1 *types.Message, arg2 int64, arg3 types.TipSetKey) (big.Int, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GasEstimateFeeCap", arg0, arg1, arg2, arg3) @@ -824,13 +884,13 @@ func (m *MockFullNode) GasEstimateFeeCap(arg0 context.Context, arg1 *types.Messa return ret0, ret1 } -// GasEstimateFeeCap indicates an expected call of GasEstimateFeeCap +// GasEstimateFeeCap indicates an expected call of GasEstimateFeeCap. 
func (mr *MockFullNodeMockRecorder) GasEstimateFeeCap(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GasEstimateFeeCap", reflect.TypeOf((*MockFullNode)(nil).GasEstimateFeeCap), arg0, arg1, arg2, arg3) } -// GasEstimateGasLimit mocks base method +// GasEstimateGasLimit mocks base method. func (m *MockFullNode) GasEstimateGasLimit(arg0 context.Context, arg1 *types.Message, arg2 types.TipSetKey) (int64, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GasEstimateGasLimit", arg0, arg1, arg2) @@ -839,13 +899,13 @@ func (m *MockFullNode) GasEstimateGasLimit(arg0 context.Context, arg1 *types.Mes return ret0, ret1 } -// GasEstimateGasLimit indicates an expected call of GasEstimateGasLimit +// GasEstimateGasLimit indicates an expected call of GasEstimateGasLimit. func (mr *MockFullNodeMockRecorder) GasEstimateGasLimit(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GasEstimateGasLimit", reflect.TypeOf((*MockFullNode)(nil).GasEstimateGasLimit), arg0, arg1, arg2) } -// GasEstimateGasPremium mocks base method +// GasEstimateGasPremium mocks base method. func (m *MockFullNode) GasEstimateGasPremium(arg0 context.Context, arg1 uint64, arg2 address.Address, arg3 int64, arg4 types.TipSetKey) (big.Int, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GasEstimateGasPremium", arg0, arg1, arg2, arg3, arg4) @@ -854,13 +914,13 @@ func (m *MockFullNode) GasEstimateGasPremium(arg0 context.Context, arg1 uint64, return ret0, ret1 } -// GasEstimateGasPremium indicates an expected call of GasEstimateGasPremium +// GasEstimateGasPremium indicates an expected call of GasEstimateGasPremium. 
func (mr *MockFullNodeMockRecorder) GasEstimateGasPremium(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GasEstimateGasPremium", reflect.TypeOf((*MockFullNode)(nil).GasEstimateGasPremium), arg0, arg1, arg2, arg3, arg4) } -// GasEstimateMessageGas mocks base method +// GasEstimateMessageGas mocks base method. func (m *MockFullNode) GasEstimateMessageGas(arg0 context.Context, arg1 *types.Message, arg2 *api.MessageSendSpec, arg3 types.TipSetKey) (*types.Message, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GasEstimateMessageGas", arg0, arg1, arg2, arg3) @@ -869,13 +929,13 @@ func (m *MockFullNode) GasEstimateMessageGas(arg0 context.Context, arg1 *types.M return ret0, ret1 } -// GasEstimateMessageGas indicates an expected call of GasEstimateMessageGas +// GasEstimateMessageGas indicates an expected call of GasEstimateMessageGas. func (mr *MockFullNodeMockRecorder) GasEstimateMessageGas(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GasEstimateMessageGas", reflect.TypeOf((*MockFullNode)(nil).GasEstimateMessageGas), arg0, arg1, arg2, arg3) } -// ID mocks base method +// ID mocks base method. func (m *MockFullNode) ID(arg0 context.Context) (peer.ID, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ID", arg0) @@ -884,13 +944,13 @@ func (m *MockFullNode) ID(arg0 context.Context) (peer.ID, error) { return ret0, ret1 } -// ID indicates an expected call of ID +// ID indicates an expected call of ID. func (mr *MockFullNodeMockRecorder) ID(arg0 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ID", reflect.TypeOf((*MockFullNode)(nil).ID), arg0) } -// LogList mocks base method +// LogList mocks base method. 
func (m *MockFullNode) LogList(arg0 context.Context) ([]string, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "LogList", arg0) @@ -899,13 +959,13 @@ func (m *MockFullNode) LogList(arg0 context.Context) ([]string, error) { return ret0, ret1 } -// LogList indicates an expected call of LogList +// LogList indicates an expected call of LogList. func (mr *MockFullNodeMockRecorder) LogList(arg0 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LogList", reflect.TypeOf((*MockFullNode)(nil).LogList), arg0) } -// LogSetLevel mocks base method +// LogSetLevel mocks base method. func (m *MockFullNode) LogSetLevel(arg0 context.Context, arg1, arg2 string) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "LogSetLevel", arg0, arg1, arg2) @@ -913,13 +973,13 @@ func (m *MockFullNode) LogSetLevel(arg0 context.Context, arg1, arg2 string) erro return ret0 } -// LogSetLevel indicates an expected call of LogSetLevel +// LogSetLevel indicates an expected call of LogSetLevel. func (mr *MockFullNodeMockRecorder) LogSetLevel(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LogSetLevel", reflect.TypeOf((*MockFullNode)(nil).LogSetLevel), arg0, arg1, arg2) } -// MarketAddBalance mocks base method +// MarketAddBalance mocks base method. func (m *MockFullNode) MarketAddBalance(arg0 context.Context, arg1, arg2 address.Address, arg3 big.Int) (cid.Cid, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "MarketAddBalance", arg0, arg1, arg2, arg3) @@ -928,13 +988,13 @@ func (m *MockFullNode) MarketAddBalance(arg0 context.Context, arg1, arg2 address return ret0, ret1 } -// MarketAddBalance indicates an expected call of MarketAddBalance +// MarketAddBalance indicates an expected call of MarketAddBalance. 
func (mr *MockFullNodeMockRecorder) MarketAddBalance(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MarketAddBalance", reflect.TypeOf((*MockFullNode)(nil).MarketAddBalance), arg0, arg1, arg2, arg3) } -// MarketGetReserved mocks base method +// MarketGetReserved mocks base method. func (m *MockFullNode) MarketGetReserved(arg0 context.Context, arg1 address.Address) (big.Int, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "MarketGetReserved", arg0, arg1) @@ -943,13 +1003,13 @@ func (m *MockFullNode) MarketGetReserved(arg0 context.Context, arg1 address.Addr return ret0, ret1 } -// MarketGetReserved indicates an expected call of MarketGetReserved +// MarketGetReserved indicates an expected call of MarketGetReserved. func (mr *MockFullNodeMockRecorder) MarketGetReserved(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MarketGetReserved", reflect.TypeOf((*MockFullNode)(nil).MarketGetReserved), arg0, arg1) } -// MarketReleaseFunds mocks base method +// MarketReleaseFunds mocks base method. func (m *MockFullNode) MarketReleaseFunds(arg0 context.Context, arg1 address.Address, arg2 big.Int) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "MarketReleaseFunds", arg0, arg1, arg2) @@ -957,13 +1017,13 @@ func (m *MockFullNode) MarketReleaseFunds(arg0 context.Context, arg1 address.Add return ret0 } -// MarketReleaseFunds indicates an expected call of MarketReleaseFunds +// MarketReleaseFunds indicates an expected call of MarketReleaseFunds. func (mr *MockFullNodeMockRecorder) MarketReleaseFunds(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MarketReleaseFunds", reflect.TypeOf((*MockFullNode)(nil).MarketReleaseFunds), arg0, arg1, arg2) } -// MarketReserveFunds mocks base method +// MarketReserveFunds mocks base method. 
func (m *MockFullNode) MarketReserveFunds(arg0 context.Context, arg1, arg2 address.Address, arg3 big.Int) (cid.Cid, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "MarketReserveFunds", arg0, arg1, arg2, arg3) @@ -972,13 +1032,13 @@ func (m *MockFullNode) MarketReserveFunds(arg0 context.Context, arg1, arg2 addre return ret0, ret1 } -// MarketReserveFunds indicates an expected call of MarketReserveFunds +// MarketReserveFunds indicates an expected call of MarketReserveFunds. func (mr *MockFullNodeMockRecorder) MarketReserveFunds(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MarketReserveFunds", reflect.TypeOf((*MockFullNode)(nil).MarketReserveFunds), arg0, arg1, arg2, arg3) } -// MarketWithdraw mocks base method +// MarketWithdraw mocks base method. func (m *MockFullNode) MarketWithdraw(arg0 context.Context, arg1, arg2 address.Address, arg3 big.Int) (cid.Cid, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "MarketWithdraw", arg0, arg1, arg2, arg3) @@ -987,13 +1047,13 @@ func (m *MockFullNode) MarketWithdraw(arg0 context.Context, arg1, arg2 address.A return ret0, ret1 } -// MarketWithdraw indicates an expected call of MarketWithdraw +// MarketWithdraw indicates an expected call of MarketWithdraw. func (mr *MockFullNodeMockRecorder) MarketWithdraw(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MarketWithdraw", reflect.TypeOf((*MockFullNode)(nil).MarketWithdraw), arg0, arg1, arg2, arg3) } -// MinerCreateBlock mocks base method +// MinerCreateBlock mocks base method. 
func (m *MockFullNode) MinerCreateBlock(arg0 context.Context, arg1 *api.BlockTemplate) (*types.BlockMsg, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "MinerCreateBlock", arg0, arg1) @@ -1002,13 +1062,13 @@ func (m *MockFullNode) MinerCreateBlock(arg0 context.Context, arg1 *api.BlockTem return ret0, ret1 } -// MinerCreateBlock indicates an expected call of MinerCreateBlock +// MinerCreateBlock indicates an expected call of MinerCreateBlock. func (mr *MockFullNodeMockRecorder) MinerCreateBlock(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MinerCreateBlock", reflect.TypeOf((*MockFullNode)(nil).MinerCreateBlock), arg0, arg1) } -// MinerGetBaseInfo mocks base method +// MinerGetBaseInfo mocks base method. func (m *MockFullNode) MinerGetBaseInfo(arg0 context.Context, arg1 address.Address, arg2 abi.ChainEpoch, arg3 types.TipSetKey) (*api.MiningBaseInfo, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "MinerGetBaseInfo", arg0, arg1, arg2, arg3) @@ -1017,13 +1077,13 @@ func (m *MockFullNode) MinerGetBaseInfo(arg0 context.Context, arg1 address.Addre return ret0, ret1 } -// MinerGetBaseInfo indicates an expected call of MinerGetBaseInfo +// MinerGetBaseInfo indicates an expected call of MinerGetBaseInfo. func (mr *MockFullNodeMockRecorder) MinerGetBaseInfo(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MinerGetBaseInfo", reflect.TypeOf((*MockFullNode)(nil).MinerGetBaseInfo), arg0, arg1, arg2, arg3) } -// MpoolBatchPush mocks base method +// MpoolBatchPush mocks base method. 
func (m *MockFullNode) MpoolBatchPush(arg0 context.Context, arg1 []*types.SignedMessage) ([]cid.Cid, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "MpoolBatchPush", arg0, arg1) @@ -1032,13 +1092,13 @@ func (m *MockFullNode) MpoolBatchPush(arg0 context.Context, arg1 []*types.Signed return ret0, ret1 } -// MpoolBatchPush indicates an expected call of MpoolBatchPush +// MpoolBatchPush indicates an expected call of MpoolBatchPush. func (mr *MockFullNodeMockRecorder) MpoolBatchPush(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MpoolBatchPush", reflect.TypeOf((*MockFullNode)(nil).MpoolBatchPush), arg0, arg1) } -// MpoolBatchPushMessage mocks base method +// MpoolBatchPushMessage mocks base method. func (m *MockFullNode) MpoolBatchPushMessage(arg0 context.Context, arg1 []*types.Message, arg2 *api.MessageSendSpec) ([]*types.SignedMessage, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "MpoolBatchPushMessage", arg0, arg1, arg2) @@ -1047,13 +1107,13 @@ func (m *MockFullNode) MpoolBatchPushMessage(arg0 context.Context, arg1 []*types return ret0, ret1 } -// MpoolBatchPushMessage indicates an expected call of MpoolBatchPushMessage +// MpoolBatchPushMessage indicates an expected call of MpoolBatchPushMessage. func (mr *MockFullNodeMockRecorder) MpoolBatchPushMessage(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MpoolBatchPushMessage", reflect.TypeOf((*MockFullNode)(nil).MpoolBatchPushMessage), arg0, arg1, arg2) } -// MpoolBatchPushUntrusted mocks base method +// MpoolBatchPushUntrusted mocks base method. 
func (m *MockFullNode) MpoolBatchPushUntrusted(arg0 context.Context, arg1 []*types.SignedMessage) ([]cid.Cid, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "MpoolBatchPushUntrusted", arg0, arg1) @@ -1062,13 +1122,13 @@ func (m *MockFullNode) MpoolBatchPushUntrusted(arg0 context.Context, arg1 []*typ return ret0, ret1 } -// MpoolBatchPushUntrusted indicates an expected call of MpoolBatchPushUntrusted +// MpoolBatchPushUntrusted indicates an expected call of MpoolBatchPushUntrusted. func (mr *MockFullNodeMockRecorder) MpoolBatchPushUntrusted(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MpoolBatchPushUntrusted", reflect.TypeOf((*MockFullNode)(nil).MpoolBatchPushUntrusted), arg0, arg1) } -// MpoolClear mocks base method +// MpoolClear mocks base method. func (m *MockFullNode) MpoolClear(arg0 context.Context, arg1 bool) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "MpoolClear", arg0, arg1) @@ -1076,13 +1136,13 @@ func (m *MockFullNode) MpoolClear(arg0 context.Context, arg1 bool) error { return ret0 } -// MpoolClear indicates an expected call of MpoolClear +// MpoolClear indicates an expected call of MpoolClear. func (mr *MockFullNodeMockRecorder) MpoolClear(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MpoolClear", reflect.TypeOf((*MockFullNode)(nil).MpoolClear), arg0, arg1) } -// MpoolGetConfig mocks base method +// MpoolGetConfig mocks base method. func (m *MockFullNode) MpoolGetConfig(arg0 context.Context) (*types.MpoolConfig, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "MpoolGetConfig", arg0) @@ -1091,13 +1151,13 @@ func (m *MockFullNode) MpoolGetConfig(arg0 context.Context) (*types.MpoolConfig, return ret0, ret1 } -// MpoolGetConfig indicates an expected call of MpoolGetConfig +// MpoolGetConfig indicates an expected call of MpoolGetConfig. 
func (mr *MockFullNodeMockRecorder) MpoolGetConfig(arg0 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MpoolGetConfig", reflect.TypeOf((*MockFullNode)(nil).MpoolGetConfig), arg0) } -// MpoolGetNonce mocks base method +// MpoolGetNonce mocks base method. func (m *MockFullNode) MpoolGetNonce(arg0 context.Context, arg1 address.Address) (uint64, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "MpoolGetNonce", arg0, arg1) @@ -1106,13 +1166,13 @@ func (m *MockFullNode) MpoolGetNonce(arg0 context.Context, arg1 address.Address) return ret0, ret1 } -// MpoolGetNonce indicates an expected call of MpoolGetNonce +// MpoolGetNonce indicates an expected call of MpoolGetNonce. func (mr *MockFullNodeMockRecorder) MpoolGetNonce(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MpoolGetNonce", reflect.TypeOf((*MockFullNode)(nil).MpoolGetNonce), arg0, arg1) } -// MpoolPending mocks base method +// MpoolPending mocks base method. func (m *MockFullNode) MpoolPending(arg0 context.Context, arg1 types.TipSetKey) ([]*types.SignedMessage, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "MpoolPending", arg0, arg1) @@ -1121,13 +1181,13 @@ func (m *MockFullNode) MpoolPending(arg0 context.Context, arg1 types.TipSetKey) return ret0, ret1 } -// MpoolPending indicates an expected call of MpoolPending +// MpoolPending indicates an expected call of MpoolPending. func (mr *MockFullNodeMockRecorder) MpoolPending(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MpoolPending", reflect.TypeOf((*MockFullNode)(nil).MpoolPending), arg0, arg1) } -// MpoolPush mocks base method +// MpoolPush mocks base method. 
func (m *MockFullNode) MpoolPush(arg0 context.Context, arg1 *types.SignedMessage) (cid.Cid, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "MpoolPush", arg0, arg1) @@ -1136,13 +1196,13 @@ func (m *MockFullNode) MpoolPush(arg0 context.Context, arg1 *types.SignedMessage return ret0, ret1 } -// MpoolPush indicates an expected call of MpoolPush +// MpoolPush indicates an expected call of MpoolPush. func (mr *MockFullNodeMockRecorder) MpoolPush(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MpoolPush", reflect.TypeOf((*MockFullNode)(nil).MpoolPush), arg0, arg1) } -// MpoolPushMessage mocks base method +// MpoolPushMessage mocks base method. func (m *MockFullNode) MpoolPushMessage(arg0 context.Context, arg1 *types.Message, arg2 *api.MessageSendSpec) (*types.SignedMessage, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "MpoolPushMessage", arg0, arg1, arg2) @@ -1151,13 +1211,13 @@ func (m *MockFullNode) MpoolPushMessage(arg0 context.Context, arg1 *types.Messag return ret0, ret1 } -// MpoolPushMessage indicates an expected call of MpoolPushMessage +// MpoolPushMessage indicates an expected call of MpoolPushMessage. func (mr *MockFullNodeMockRecorder) MpoolPushMessage(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MpoolPushMessage", reflect.TypeOf((*MockFullNode)(nil).MpoolPushMessage), arg0, arg1, arg2) } -// MpoolPushUntrusted mocks base method +// MpoolPushUntrusted mocks base method. 
func (m *MockFullNode) MpoolPushUntrusted(arg0 context.Context, arg1 *types.SignedMessage) (cid.Cid, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "MpoolPushUntrusted", arg0, arg1) @@ -1166,13 +1226,13 @@ func (m *MockFullNode) MpoolPushUntrusted(arg0 context.Context, arg1 *types.Sign return ret0, ret1 } -// MpoolPushUntrusted indicates an expected call of MpoolPushUntrusted +// MpoolPushUntrusted indicates an expected call of MpoolPushUntrusted. func (mr *MockFullNodeMockRecorder) MpoolPushUntrusted(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MpoolPushUntrusted", reflect.TypeOf((*MockFullNode)(nil).MpoolPushUntrusted), arg0, arg1) } -// MpoolSelect mocks base method +// MpoolSelect mocks base method. func (m *MockFullNode) MpoolSelect(arg0 context.Context, arg1 types.TipSetKey, arg2 float64) ([]*types.SignedMessage, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "MpoolSelect", arg0, arg1, arg2) @@ -1181,13 +1241,13 @@ func (m *MockFullNode) MpoolSelect(arg0 context.Context, arg1 types.TipSetKey, a return ret0, ret1 } -// MpoolSelect indicates an expected call of MpoolSelect +// MpoolSelect indicates an expected call of MpoolSelect. func (mr *MockFullNodeMockRecorder) MpoolSelect(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MpoolSelect", reflect.TypeOf((*MockFullNode)(nil).MpoolSelect), arg0, arg1, arg2) } -// MpoolSetConfig mocks base method +// MpoolSetConfig mocks base method. func (m *MockFullNode) MpoolSetConfig(arg0 context.Context, arg1 *types.MpoolConfig) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "MpoolSetConfig", arg0, arg1) @@ -1195,13 +1255,13 @@ func (m *MockFullNode) MpoolSetConfig(arg0 context.Context, arg1 *types.MpoolCon return ret0 } -// MpoolSetConfig indicates an expected call of MpoolSetConfig +// MpoolSetConfig indicates an expected call of MpoolSetConfig. 
func (mr *MockFullNodeMockRecorder) MpoolSetConfig(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MpoolSetConfig", reflect.TypeOf((*MockFullNode)(nil).MpoolSetConfig), arg0, arg1) } -// MpoolSub mocks base method +// MpoolSub mocks base method. func (m *MockFullNode) MpoolSub(arg0 context.Context) (<-chan api.MpoolUpdate, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "MpoolSub", arg0) @@ -1210,13 +1270,13 @@ func (m *MockFullNode) MpoolSub(arg0 context.Context) (<-chan api.MpoolUpdate, e return ret0, ret1 } -// MpoolSub indicates an expected call of MpoolSub +// MpoolSub indicates an expected call of MpoolSub. func (mr *MockFullNodeMockRecorder) MpoolSub(arg0 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MpoolSub", reflect.TypeOf((*MockFullNode)(nil).MpoolSub), arg0) } -// MsigAddApprove mocks base method +// MsigAddApprove mocks base method. func (m *MockFullNode) MsigAddApprove(arg0 context.Context, arg1, arg2 address.Address, arg3 uint64, arg4, arg5 address.Address, arg6 bool) (cid.Cid, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "MsigAddApprove", arg0, arg1, arg2, arg3, arg4, arg5, arg6) @@ -1225,13 +1285,13 @@ func (m *MockFullNode) MsigAddApprove(arg0 context.Context, arg1, arg2 address.A return ret0, ret1 } -// MsigAddApprove indicates an expected call of MsigAddApprove +// MsigAddApprove indicates an expected call of MsigAddApprove. func (mr *MockFullNodeMockRecorder) MsigAddApprove(arg0, arg1, arg2, arg3, arg4, arg5, arg6 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MsigAddApprove", reflect.TypeOf((*MockFullNode)(nil).MsigAddApprove), arg0, arg1, arg2, arg3, arg4, arg5, arg6) } -// MsigAddCancel mocks base method +// MsigAddCancel mocks base method. 
func (m *MockFullNode) MsigAddCancel(arg0 context.Context, arg1, arg2 address.Address, arg3 uint64, arg4 address.Address, arg5 bool) (cid.Cid, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "MsigAddCancel", arg0, arg1, arg2, arg3, arg4, arg5) @@ -1240,13 +1300,13 @@ func (m *MockFullNode) MsigAddCancel(arg0 context.Context, arg1, arg2 address.Ad return ret0, ret1 } -// MsigAddCancel indicates an expected call of MsigAddCancel +// MsigAddCancel indicates an expected call of MsigAddCancel. func (mr *MockFullNodeMockRecorder) MsigAddCancel(arg0, arg1, arg2, arg3, arg4, arg5 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MsigAddCancel", reflect.TypeOf((*MockFullNode)(nil).MsigAddCancel), arg0, arg1, arg2, arg3, arg4, arg5) } -// MsigAddPropose mocks base method +// MsigAddPropose mocks base method. func (m *MockFullNode) MsigAddPropose(arg0 context.Context, arg1, arg2, arg3 address.Address, arg4 bool) (cid.Cid, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "MsigAddPropose", arg0, arg1, arg2, arg3, arg4) @@ -1255,13 +1315,13 @@ func (m *MockFullNode) MsigAddPropose(arg0 context.Context, arg1, arg2, arg3 add return ret0, ret1 } -// MsigAddPropose indicates an expected call of MsigAddPropose +// MsigAddPropose indicates an expected call of MsigAddPropose. func (mr *MockFullNodeMockRecorder) MsigAddPropose(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MsigAddPropose", reflect.TypeOf((*MockFullNode)(nil).MsigAddPropose), arg0, arg1, arg2, arg3, arg4) } -// MsigApprove mocks base method +// MsigApprove mocks base method. 
func (m *MockFullNode) MsigApprove(arg0 context.Context, arg1 address.Address, arg2 uint64, arg3 address.Address) (cid.Cid, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "MsigApprove", arg0, arg1, arg2, arg3) @@ -1270,13 +1330,13 @@ func (m *MockFullNode) MsigApprove(arg0 context.Context, arg1 address.Address, a return ret0, ret1 } -// MsigApprove indicates an expected call of MsigApprove +// MsigApprove indicates an expected call of MsigApprove. func (mr *MockFullNodeMockRecorder) MsigApprove(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MsigApprove", reflect.TypeOf((*MockFullNode)(nil).MsigApprove), arg0, arg1, arg2, arg3) } -// MsigApproveTxnHash mocks base method +// MsigApproveTxnHash mocks base method. func (m *MockFullNode) MsigApproveTxnHash(arg0 context.Context, arg1 address.Address, arg2 uint64, arg3, arg4 address.Address, arg5 big.Int, arg6 address.Address, arg7 uint64, arg8 []byte) (cid.Cid, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "MsigApproveTxnHash", arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8) @@ -1285,13 +1345,13 @@ func (m *MockFullNode) MsigApproveTxnHash(arg0 context.Context, arg1 address.Add return ret0, ret1 } -// MsigApproveTxnHash indicates an expected call of MsigApproveTxnHash +// MsigApproveTxnHash indicates an expected call of MsigApproveTxnHash. func (mr *MockFullNodeMockRecorder) MsigApproveTxnHash(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MsigApproveTxnHash", reflect.TypeOf((*MockFullNode)(nil).MsigApproveTxnHash), arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8) } -// MsigCancel mocks base method +// MsigCancel mocks base method. 
func (m *MockFullNode) MsigCancel(arg0 context.Context, arg1 address.Address, arg2 uint64, arg3 address.Address, arg4 big.Int, arg5 address.Address, arg6 uint64, arg7 []byte) (cid.Cid, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "MsigCancel", arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7) @@ -1300,13 +1360,13 @@ func (m *MockFullNode) MsigCancel(arg0 context.Context, arg1 address.Address, ar return ret0, ret1 } -// MsigCancel indicates an expected call of MsigCancel +// MsigCancel indicates an expected call of MsigCancel. func (mr *MockFullNodeMockRecorder) MsigCancel(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MsigCancel", reflect.TypeOf((*MockFullNode)(nil).MsigCancel), arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7) } -// MsigCreate mocks base method +// MsigCreate mocks base method. func (m *MockFullNode) MsigCreate(arg0 context.Context, arg1 uint64, arg2 []address.Address, arg3 abi.ChainEpoch, arg4 big.Int, arg5 address.Address, arg6 big.Int) (cid.Cid, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "MsigCreate", arg0, arg1, arg2, arg3, arg4, arg5, arg6) @@ -1315,13 +1375,13 @@ func (m *MockFullNode) MsigCreate(arg0 context.Context, arg1 uint64, arg2 []addr return ret0, ret1 } -// MsigCreate indicates an expected call of MsigCreate +// MsigCreate indicates an expected call of MsigCreate. func (mr *MockFullNodeMockRecorder) MsigCreate(arg0, arg1, arg2, arg3, arg4, arg5, arg6 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MsigCreate", reflect.TypeOf((*MockFullNode)(nil).MsigCreate), arg0, arg1, arg2, arg3, arg4, arg5, arg6) } -// MsigGetAvailableBalance mocks base method +// MsigGetAvailableBalance mocks base method. 
func (m *MockFullNode) MsigGetAvailableBalance(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) (big.Int, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "MsigGetAvailableBalance", arg0, arg1, arg2) @@ -1330,13 +1390,13 @@ func (m *MockFullNode) MsigGetAvailableBalance(arg0 context.Context, arg1 addres return ret0, ret1 } -// MsigGetAvailableBalance indicates an expected call of MsigGetAvailableBalance +// MsigGetAvailableBalance indicates an expected call of MsigGetAvailableBalance. func (mr *MockFullNodeMockRecorder) MsigGetAvailableBalance(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MsigGetAvailableBalance", reflect.TypeOf((*MockFullNode)(nil).MsigGetAvailableBalance), arg0, arg1, arg2) } -// MsigGetPending mocks base method +// MsigGetPending mocks base method. func (m *MockFullNode) MsigGetPending(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) ([]*api.MsigTransaction, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "MsigGetPending", arg0, arg1, arg2) @@ -1345,13 +1405,13 @@ func (m *MockFullNode) MsigGetPending(arg0 context.Context, arg1 address.Address return ret0, ret1 } -// MsigGetPending indicates an expected call of MsigGetPending +// MsigGetPending indicates an expected call of MsigGetPending. func (mr *MockFullNodeMockRecorder) MsigGetPending(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MsigGetPending", reflect.TypeOf((*MockFullNode)(nil).MsigGetPending), arg0, arg1, arg2) } -// MsigGetVested mocks base method +// MsigGetVested mocks base method. 
func (m *MockFullNode) MsigGetVested(arg0 context.Context, arg1 address.Address, arg2, arg3 types.TipSetKey) (big.Int, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "MsigGetVested", arg0, arg1, arg2, arg3) @@ -1360,13 +1420,13 @@ func (m *MockFullNode) MsigGetVested(arg0 context.Context, arg1 address.Address, return ret0, ret1 } -// MsigGetVested indicates an expected call of MsigGetVested +// MsigGetVested indicates an expected call of MsigGetVested. func (mr *MockFullNodeMockRecorder) MsigGetVested(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MsigGetVested", reflect.TypeOf((*MockFullNode)(nil).MsigGetVested), arg0, arg1, arg2, arg3) } -// MsigGetVestingSchedule mocks base method +// MsigGetVestingSchedule mocks base method. func (m *MockFullNode) MsigGetVestingSchedule(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) (api.MsigVesting, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "MsigGetVestingSchedule", arg0, arg1, arg2) @@ -1375,13 +1435,13 @@ func (m *MockFullNode) MsigGetVestingSchedule(arg0 context.Context, arg1 address return ret0, ret1 } -// MsigGetVestingSchedule indicates an expected call of MsigGetVestingSchedule +// MsigGetVestingSchedule indicates an expected call of MsigGetVestingSchedule. func (mr *MockFullNodeMockRecorder) MsigGetVestingSchedule(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MsigGetVestingSchedule", reflect.TypeOf((*MockFullNode)(nil).MsigGetVestingSchedule), arg0, arg1, arg2) } -// MsigPropose mocks base method +// MsigPropose mocks base method. 
func (m *MockFullNode) MsigPropose(arg0 context.Context, arg1, arg2 address.Address, arg3 big.Int, arg4 address.Address, arg5 uint64, arg6 []byte) (cid.Cid, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "MsigPropose", arg0, arg1, arg2, arg3, arg4, arg5, arg6) @@ -1390,13 +1450,13 @@ func (m *MockFullNode) MsigPropose(arg0 context.Context, arg1, arg2 address.Addr return ret0, ret1 } -// MsigPropose indicates an expected call of MsigPropose +// MsigPropose indicates an expected call of MsigPropose. func (mr *MockFullNodeMockRecorder) MsigPropose(arg0, arg1, arg2, arg3, arg4, arg5, arg6 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MsigPropose", reflect.TypeOf((*MockFullNode)(nil).MsigPropose), arg0, arg1, arg2, arg3, arg4, arg5, arg6) } -// MsigRemoveSigner mocks base method +// MsigRemoveSigner mocks base method. func (m *MockFullNode) MsigRemoveSigner(arg0 context.Context, arg1, arg2, arg3 address.Address, arg4 bool) (cid.Cid, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "MsigRemoveSigner", arg0, arg1, arg2, arg3, arg4) @@ -1405,13 +1465,13 @@ func (m *MockFullNode) MsigRemoveSigner(arg0 context.Context, arg1, arg2, arg3 a return ret0, ret1 } -// MsigRemoveSigner indicates an expected call of MsigRemoveSigner +// MsigRemoveSigner indicates an expected call of MsigRemoveSigner. func (mr *MockFullNodeMockRecorder) MsigRemoveSigner(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MsigRemoveSigner", reflect.TypeOf((*MockFullNode)(nil).MsigRemoveSigner), arg0, arg1, arg2, arg3, arg4) } -// MsigSwapApprove mocks base method +// MsigSwapApprove mocks base method. 
func (m *MockFullNode) MsigSwapApprove(arg0 context.Context, arg1, arg2 address.Address, arg3 uint64, arg4, arg5, arg6 address.Address) (cid.Cid, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "MsigSwapApprove", arg0, arg1, arg2, arg3, arg4, arg5, arg6) @@ -1420,13 +1480,13 @@ func (m *MockFullNode) MsigSwapApprove(arg0 context.Context, arg1, arg2 address. return ret0, ret1 } -// MsigSwapApprove indicates an expected call of MsigSwapApprove +// MsigSwapApprove indicates an expected call of MsigSwapApprove. func (mr *MockFullNodeMockRecorder) MsigSwapApprove(arg0, arg1, arg2, arg3, arg4, arg5, arg6 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MsigSwapApprove", reflect.TypeOf((*MockFullNode)(nil).MsigSwapApprove), arg0, arg1, arg2, arg3, arg4, arg5, arg6) } -// MsigSwapCancel mocks base method +// MsigSwapCancel mocks base method. func (m *MockFullNode) MsigSwapCancel(arg0 context.Context, arg1, arg2 address.Address, arg3 uint64, arg4, arg5 address.Address) (cid.Cid, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "MsigSwapCancel", arg0, arg1, arg2, arg3, arg4, arg5) @@ -1435,13 +1495,13 @@ func (m *MockFullNode) MsigSwapCancel(arg0 context.Context, arg1, arg2 address.A return ret0, ret1 } -// MsigSwapCancel indicates an expected call of MsigSwapCancel +// MsigSwapCancel indicates an expected call of MsigSwapCancel. func (mr *MockFullNodeMockRecorder) MsigSwapCancel(arg0, arg1, arg2, arg3, arg4, arg5 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MsigSwapCancel", reflect.TypeOf((*MockFullNode)(nil).MsigSwapCancel), arg0, arg1, arg2, arg3, arg4, arg5) } -// MsigSwapPropose mocks base method +// MsigSwapPropose mocks base method. 
func (m *MockFullNode) MsigSwapPropose(arg0 context.Context, arg1, arg2, arg3, arg4 address.Address) (cid.Cid, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "MsigSwapPropose", arg0, arg1, arg2, arg3, arg4) @@ -1450,13 +1510,13 @@ func (m *MockFullNode) MsigSwapPropose(arg0 context.Context, arg1, arg2, arg3, a return ret0, ret1 } -// MsigSwapPropose indicates an expected call of MsigSwapPropose +// MsigSwapPropose indicates an expected call of MsigSwapPropose. func (mr *MockFullNodeMockRecorder) MsigSwapPropose(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MsigSwapPropose", reflect.TypeOf((*MockFullNode)(nil).MsigSwapPropose), arg0, arg1, arg2, arg3, arg4) } -// NetAddrsListen mocks base method +// NetAddrsListen mocks base method. func (m *MockFullNode) NetAddrsListen(arg0 context.Context) (peer.AddrInfo, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "NetAddrsListen", arg0) @@ -1465,13 +1525,13 @@ func (m *MockFullNode) NetAddrsListen(arg0 context.Context) (peer.AddrInfo, erro return ret0, ret1 } -// NetAddrsListen indicates an expected call of NetAddrsListen +// NetAddrsListen indicates an expected call of NetAddrsListen. func (mr *MockFullNodeMockRecorder) NetAddrsListen(arg0 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetAddrsListen", reflect.TypeOf((*MockFullNode)(nil).NetAddrsListen), arg0) } -// NetAgentVersion mocks base method +// NetAgentVersion mocks base method. func (m *MockFullNode) NetAgentVersion(arg0 context.Context, arg1 peer.ID) (string, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "NetAgentVersion", arg0, arg1) @@ -1480,13 +1540,13 @@ func (m *MockFullNode) NetAgentVersion(arg0 context.Context, arg1 peer.ID) (stri return ret0, ret1 } -// NetAgentVersion indicates an expected call of NetAgentVersion +// NetAgentVersion indicates an expected call of NetAgentVersion. 
func (mr *MockFullNodeMockRecorder) NetAgentVersion(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetAgentVersion", reflect.TypeOf((*MockFullNode)(nil).NetAgentVersion), arg0, arg1) } -// NetAutoNatStatus mocks base method +// NetAutoNatStatus mocks base method. func (m *MockFullNode) NetAutoNatStatus(arg0 context.Context) (api.NatInfo, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "NetAutoNatStatus", arg0) @@ -1495,13 +1555,13 @@ func (m *MockFullNode) NetAutoNatStatus(arg0 context.Context) (api.NatInfo, erro return ret0, ret1 } -// NetAutoNatStatus indicates an expected call of NetAutoNatStatus +// NetAutoNatStatus indicates an expected call of NetAutoNatStatus. func (mr *MockFullNodeMockRecorder) NetAutoNatStatus(arg0 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetAutoNatStatus", reflect.TypeOf((*MockFullNode)(nil).NetAutoNatStatus), arg0) } -// NetBandwidthStats mocks base method +// NetBandwidthStats mocks base method. func (m *MockFullNode) NetBandwidthStats(arg0 context.Context) (metrics.Stats, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "NetBandwidthStats", arg0) @@ -1510,13 +1570,13 @@ func (m *MockFullNode) NetBandwidthStats(arg0 context.Context) (metrics.Stats, e return ret0, ret1 } -// NetBandwidthStats indicates an expected call of NetBandwidthStats +// NetBandwidthStats indicates an expected call of NetBandwidthStats. func (mr *MockFullNodeMockRecorder) NetBandwidthStats(arg0 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetBandwidthStats", reflect.TypeOf((*MockFullNode)(nil).NetBandwidthStats), arg0) } -// NetBandwidthStatsByPeer mocks base method +// NetBandwidthStatsByPeer mocks base method. 
func (m *MockFullNode) NetBandwidthStatsByPeer(arg0 context.Context) (map[string]metrics.Stats, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "NetBandwidthStatsByPeer", arg0) @@ -1525,13 +1585,13 @@ func (m *MockFullNode) NetBandwidthStatsByPeer(arg0 context.Context) (map[string return ret0, ret1 } -// NetBandwidthStatsByPeer indicates an expected call of NetBandwidthStatsByPeer +// NetBandwidthStatsByPeer indicates an expected call of NetBandwidthStatsByPeer. func (mr *MockFullNodeMockRecorder) NetBandwidthStatsByPeer(arg0 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetBandwidthStatsByPeer", reflect.TypeOf((*MockFullNode)(nil).NetBandwidthStatsByPeer), arg0) } -// NetBandwidthStatsByProtocol mocks base method +// NetBandwidthStatsByProtocol mocks base method. func (m *MockFullNode) NetBandwidthStatsByProtocol(arg0 context.Context) (map[protocol.ID]metrics.Stats, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "NetBandwidthStatsByProtocol", arg0) @@ -1540,13 +1600,13 @@ func (m *MockFullNode) NetBandwidthStatsByProtocol(arg0 context.Context) (map[pr return ret0, ret1 } -// NetBandwidthStatsByProtocol indicates an expected call of NetBandwidthStatsByProtocol +// NetBandwidthStatsByProtocol indicates an expected call of NetBandwidthStatsByProtocol. func (mr *MockFullNodeMockRecorder) NetBandwidthStatsByProtocol(arg0 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetBandwidthStatsByProtocol", reflect.TypeOf((*MockFullNode)(nil).NetBandwidthStatsByProtocol), arg0) } -// NetBlockAdd mocks base method +// NetBlockAdd mocks base method. 
func (m *MockFullNode) NetBlockAdd(arg0 context.Context, arg1 api.NetBlockList) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "NetBlockAdd", arg0, arg1) @@ -1554,13 +1614,13 @@ func (m *MockFullNode) NetBlockAdd(arg0 context.Context, arg1 api.NetBlockList) return ret0 } -// NetBlockAdd indicates an expected call of NetBlockAdd +// NetBlockAdd indicates an expected call of NetBlockAdd. func (mr *MockFullNodeMockRecorder) NetBlockAdd(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetBlockAdd", reflect.TypeOf((*MockFullNode)(nil).NetBlockAdd), arg0, arg1) } -// NetBlockList mocks base method +// NetBlockList mocks base method. func (m *MockFullNode) NetBlockList(arg0 context.Context) (api.NetBlockList, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "NetBlockList", arg0) @@ -1569,13 +1629,13 @@ func (m *MockFullNode) NetBlockList(arg0 context.Context) (api.NetBlockList, err return ret0, ret1 } -// NetBlockList indicates an expected call of NetBlockList +// NetBlockList indicates an expected call of NetBlockList. func (mr *MockFullNodeMockRecorder) NetBlockList(arg0 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetBlockList", reflect.TypeOf((*MockFullNode)(nil).NetBlockList), arg0) } -// NetBlockRemove mocks base method +// NetBlockRemove mocks base method. func (m *MockFullNode) NetBlockRemove(arg0 context.Context, arg1 api.NetBlockList) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "NetBlockRemove", arg0, arg1) @@ -1583,13 +1643,13 @@ func (m *MockFullNode) NetBlockRemove(arg0 context.Context, arg1 api.NetBlockLis return ret0 } -// NetBlockRemove indicates an expected call of NetBlockRemove +// NetBlockRemove indicates an expected call of NetBlockRemove. 
func (mr *MockFullNodeMockRecorder) NetBlockRemove(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetBlockRemove", reflect.TypeOf((*MockFullNode)(nil).NetBlockRemove), arg0, arg1) } -// NetConnect mocks base method +// NetConnect mocks base method. func (m *MockFullNode) NetConnect(arg0 context.Context, arg1 peer.AddrInfo) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "NetConnect", arg0, arg1) @@ -1597,13 +1657,13 @@ func (m *MockFullNode) NetConnect(arg0 context.Context, arg1 peer.AddrInfo) erro return ret0 } -// NetConnect indicates an expected call of NetConnect +// NetConnect indicates an expected call of NetConnect. func (mr *MockFullNodeMockRecorder) NetConnect(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetConnect", reflect.TypeOf((*MockFullNode)(nil).NetConnect), arg0, arg1) } -// NetConnectedness mocks base method +// NetConnectedness mocks base method. func (m *MockFullNode) NetConnectedness(arg0 context.Context, arg1 peer.ID) (network0.Connectedness, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "NetConnectedness", arg0, arg1) @@ -1612,13 +1672,13 @@ func (m *MockFullNode) NetConnectedness(arg0 context.Context, arg1 peer.ID) (net return ret0, ret1 } -// NetConnectedness indicates an expected call of NetConnectedness +// NetConnectedness indicates an expected call of NetConnectedness. func (mr *MockFullNodeMockRecorder) NetConnectedness(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetConnectedness", reflect.TypeOf((*MockFullNode)(nil).NetConnectedness), arg0, arg1) } -// NetDisconnect mocks base method +// NetDisconnect mocks base method. 
func (m *MockFullNode) NetDisconnect(arg0 context.Context, arg1 peer.ID) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "NetDisconnect", arg0, arg1) @@ -1626,13 +1686,13 @@ func (m *MockFullNode) NetDisconnect(arg0 context.Context, arg1 peer.ID) error { return ret0 } -// NetDisconnect indicates an expected call of NetDisconnect +// NetDisconnect indicates an expected call of NetDisconnect. func (mr *MockFullNodeMockRecorder) NetDisconnect(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetDisconnect", reflect.TypeOf((*MockFullNode)(nil).NetDisconnect), arg0, arg1) } -// NetFindPeer mocks base method +// NetFindPeer mocks base method. func (m *MockFullNode) NetFindPeer(arg0 context.Context, arg1 peer.ID) (peer.AddrInfo, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "NetFindPeer", arg0, arg1) @@ -1641,13 +1701,13 @@ func (m *MockFullNode) NetFindPeer(arg0 context.Context, arg1 peer.ID) (peer.Add return ret0, ret1 } -// NetFindPeer indicates an expected call of NetFindPeer +// NetFindPeer indicates an expected call of NetFindPeer. func (mr *MockFullNodeMockRecorder) NetFindPeer(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetFindPeer", reflect.TypeOf((*MockFullNode)(nil).NetFindPeer), arg0, arg1) } -// NetPeerInfo mocks base method +// NetPeerInfo mocks base method. func (m *MockFullNode) NetPeerInfo(arg0 context.Context, arg1 peer.ID) (*api.ExtendedPeerInfo, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "NetPeerInfo", arg0, arg1) @@ -1656,13 +1716,13 @@ func (m *MockFullNode) NetPeerInfo(arg0 context.Context, arg1 peer.ID) (*api.Ext return ret0, ret1 } -// NetPeerInfo indicates an expected call of NetPeerInfo +// NetPeerInfo indicates an expected call of NetPeerInfo. 
func (mr *MockFullNodeMockRecorder) NetPeerInfo(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetPeerInfo", reflect.TypeOf((*MockFullNode)(nil).NetPeerInfo), arg0, arg1) } -// NetPeers mocks base method +// NetPeers mocks base method. func (m *MockFullNode) NetPeers(arg0 context.Context) ([]peer.AddrInfo, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "NetPeers", arg0) @@ -1671,13 +1731,13 @@ func (m *MockFullNode) NetPeers(arg0 context.Context) ([]peer.AddrInfo, error) { return ret0, ret1 } -// NetPeers indicates an expected call of NetPeers +// NetPeers indicates an expected call of NetPeers. func (mr *MockFullNodeMockRecorder) NetPeers(arg0 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetPeers", reflect.TypeOf((*MockFullNode)(nil).NetPeers), arg0) } -// NetPubsubScores mocks base method +// NetPubsubScores mocks base method. func (m *MockFullNode) NetPubsubScores(arg0 context.Context) ([]api.PubsubScore, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "NetPubsubScores", arg0) @@ -1686,13 +1746,13 @@ func (m *MockFullNode) NetPubsubScores(arg0 context.Context) ([]api.PubsubScore, return ret0, ret1 } -// NetPubsubScores indicates an expected call of NetPubsubScores +// NetPubsubScores indicates an expected call of NetPubsubScores. func (mr *MockFullNodeMockRecorder) NetPubsubScores(arg0 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetPubsubScores", reflect.TypeOf((*MockFullNode)(nil).NetPubsubScores), arg0) } -// PaychAllocateLane mocks base method +// PaychAllocateLane mocks base method. 
func (m *MockFullNode) PaychAllocateLane(arg0 context.Context, arg1 address.Address) (uint64, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "PaychAllocateLane", arg0, arg1) @@ -1701,13 +1761,13 @@ func (m *MockFullNode) PaychAllocateLane(arg0 context.Context, arg1 address.Addr return ret0, ret1 } -// PaychAllocateLane indicates an expected call of PaychAllocateLane +// PaychAllocateLane indicates an expected call of PaychAllocateLane. func (mr *MockFullNodeMockRecorder) PaychAllocateLane(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PaychAllocateLane", reflect.TypeOf((*MockFullNode)(nil).PaychAllocateLane), arg0, arg1) } -// PaychAvailableFunds mocks base method +// PaychAvailableFunds mocks base method. func (m *MockFullNode) PaychAvailableFunds(arg0 context.Context, arg1 address.Address) (*api.ChannelAvailableFunds, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "PaychAvailableFunds", arg0, arg1) @@ -1716,13 +1776,13 @@ func (m *MockFullNode) PaychAvailableFunds(arg0 context.Context, arg1 address.Ad return ret0, ret1 } -// PaychAvailableFunds indicates an expected call of PaychAvailableFunds +// PaychAvailableFunds indicates an expected call of PaychAvailableFunds. func (mr *MockFullNodeMockRecorder) PaychAvailableFunds(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PaychAvailableFunds", reflect.TypeOf((*MockFullNode)(nil).PaychAvailableFunds), arg0, arg1) } -// PaychAvailableFundsByFromTo mocks base method +// PaychAvailableFundsByFromTo mocks base method. 
func (m *MockFullNode) PaychAvailableFundsByFromTo(arg0 context.Context, arg1, arg2 address.Address) (*api.ChannelAvailableFunds, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "PaychAvailableFundsByFromTo", arg0, arg1, arg2) @@ -1731,13 +1791,13 @@ func (m *MockFullNode) PaychAvailableFundsByFromTo(arg0 context.Context, arg1, a return ret0, ret1 } -// PaychAvailableFundsByFromTo indicates an expected call of PaychAvailableFundsByFromTo +// PaychAvailableFundsByFromTo indicates an expected call of PaychAvailableFundsByFromTo. func (mr *MockFullNodeMockRecorder) PaychAvailableFundsByFromTo(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PaychAvailableFundsByFromTo", reflect.TypeOf((*MockFullNode)(nil).PaychAvailableFundsByFromTo), arg0, arg1, arg2) } -// PaychCollect mocks base method +// PaychCollect mocks base method. func (m *MockFullNode) PaychCollect(arg0 context.Context, arg1 address.Address) (cid.Cid, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "PaychCollect", arg0, arg1) @@ -1746,13 +1806,13 @@ func (m *MockFullNode) PaychCollect(arg0 context.Context, arg1 address.Address) return ret0, ret1 } -// PaychCollect indicates an expected call of PaychCollect +// PaychCollect indicates an expected call of PaychCollect. func (mr *MockFullNodeMockRecorder) PaychCollect(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PaychCollect", reflect.TypeOf((*MockFullNode)(nil).PaychCollect), arg0, arg1) } -// PaychGet mocks base method +// PaychGet mocks base method. 
func (m *MockFullNode) PaychGet(arg0 context.Context, arg1, arg2 address.Address, arg3 big.Int) (*api.ChannelInfo, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "PaychGet", arg0, arg1, arg2, arg3) @@ -1761,13 +1821,13 @@ func (m *MockFullNode) PaychGet(arg0 context.Context, arg1, arg2 address.Address return ret0, ret1 } -// PaychGet indicates an expected call of PaychGet +// PaychGet indicates an expected call of PaychGet. func (mr *MockFullNodeMockRecorder) PaychGet(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PaychGet", reflect.TypeOf((*MockFullNode)(nil).PaychGet), arg0, arg1, arg2, arg3) } -// PaychGetWaitReady mocks base method +// PaychGetWaitReady mocks base method. func (m *MockFullNode) PaychGetWaitReady(arg0 context.Context, arg1 cid.Cid) (address.Address, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "PaychGetWaitReady", arg0, arg1) @@ -1776,13 +1836,13 @@ func (m *MockFullNode) PaychGetWaitReady(arg0 context.Context, arg1 cid.Cid) (ad return ret0, ret1 } -// PaychGetWaitReady indicates an expected call of PaychGetWaitReady +// PaychGetWaitReady indicates an expected call of PaychGetWaitReady. func (mr *MockFullNodeMockRecorder) PaychGetWaitReady(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PaychGetWaitReady", reflect.TypeOf((*MockFullNode)(nil).PaychGetWaitReady), arg0, arg1) } -// PaychList mocks base method +// PaychList mocks base method. func (m *MockFullNode) PaychList(arg0 context.Context) ([]address.Address, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "PaychList", arg0) @@ -1791,13 +1851,13 @@ func (m *MockFullNode) PaychList(arg0 context.Context) ([]address.Address, error return ret0, ret1 } -// PaychList indicates an expected call of PaychList +// PaychList indicates an expected call of PaychList. 
func (mr *MockFullNodeMockRecorder) PaychList(arg0 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PaychList", reflect.TypeOf((*MockFullNode)(nil).PaychList), arg0) } -// PaychNewPayment mocks base method +// PaychNewPayment mocks base method. func (m *MockFullNode) PaychNewPayment(arg0 context.Context, arg1, arg2 address.Address, arg3 []api.VoucherSpec) (*api.PaymentInfo, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "PaychNewPayment", arg0, arg1, arg2, arg3) @@ -1806,13 +1866,13 @@ func (m *MockFullNode) PaychNewPayment(arg0 context.Context, arg1, arg2 address. return ret0, ret1 } -// PaychNewPayment indicates an expected call of PaychNewPayment +// PaychNewPayment indicates an expected call of PaychNewPayment. func (mr *MockFullNodeMockRecorder) PaychNewPayment(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PaychNewPayment", reflect.TypeOf((*MockFullNode)(nil).PaychNewPayment), arg0, arg1, arg2, arg3) } -// PaychSettle mocks base method +// PaychSettle mocks base method. func (m *MockFullNode) PaychSettle(arg0 context.Context, arg1 address.Address) (cid.Cid, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "PaychSettle", arg0, arg1) @@ -1821,13 +1881,13 @@ func (m *MockFullNode) PaychSettle(arg0 context.Context, arg1 address.Address) ( return ret0, ret1 } -// PaychSettle indicates an expected call of PaychSettle +// PaychSettle indicates an expected call of PaychSettle. func (mr *MockFullNodeMockRecorder) PaychSettle(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PaychSettle", reflect.TypeOf((*MockFullNode)(nil).PaychSettle), arg0, arg1) } -// PaychStatus mocks base method +// PaychStatus mocks base method. 
func (m *MockFullNode) PaychStatus(arg0 context.Context, arg1 address.Address) (*api.PaychStatus, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "PaychStatus", arg0, arg1) @@ -1836,13 +1896,13 @@ func (m *MockFullNode) PaychStatus(arg0 context.Context, arg1 address.Address) ( return ret0, ret1 } -// PaychStatus indicates an expected call of PaychStatus +// PaychStatus indicates an expected call of PaychStatus. func (mr *MockFullNodeMockRecorder) PaychStatus(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PaychStatus", reflect.TypeOf((*MockFullNode)(nil).PaychStatus), arg0, arg1) } -// PaychVoucherAdd mocks base method +// PaychVoucherAdd mocks base method. func (m *MockFullNode) PaychVoucherAdd(arg0 context.Context, arg1 address.Address, arg2 *paych.SignedVoucher, arg3 []byte, arg4 big.Int) (big.Int, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "PaychVoucherAdd", arg0, arg1, arg2, arg3, arg4) @@ -1851,13 +1911,13 @@ func (m *MockFullNode) PaychVoucherAdd(arg0 context.Context, arg1 address.Addres return ret0, ret1 } -// PaychVoucherAdd indicates an expected call of PaychVoucherAdd +// PaychVoucherAdd indicates an expected call of PaychVoucherAdd. func (mr *MockFullNodeMockRecorder) PaychVoucherAdd(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PaychVoucherAdd", reflect.TypeOf((*MockFullNode)(nil).PaychVoucherAdd), arg0, arg1, arg2, arg3, arg4) } -// PaychVoucherCheckSpendable mocks base method +// PaychVoucherCheckSpendable mocks base method. 
func (m *MockFullNode) PaychVoucherCheckSpendable(arg0 context.Context, arg1 address.Address, arg2 *paych.SignedVoucher, arg3, arg4 []byte) (bool, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "PaychVoucherCheckSpendable", arg0, arg1, arg2, arg3, arg4) @@ -1866,13 +1926,13 @@ func (m *MockFullNode) PaychVoucherCheckSpendable(arg0 context.Context, arg1 add return ret0, ret1 } -// PaychVoucherCheckSpendable indicates an expected call of PaychVoucherCheckSpendable +// PaychVoucherCheckSpendable indicates an expected call of PaychVoucherCheckSpendable. func (mr *MockFullNodeMockRecorder) PaychVoucherCheckSpendable(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PaychVoucherCheckSpendable", reflect.TypeOf((*MockFullNode)(nil).PaychVoucherCheckSpendable), arg0, arg1, arg2, arg3, arg4) } -// PaychVoucherCheckValid mocks base method +// PaychVoucherCheckValid mocks base method. func (m *MockFullNode) PaychVoucherCheckValid(arg0 context.Context, arg1 address.Address, arg2 *paych.SignedVoucher) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "PaychVoucherCheckValid", arg0, arg1, arg2) @@ -1880,13 +1940,13 @@ func (m *MockFullNode) PaychVoucherCheckValid(arg0 context.Context, arg1 address return ret0 } -// PaychVoucherCheckValid indicates an expected call of PaychVoucherCheckValid +// PaychVoucherCheckValid indicates an expected call of PaychVoucherCheckValid. func (mr *MockFullNodeMockRecorder) PaychVoucherCheckValid(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PaychVoucherCheckValid", reflect.TypeOf((*MockFullNode)(nil).PaychVoucherCheckValid), arg0, arg1, arg2) } -// PaychVoucherCreate mocks base method +// PaychVoucherCreate mocks base method. 
func (m *MockFullNode) PaychVoucherCreate(arg0 context.Context, arg1 address.Address, arg2 big.Int, arg3 uint64) (*api.VoucherCreateResult, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "PaychVoucherCreate", arg0, arg1, arg2, arg3) @@ -1895,13 +1955,13 @@ func (m *MockFullNode) PaychVoucherCreate(arg0 context.Context, arg1 address.Add return ret0, ret1 } -// PaychVoucherCreate indicates an expected call of PaychVoucherCreate +// PaychVoucherCreate indicates an expected call of PaychVoucherCreate. func (mr *MockFullNodeMockRecorder) PaychVoucherCreate(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PaychVoucherCreate", reflect.TypeOf((*MockFullNode)(nil).PaychVoucherCreate), arg0, arg1, arg2, arg3) } -// PaychVoucherList mocks base method +// PaychVoucherList mocks base method. func (m *MockFullNode) PaychVoucherList(arg0 context.Context, arg1 address.Address) ([]*paych.SignedVoucher, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "PaychVoucherList", arg0, arg1) @@ -1910,13 +1970,13 @@ func (m *MockFullNode) PaychVoucherList(arg0 context.Context, arg1 address.Addre return ret0, ret1 } -// PaychVoucherList indicates an expected call of PaychVoucherList +// PaychVoucherList indicates an expected call of PaychVoucherList. func (mr *MockFullNodeMockRecorder) PaychVoucherList(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PaychVoucherList", reflect.TypeOf((*MockFullNode)(nil).PaychVoucherList), arg0, arg1) } -// PaychVoucherSubmit mocks base method +// PaychVoucherSubmit mocks base method. 
func (m *MockFullNode) PaychVoucherSubmit(arg0 context.Context, arg1 address.Address, arg2 *paych.SignedVoucher, arg3, arg4 []byte) (cid.Cid, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "PaychVoucherSubmit", arg0, arg1, arg2, arg3, arg4) @@ -1925,13 +1985,13 @@ func (m *MockFullNode) PaychVoucherSubmit(arg0 context.Context, arg1 address.Add return ret0, ret1 } -// PaychVoucherSubmit indicates an expected call of PaychVoucherSubmit +// PaychVoucherSubmit indicates an expected call of PaychVoucherSubmit. func (mr *MockFullNodeMockRecorder) PaychVoucherSubmit(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PaychVoucherSubmit", reflect.TypeOf((*MockFullNode)(nil).PaychVoucherSubmit), arg0, arg1, arg2, arg3, arg4) } -// Session mocks base method +// Session mocks base method. func (m *MockFullNode) Session(arg0 context.Context) (uuid.UUID, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Session", arg0) @@ -1940,13 +2000,13 @@ func (m *MockFullNode) Session(arg0 context.Context) (uuid.UUID, error) { return ret0, ret1 } -// Session indicates an expected call of Session +// Session indicates an expected call of Session. func (mr *MockFullNodeMockRecorder) Session(arg0 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Session", reflect.TypeOf((*MockFullNode)(nil).Session), arg0) } -// Shutdown mocks base method +// Shutdown mocks base method. func (m *MockFullNode) Shutdown(arg0 context.Context) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Shutdown", arg0) @@ -1954,13 +2014,13 @@ func (m *MockFullNode) Shutdown(arg0 context.Context) error { return ret0 } -// Shutdown indicates an expected call of Shutdown +// Shutdown indicates an expected call of Shutdown. 
func (mr *MockFullNodeMockRecorder) Shutdown(arg0 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Shutdown", reflect.TypeOf((*MockFullNode)(nil).Shutdown), arg0) } -// StateAccountKey mocks base method +// StateAccountKey mocks base method. func (m *MockFullNode) StateAccountKey(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) (address.Address, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "StateAccountKey", arg0, arg1, arg2) @@ -1969,13 +2029,13 @@ func (m *MockFullNode) StateAccountKey(arg0 context.Context, arg1 address.Addres return ret0, ret1 } -// StateAccountKey indicates an expected call of StateAccountKey +// StateAccountKey indicates an expected call of StateAccountKey. func (mr *MockFullNodeMockRecorder) StateAccountKey(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateAccountKey", reflect.TypeOf((*MockFullNode)(nil).StateAccountKey), arg0, arg1, arg2) } -// StateAllMinerFaults mocks base method +// StateAllMinerFaults mocks base method. func (m *MockFullNode) StateAllMinerFaults(arg0 context.Context, arg1 abi.ChainEpoch, arg2 types.TipSetKey) ([]*api.Fault, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "StateAllMinerFaults", arg0, arg1, arg2) @@ -1984,13 +2044,13 @@ func (m *MockFullNode) StateAllMinerFaults(arg0 context.Context, arg1 abi.ChainE return ret0, ret1 } -// StateAllMinerFaults indicates an expected call of StateAllMinerFaults +// StateAllMinerFaults indicates an expected call of StateAllMinerFaults. func (mr *MockFullNodeMockRecorder) StateAllMinerFaults(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateAllMinerFaults", reflect.TypeOf((*MockFullNode)(nil).StateAllMinerFaults), arg0, arg1, arg2) } -// StateCall mocks base method +// StateCall mocks base method. 
func (m *MockFullNode) StateCall(arg0 context.Context, arg1 *types.Message, arg2 types.TipSetKey) (*api.InvocResult, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "StateCall", arg0, arg1, arg2) @@ -1999,13 +2059,13 @@ func (m *MockFullNode) StateCall(arg0 context.Context, arg1 *types.Message, arg2 return ret0, ret1 } -// StateCall indicates an expected call of StateCall +// StateCall indicates an expected call of StateCall. func (mr *MockFullNodeMockRecorder) StateCall(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateCall", reflect.TypeOf((*MockFullNode)(nil).StateCall), arg0, arg1, arg2) } -// StateChangedActors mocks base method +// StateChangedActors mocks base method. func (m *MockFullNode) StateChangedActors(arg0 context.Context, arg1, arg2 cid.Cid) (map[string]types.Actor, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "StateChangedActors", arg0, arg1, arg2) @@ -2014,13 +2074,13 @@ func (m *MockFullNode) StateChangedActors(arg0 context.Context, arg1, arg2 cid.C return ret0, ret1 } -// StateChangedActors indicates an expected call of StateChangedActors +// StateChangedActors indicates an expected call of StateChangedActors. func (mr *MockFullNodeMockRecorder) StateChangedActors(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateChangedActors", reflect.TypeOf((*MockFullNode)(nil).StateChangedActors), arg0, arg1, arg2) } -// StateCirculatingSupply mocks base method +// StateCirculatingSupply mocks base method. 
func (m *MockFullNode) StateCirculatingSupply(arg0 context.Context, arg1 types.TipSetKey) (big.Int, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "StateCirculatingSupply", arg0, arg1) @@ -2029,13 +2089,13 @@ func (m *MockFullNode) StateCirculatingSupply(arg0 context.Context, arg1 types.T return ret0, ret1 } -// StateCirculatingSupply indicates an expected call of StateCirculatingSupply +// StateCirculatingSupply indicates an expected call of StateCirculatingSupply. func (mr *MockFullNodeMockRecorder) StateCirculatingSupply(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateCirculatingSupply", reflect.TypeOf((*MockFullNode)(nil).StateCirculatingSupply), arg0, arg1) } -// StateCompute mocks base method +// StateCompute mocks base method. func (m *MockFullNode) StateCompute(arg0 context.Context, arg1 abi.ChainEpoch, arg2 []*types.Message, arg3 types.TipSetKey) (*api.ComputeStateOutput, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "StateCompute", arg0, arg1, arg2, arg3) @@ -2044,13 +2104,13 @@ func (m *MockFullNode) StateCompute(arg0 context.Context, arg1 abi.ChainEpoch, a return ret0, ret1 } -// StateCompute indicates an expected call of StateCompute +// StateCompute indicates an expected call of StateCompute. func (mr *MockFullNodeMockRecorder) StateCompute(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateCompute", reflect.TypeOf((*MockFullNode)(nil).StateCompute), arg0, arg1, arg2, arg3) } -// StateDealProviderCollateralBounds mocks base method +// StateDealProviderCollateralBounds mocks base method. 
func (m *MockFullNode) StateDealProviderCollateralBounds(arg0 context.Context, arg1 abi.PaddedPieceSize, arg2 bool, arg3 types.TipSetKey) (api.DealCollateralBounds, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "StateDealProviderCollateralBounds", arg0, arg1, arg2, arg3) @@ -2059,13 +2119,13 @@ func (m *MockFullNode) StateDealProviderCollateralBounds(arg0 context.Context, a return ret0, ret1 } -// StateDealProviderCollateralBounds indicates an expected call of StateDealProviderCollateralBounds +// StateDealProviderCollateralBounds indicates an expected call of StateDealProviderCollateralBounds. func (mr *MockFullNodeMockRecorder) StateDealProviderCollateralBounds(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateDealProviderCollateralBounds", reflect.TypeOf((*MockFullNode)(nil).StateDealProviderCollateralBounds), arg0, arg1, arg2, arg3) } -// StateDecodeParams mocks base method +// StateDecodeParams mocks base method. func (m *MockFullNode) StateDecodeParams(arg0 context.Context, arg1 address.Address, arg2 abi.MethodNum, arg3 []byte, arg4 types.TipSetKey) (interface{}, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "StateDecodeParams", arg0, arg1, arg2, arg3, arg4) @@ -2074,13 +2134,13 @@ func (m *MockFullNode) StateDecodeParams(arg0 context.Context, arg1 address.Addr return ret0, ret1 } -// StateDecodeParams indicates an expected call of StateDecodeParams +// StateDecodeParams indicates an expected call of StateDecodeParams. func (mr *MockFullNodeMockRecorder) StateDecodeParams(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateDecodeParams", reflect.TypeOf((*MockFullNode)(nil).StateDecodeParams), arg0, arg1, arg2, arg3, arg4) } -// StateGetActor mocks base method +// StateGetActor mocks base method. 
func (m *MockFullNode) StateGetActor(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) (*types.Actor, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "StateGetActor", arg0, arg1, arg2) @@ -2089,13 +2149,13 @@ func (m *MockFullNode) StateGetActor(arg0 context.Context, arg1 address.Address, return ret0, ret1 } -// StateGetActor indicates an expected call of StateGetActor +// StateGetActor indicates an expected call of StateGetActor. func (mr *MockFullNodeMockRecorder) StateGetActor(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateGetActor", reflect.TypeOf((*MockFullNode)(nil).StateGetActor), arg0, arg1, arg2) } -// StateGetReceipt mocks base method +// StateGetReceipt mocks base method. func (m *MockFullNode) StateGetReceipt(arg0 context.Context, arg1 cid.Cid, arg2 types.TipSetKey) (*types.MessageReceipt, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "StateGetReceipt", arg0, arg1, arg2) @@ -2104,13 +2164,13 @@ func (m *MockFullNode) StateGetReceipt(arg0 context.Context, arg1 cid.Cid, arg2 return ret0, ret1 } -// StateGetReceipt indicates an expected call of StateGetReceipt +// StateGetReceipt indicates an expected call of StateGetReceipt. func (mr *MockFullNodeMockRecorder) StateGetReceipt(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateGetReceipt", reflect.TypeOf((*MockFullNode)(nil).StateGetReceipt), arg0, arg1, arg2) } -// StateListActors mocks base method +// StateListActors mocks base method. 
func (m *MockFullNode) StateListActors(arg0 context.Context, arg1 types.TipSetKey) ([]address.Address, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "StateListActors", arg0, arg1) @@ -2119,13 +2179,13 @@ func (m *MockFullNode) StateListActors(arg0 context.Context, arg1 types.TipSetKe return ret0, ret1 } -// StateListActors indicates an expected call of StateListActors +// StateListActors indicates an expected call of StateListActors. func (mr *MockFullNodeMockRecorder) StateListActors(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateListActors", reflect.TypeOf((*MockFullNode)(nil).StateListActors), arg0, arg1) } -// StateListMessages mocks base method +// StateListMessages mocks base method. func (m *MockFullNode) StateListMessages(arg0 context.Context, arg1 *api.MessageMatch, arg2 types.TipSetKey, arg3 abi.ChainEpoch) ([]cid.Cid, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "StateListMessages", arg0, arg1, arg2, arg3) @@ -2134,13 +2194,13 @@ func (m *MockFullNode) StateListMessages(arg0 context.Context, arg1 *api.Message return ret0, ret1 } -// StateListMessages indicates an expected call of StateListMessages +// StateListMessages indicates an expected call of StateListMessages. func (mr *MockFullNodeMockRecorder) StateListMessages(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateListMessages", reflect.TypeOf((*MockFullNode)(nil).StateListMessages), arg0, arg1, arg2, arg3) } -// StateListMiners mocks base method +// StateListMiners mocks base method. 
func (m *MockFullNode) StateListMiners(arg0 context.Context, arg1 types.TipSetKey) ([]address.Address, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "StateListMiners", arg0, arg1) @@ -2149,13 +2209,13 @@ func (m *MockFullNode) StateListMiners(arg0 context.Context, arg1 types.TipSetKe return ret0, ret1 } -// StateListMiners indicates an expected call of StateListMiners +// StateListMiners indicates an expected call of StateListMiners. func (mr *MockFullNodeMockRecorder) StateListMiners(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateListMiners", reflect.TypeOf((*MockFullNode)(nil).StateListMiners), arg0, arg1) } -// StateLookupID mocks base method +// StateLookupID mocks base method. func (m *MockFullNode) StateLookupID(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) (address.Address, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "StateLookupID", arg0, arg1, arg2) @@ -2164,13 +2224,13 @@ func (m *MockFullNode) StateLookupID(arg0 context.Context, arg1 address.Address, return ret0, ret1 } -// StateLookupID indicates an expected call of StateLookupID +// StateLookupID indicates an expected call of StateLookupID. func (mr *MockFullNodeMockRecorder) StateLookupID(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateLookupID", reflect.TypeOf((*MockFullNode)(nil).StateLookupID), arg0, arg1, arg2) } -// StateMarketBalance mocks base method +// StateMarketBalance mocks base method. 
func (m *MockFullNode) StateMarketBalance(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) (api.MarketBalance, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "StateMarketBalance", arg0, arg1, arg2) @@ -2179,13 +2239,13 @@ func (m *MockFullNode) StateMarketBalance(arg0 context.Context, arg1 address.Add return ret0, ret1 } -// StateMarketBalance indicates an expected call of StateMarketBalance +// StateMarketBalance indicates an expected call of StateMarketBalance. func (mr *MockFullNodeMockRecorder) StateMarketBalance(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMarketBalance", reflect.TypeOf((*MockFullNode)(nil).StateMarketBalance), arg0, arg1, arg2) } -// StateMarketDeals mocks base method +// StateMarketDeals mocks base method. func (m *MockFullNode) StateMarketDeals(arg0 context.Context, arg1 types.TipSetKey) (map[string]api.MarketDeal, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "StateMarketDeals", arg0, arg1) @@ -2194,13 +2254,13 @@ func (m *MockFullNode) StateMarketDeals(arg0 context.Context, arg1 types.TipSetK return ret0, ret1 } -// StateMarketDeals indicates an expected call of StateMarketDeals +// StateMarketDeals indicates an expected call of StateMarketDeals. func (mr *MockFullNodeMockRecorder) StateMarketDeals(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMarketDeals", reflect.TypeOf((*MockFullNode)(nil).StateMarketDeals), arg0, arg1) } -// StateMarketParticipants mocks base method +// StateMarketParticipants mocks base method. func (m *MockFullNode) StateMarketParticipants(arg0 context.Context, arg1 types.TipSetKey) (map[string]api.MarketBalance, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "StateMarketParticipants", arg0, arg1) @@ -2209,13 +2269,13 @@ func (m *MockFullNode) StateMarketParticipants(arg0 context.Context, arg1 types. 
return ret0, ret1 } -// StateMarketParticipants indicates an expected call of StateMarketParticipants +// StateMarketParticipants indicates an expected call of StateMarketParticipants. func (mr *MockFullNodeMockRecorder) StateMarketParticipants(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMarketParticipants", reflect.TypeOf((*MockFullNode)(nil).StateMarketParticipants), arg0, arg1) } -// StateMarketStorageDeal mocks base method +// StateMarketStorageDeal mocks base method. func (m *MockFullNode) StateMarketStorageDeal(arg0 context.Context, arg1 abi.DealID, arg2 types.TipSetKey) (*api.MarketDeal, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "StateMarketStorageDeal", arg0, arg1, arg2) @@ -2224,13 +2284,13 @@ func (m *MockFullNode) StateMarketStorageDeal(arg0 context.Context, arg1 abi.Dea return ret0, ret1 } -// StateMarketStorageDeal indicates an expected call of StateMarketStorageDeal +// StateMarketStorageDeal indicates an expected call of StateMarketStorageDeal. func (mr *MockFullNodeMockRecorder) StateMarketStorageDeal(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMarketStorageDeal", reflect.TypeOf((*MockFullNode)(nil).StateMarketStorageDeal), arg0, arg1, arg2) } -// StateMinerActiveSectors mocks base method +// StateMinerActiveSectors mocks base method. func (m *MockFullNode) StateMinerActiveSectors(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) ([]*miner.SectorOnChainInfo, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "StateMinerActiveSectors", arg0, arg1, arg2) @@ -2239,13 +2299,13 @@ func (m *MockFullNode) StateMinerActiveSectors(arg0 context.Context, arg1 addres return ret0, ret1 } -// StateMinerActiveSectors indicates an expected call of StateMinerActiveSectors +// StateMinerActiveSectors indicates an expected call of StateMinerActiveSectors. 
func (mr *MockFullNodeMockRecorder) StateMinerActiveSectors(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMinerActiveSectors", reflect.TypeOf((*MockFullNode)(nil).StateMinerActiveSectors), arg0, arg1, arg2) } -// StateMinerAvailableBalance mocks base method +// StateMinerAvailableBalance mocks base method. func (m *MockFullNode) StateMinerAvailableBalance(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) (big.Int, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "StateMinerAvailableBalance", arg0, arg1, arg2) @@ -2254,13 +2314,13 @@ func (m *MockFullNode) StateMinerAvailableBalance(arg0 context.Context, arg1 add return ret0, ret1 } -// StateMinerAvailableBalance indicates an expected call of StateMinerAvailableBalance +// StateMinerAvailableBalance indicates an expected call of StateMinerAvailableBalance. func (mr *MockFullNodeMockRecorder) StateMinerAvailableBalance(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMinerAvailableBalance", reflect.TypeOf((*MockFullNode)(nil).StateMinerAvailableBalance), arg0, arg1, arg2) } -// StateMinerDeadlines mocks base method +// StateMinerDeadlines mocks base method. func (m *MockFullNode) StateMinerDeadlines(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) ([]api.Deadline, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "StateMinerDeadlines", arg0, arg1, arg2) @@ -2269,13 +2329,13 @@ func (m *MockFullNode) StateMinerDeadlines(arg0 context.Context, arg1 address.Ad return ret0, ret1 } -// StateMinerDeadlines indicates an expected call of StateMinerDeadlines +// StateMinerDeadlines indicates an expected call of StateMinerDeadlines. 
func (mr *MockFullNodeMockRecorder) StateMinerDeadlines(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMinerDeadlines", reflect.TypeOf((*MockFullNode)(nil).StateMinerDeadlines), arg0, arg1, arg2) } -// StateMinerFaults mocks base method +// StateMinerFaults mocks base method. func (m *MockFullNode) StateMinerFaults(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) (bitfield.BitField, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "StateMinerFaults", arg0, arg1, arg2) @@ -2284,13 +2344,13 @@ func (m *MockFullNode) StateMinerFaults(arg0 context.Context, arg1 address.Addre return ret0, ret1 } -// StateMinerFaults indicates an expected call of StateMinerFaults +// StateMinerFaults indicates an expected call of StateMinerFaults. func (mr *MockFullNodeMockRecorder) StateMinerFaults(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMinerFaults", reflect.TypeOf((*MockFullNode)(nil).StateMinerFaults), arg0, arg1, arg2) } -// StateMinerInfo mocks base method +// StateMinerInfo mocks base method. func (m *MockFullNode) StateMinerInfo(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) (miner.MinerInfo, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "StateMinerInfo", arg0, arg1, arg2) @@ -2299,13 +2359,13 @@ func (m *MockFullNode) StateMinerInfo(arg0 context.Context, arg1 address.Address return ret0, ret1 } -// StateMinerInfo indicates an expected call of StateMinerInfo +// StateMinerInfo indicates an expected call of StateMinerInfo. 
func (mr *MockFullNodeMockRecorder) StateMinerInfo(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMinerInfo", reflect.TypeOf((*MockFullNode)(nil).StateMinerInfo), arg0, arg1, arg2) } -// StateMinerInitialPledgeCollateral mocks base method +// StateMinerInitialPledgeCollateral mocks base method. func (m *MockFullNode) StateMinerInitialPledgeCollateral(arg0 context.Context, arg1 address.Address, arg2 miner0.SectorPreCommitInfo, arg3 types.TipSetKey) (big.Int, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "StateMinerInitialPledgeCollateral", arg0, arg1, arg2, arg3) @@ -2314,13 +2374,13 @@ func (m *MockFullNode) StateMinerInitialPledgeCollateral(arg0 context.Context, a return ret0, ret1 } -// StateMinerInitialPledgeCollateral indicates an expected call of StateMinerInitialPledgeCollateral +// StateMinerInitialPledgeCollateral indicates an expected call of StateMinerInitialPledgeCollateral. func (mr *MockFullNodeMockRecorder) StateMinerInitialPledgeCollateral(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMinerInitialPledgeCollateral", reflect.TypeOf((*MockFullNode)(nil).StateMinerInitialPledgeCollateral), arg0, arg1, arg2, arg3) } -// StateMinerPartitions mocks base method +// StateMinerPartitions mocks base method. func (m *MockFullNode) StateMinerPartitions(arg0 context.Context, arg1 address.Address, arg2 uint64, arg3 types.TipSetKey) ([]api.Partition, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "StateMinerPartitions", arg0, arg1, arg2, arg3) @@ -2329,13 +2389,13 @@ func (m *MockFullNode) StateMinerPartitions(arg0 context.Context, arg1 address.A return ret0, ret1 } -// StateMinerPartitions indicates an expected call of StateMinerPartitions +// StateMinerPartitions indicates an expected call of StateMinerPartitions. 
func (mr *MockFullNodeMockRecorder) StateMinerPartitions(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMinerPartitions", reflect.TypeOf((*MockFullNode)(nil).StateMinerPartitions), arg0, arg1, arg2, arg3) } -// StateMinerPower mocks base method +// StateMinerPower mocks base method. func (m *MockFullNode) StateMinerPower(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) (*api.MinerPower, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "StateMinerPower", arg0, arg1, arg2) @@ -2344,13 +2404,13 @@ func (m *MockFullNode) StateMinerPower(arg0 context.Context, arg1 address.Addres return ret0, ret1 } -// StateMinerPower indicates an expected call of StateMinerPower +// StateMinerPower indicates an expected call of StateMinerPower. func (mr *MockFullNodeMockRecorder) StateMinerPower(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMinerPower", reflect.TypeOf((*MockFullNode)(nil).StateMinerPower), arg0, arg1, arg2) } -// StateMinerPreCommitDepositForPower mocks base method +// StateMinerPreCommitDepositForPower mocks base method. func (m *MockFullNode) StateMinerPreCommitDepositForPower(arg0 context.Context, arg1 address.Address, arg2 miner0.SectorPreCommitInfo, arg3 types.TipSetKey) (big.Int, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "StateMinerPreCommitDepositForPower", arg0, arg1, arg2, arg3) @@ -2359,13 +2419,13 @@ func (m *MockFullNode) StateMinerPreCommitDepositForPower(arg0 context.Context, return ret0, ret1 } -// StateMinerPreCommitDepositForPower indicates an expected call of StateMinerPreCommitDepositForPower +// StateMinerPreCommitDepositForPower indicates an expected call of StateMinerPreCommitDepositForPower. 
func (mr *MockFullNodeMockRecorder) StateMinerPreCommitDepositForPower(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMinerPreCommitDepositForPower", reflect.TypeOf((*MockFullNode)(nil).StateMinerPreCommitDepositForPower), arg0, arg1, arg2, arg3) } -// StateMinerProvingDeadline mocks base method +// StateMinerProvingDeadline mocks base method. func (m *MockFullNode) StateMinerProvingDeadline(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) (*dline.Info, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "StateMinerProvingDeadline", arg0, arg1, arg2) @@ -2374,13 +2434,13 @@ func (m *MockFullNode) StateMinerProvingDeadline(arg0 context.Context, arg1 addr return ret0, ret1 } -// StateMinerProvingDeadline indicates an expected call of StateMinerProvingDeadline +// StateMinerProvingDeadline indicates an expected call of StateMinerProvingDeadline. func (mr *MockFullNodeMockRecorder) StateMinerProvingDeadline(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMinerProvingDeadline", reflect.TypeOf((*MockFullNode)(nil).StateMinerProvingDeadline), arg0, arg1, arg2) } -// StateMinerRecoveries mocks base method +// StateMinerRecoveries mocks base method. func (m *MockFullNode) StateMinerRecoveries(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) (bitfield.BitField, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "StateMinerRecoveries", arg0, arg1, arg2) @@ -2389,13 +2449,13 @@ func (m *MockFullNode) StateMinerRecoveries(arg0 context.Context, arg1 address.A return ret0, ret1 } -// StateMinerRecoveries indicates an expected call of StateMinerRecoveries +// StateMinerRecoveries indicates an expected call of StateMinerRecoveries. 
func (mr *MockFullNodeMockRecorder) StateMinerRecoveries(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMinerRecoveries", reflect.TypeOf((*MockFullNode)(nil).StateMinerRecoveries), arg0, arg1, arg2) } -// StateMinerSectorAllocated mocks base method +// StateMinerSectorAllocated mocks base method. func (m *MockFullNode) StateMinerSectorAllocated(arg0 context.Context, arg1 address.Address, arg2 abi.SectorNumber, arg3 types.TipSetKey) (bool, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "StateMinerSectorAllocated", arg0, arg1, arg2, arg3) @@ -2404,13 +2464,13 @@ func (m *MockFullNode) StateMinerSectorAllocated(arg0 context.Context, arg1 addr return ret0, ret1 } -// StateMinerSectorAllocated indicates an expected call of StateMinerSectorAllocated +// StateMinerSectorAllocated indicates an expected call of StateMinerSectorAllocated. func (mr *MockFullNodeMockRecorder) StateMinerSectorAllocated(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMinerSectorAllocated", reflect.TypeOf((*MockFullNode)(nil).StateMinerSectorAllocated), arg0, arg1, arg2, arg3) } -// StateMinerSectorCount mocks base method +// StateMinerSectorCount mocks base method. func (m *MockFullNode) StateMinerSectorCount(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) (api.MinerSectors, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "StateMinerSectorCount", arg0, arg1, arg2) @@ -2419,13 +2479,13 @@ func (m *MockFullNode) StateMinerSectorCount(arg0 context.Context, arg1 address. return ret0, ret1 } -// StateMinerSectorCount indicates an expected call of StateMinerSectorCount +// StateMinerSectorCount indicates an expected call of StateMinerSectorCount. 
func (mr *MockFullNodeMockRecorder) StateMinerSectorCount(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMinerSectorCount", reflect.TypeOf((*MockFullNode)(nil).StateMinerSectorCount), arg0, arg1, arg2) } -// StateMinerSectors mocks base method +// StateMinerSectors mocks base method. func (m *MockFullNode) StateMinerSectors(arg0 context.Context, arg1 address.Address, arg2 *bitfield.BitField, arg3 types.TipSetKey) ([]*miner.SectorOnChainInfo, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "StateMinerSectors", arg0, arg1, arg2, arg3) @@ -2434,13 +2494,13 @@ func (m *MockFullNode) StateMinerSectors(arg0 context.Context, arg1 address.Addr return ret0, ret1 } -// StateMinerSectors indicates an expected call of StateMinerSectors +// StateMinerSectors indicates an expected call of StateMinerSectors. func (mr *MockFullNodeMockRecorder) StateMinerSectors(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMinerSectors", reflect.TypeOf((*MockFullNode)(nil).StateMinerSectors), arg0, arg1, arg2, arg3) } -// StateNetworkName mocks base method +// StateNetworkName mocks base method. func (m *MockFullNode) StateNetworkName(arg0 context.Context) (dtypes.NetworkName, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "StateNetworkName", arg0) @@ -2449,13 +2509,13 @@ func (m *MockFullNode) StateNetworkName(arg0 context.Context) (dtypes.NetworkNam return ret0, ret1 } -// StateNetworkName indicates an expected call of StateNetworkName +// StateNetworkName indicates an expected call of StateNetworkName. 
func (mr *MockFullNodeMockRecorder) StateNetworkName(arg0 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateNetworkName", reflect.TypeOf((*MockFullNode)(nil).StateNetworkName), arg0) } -// StateNetworkVersion mocks base method +// StateNetworkVersion mocks base method. func (m *MockFullNode) StateNetworkVersion(arg0 context.Context, arg1 types.TipSetKey) (network.Version, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "StateNetworkVersion", arg0, arg1) @@ -2464,13 +2524,13 @@ func (m *MockFullNode) StateNetworkVersion(arg0 context.Context, arg1 types.TipS return ret0, ret1 } -// StateNetworkVersion indicates an expected call of StateNetworkVersion +// StateNetworkVersion indicates an expected call of StateNetworkVersion. func (mr *MockFullNodeMockRecorder) StateNetworkVersion(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateNetworkVersion", reflect.TypeOf((*MockFullNode)(nil).StateNetworkVersion), arg0, arg1) } -// StateReadState mocks base method +// StateReadState mocks base method. func (m *MockFullNode) StateReadState(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) (*api.ActorState, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "StateReadState", arg0, arg1, arg2) @@ -2479,13 +2539,13 @@ func (m *MockFullNode) StateReadState(arg0 context.Context, arg1 address.Address return ret0, ret1 } -// StateReadState indicates an expected call of StateReadState +// StateReadState indicates an expected call of StateReadState. func (mr *MockFullNodeMockRecorder) StateReadState(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateReadState", reflect.TypeOf((*MockFullNode)(nil).StateReadState), arg0, arg1, arg2) } -// StateReplay mocks base method +// StateReplay mocks base method. 
func (m *MockFullNode) StateReplay(arg0 context.Context, arg1 types.TipSetKey, arg2 cid.Cid) (*api.InvocResult, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "StateReplay", arg0, arg1, arg2) @@ -2494,13 +2554,13 @@ func (m *MockFullNode) StateReplay(arg0 context.Context, arg1 types.TipSetKey, a return ret0, ret1 } -// StateReplay indicates an expected call of StateReplay +// StateReplay indicates an expected call of StateReplay. func (mr *MockFullNodeMockRecorder) StateReplay(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateReplay", reflect.TypeOf((*MockFullNode)(nil).StateReplay), arg0, arg1, arg2) } -// StateSearchMsg mocks base method +// StateSearchMsg mocks base method. func (m *MockFullNode) StateSearchMsg(arg0 context.Context, arg1 cid.Cid) (*api.MsgLookup, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "StateSearchMsg", arg0, arg1) @@ -2509,13 +2569,13 @@ func (m *MockFullNode) StateSearchMsg(arg0 context.Context, arg1 cid.Cid) (*api. return ret0, ret1 } -// StateSearchMsg indicates an expected call of StateSearchMsg +// StateSearchMsg indicates an expected call of StateSearchMsg. func (mr *MockFullNodeMockRecorder) StateSearchMsg(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateSearchMsg", reflect.TypeOf((*MockFullNode)(nil).StateSearchMsg), arg0, arg1) } -// StateSearchMsgLimited mocks base method +// StateSearchMsgLimited mocks base method. 
func (m *MockFullNode) StateSearchMsgLimited(arg0 context.Context, arg1 cid.Cid, arg2 abi.ChainEpoch) (*api.MsgLookup, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "StateSearchMsgLimited", arg0, arg1, arg2) @@ -2524,13 +2584,13 @@ func (m *MockFullNode) StateSearchMsgLimited(arg0 context.Context, arg1 cid.Cid, return ret0, ret1 } -// StateSearchMsgLimited indicates an expected call of StateSearchMsgLimited +// StateSearchMsgLimited indicates an expected call of StateSearchMsgLimited. func (mr *MockFullNodeMockRecorder) StateSearchMsgLimited(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateSearchMsgLimited", reflect.TypeOf((*MockFullNode)(nil).StateSearchMsgLimited), arg0, arg1, arg2) } -// StateSectorExpiration mocks base method +// StateSectorExpiration mocks base method. func (m *MockFullNode) StateSectorExpiration(arg0 context.Context, arg1 address.Address, arg2 abi.SectorNumber, arg3 types.TipSetKey) (*miner.SectorExpiration, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "StateSectorExpiration", arg0, arg1, arg2, arg3) @@ -2539,13 +2599,13 @@ func (m *MockFullNode) StateSectorExpiration(arg0 context.Context, arg1 address. return ret0, ret1 } -// StateSectorExpiration indicates an expected call of StateSectorExpiration +// StateSectorExpiration indicates an expected call of StateSectorExpiration. func (mr *MockFullNodeMockRecorder) StateSectorExpiration(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateSectorExpiration", reflect.TypeOf((*MockFullNode)(nil).StateSectorExpiration), arg0, arg1, arg2, arg3) } -// StateSectorGetInfo mocks base method +// StateSectorGetInfo mocks base method. 
func (m *MockFullNode) StateSectorGetInfo(arg0 context.Context, arg1 address.Address, arg2 abi.SectorNumber, arg3 types.TipSetKey) (*miner.SectorOnChainInfo, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "StateSectorGetInfo", arg0, arg1, arg2, arg3) @@ -2554,13 +2614,13 @@ func (m *MockFullNode) StateSectorGetInfo(arg0 context.Context, arg1 address.Add return ret0, ret1 } -// StateSectorGetInfo indicates an expected call of StateSectorGetInfo +// StateSectorGetInfo indicates an expected call of StateSectorGetInfo. func (mr *MockFullNodeMockRecorder) StateSectorGetInfo(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateSectorGetInfo", reflect.TypeOf((*MockFullNode)(nil).StateSectorGetInfo), arg0, arg1, arg2, arg3) } -// StateSectorPartition mocks base method +// StateSectorPartition mocks base method. func (m *MockFullNode) StateSectorPartition(arg0 context.Context, arg1 address.Address, arg2 abi.SectorNumber, arg3 types.TipSetKey) (*miner.SectorLocation, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "StateSectorPartition", arg0, arg1, arg2, arg3) @@ -2569,13 +2629,13 @@ func (m *MockFullNode) StateSectorPartition(arg0 context.Context, arg1 address.A return ret0, ret1 } -// StateSectorPartition indicates an expected call of StateSectorPartition +// StateSectorPartition indicates an expected call of StateSectorPartition. func (mr *MockFullNodeMockRecorder) StateSectorPartition(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateSectorPartition", reflect.TypeOf((*MockFullNode)(nil).StateSectorPartition), arg0, arg1, arg2, arg3) } -// StateSectorPreCommitInfo mocks base method +// StateSectorPreCommitInfo mocks base method. 
func (m *MockFullNode) StateSectorPreCommitInfo(arg0 context.Context, arg1 address.Address, arg2 abi.SectorNumber, arg3 types.TipSetKey) (miner.SectorPreCommitOnChainInfo, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "StateSectorPreCommitInfo", arg0, arg1, arg2, arg3) @@ -2584,13 +2644,13 @@ func (m *MockFullNode) StateSectorPreCommitInfo(arg0 context.Context, arg1 addre return ret0, ret1 } -// StateSectorPreCommitInfo indicates an expected call of StateSectorPreCommitInfo +// StateSectorPreCommitInfo indicates an expected call of StateSectorPreCommitInfo. func (mr *MockFullNodeMockRecorder) StateSectorPreCommitInfo(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateSectorPreCommitInfo", reflect.TypeOf((*MockFullNode)(nil).StateSectorPreCommitInfo), arg0, arg1, arg2, arg3) } -// StateVMCirculatingSupplyInternal mocks base method +// StateVMCirculatingSupplyInternal mocks base method. func (m *MockFullNode) StateVMCirculatingSupplyInternal(arg0 context.Context, arg1 types.TipSetKey) (api.CirculatingSupply, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "StateVMCirculatingSupplyInternal", arg0, arg1) @@ -2599,13 +2659,13 @@ func (m *MockFullNode) StateVMCirculatingSupplyInternal(arg0 context.Context, ar return ret0, ret1 } -// StateVMCirculatingSupplyInternal indicates an expected call of StateVMCirculatingSupplyInternal +// StateVMCirculatingSupplyInternal indicates an expected call of StateVMCirculatingSupplyInternal. func (mr *MockFullNodeMockRecorder) StateVMCirculatingSupplyInternal(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateVMCirculatingSupplyInternal", reflect.TypeOf((*MockFullNode)(nil).StateVMCirculatingSupplyInternal), arg0, arg1) } -// StateVerifiedClientStatus mocks base method +// StateVerifiedClientStatus mocks base method. 
func (m *MockFullNode) StateVerifiedClientStatus(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) (*big.Int, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "StateVerifiedClientStatus", arg0, arg1, arg2) @@ -2614,13 +2674,13 @@ func (m *MockFullNode) StateVerifiedClientStatus(arg0 context.Context, arg1 addr return ret0, ret1 } -// StateVerifiedClientStatus indicates an expected call of StateVerifiedClientStatus +// StateVerifiedClientStatus indicates an expected call of StateVerifiedClientStatus. func (mr *MockFullNodeMockRecorder) StateVerifiedClientStatus(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateVerifiedClientStatus", reflect.TypeOf((*MockFullNode)(nil).StateVerifiedClientStatus), arg0, arg1, arg2) } -// StateVerifiedRegistryRootKey mocks base method +// StateVerifiedRegistryRootKey mocks base method. func (m *MockFullNode) StateVerifiedRegistryRootKey(arg0 context.Context, arg1 types.TipSetKey) (address.Address, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "StateVerifiedRegistryRootKey", arg0, arg1) @@ -2629,13 +2689,13 @@ func (m *MockFullNode) StateVerifiedRegistryRootKey(arg0 context.Context, arg1 t return ret0, ret1 } -// StateVerifiedRegistryRootKey indicates an expected call of StateVerifiedRegistryRootKey +// StateVerifiedRegistryRootKey indicates an expected call of StateVerifiedRegistryRootKey. func (mr *MockFullNodeMockRecorder) StateVerifiedRegistryRootKey(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateVerifiedRegistryRootKey", reflect.TypeOf((*MockFullNode)(nil).StateVerifiedRegistryRootKey), arg0, arg1) } -// StateVerifierStatus mocks base method +// StateVerifierStatus mocks base method. 
func (m *MockFullNode) StateVerifierStatus(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) (*big.Int, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "StateVerifierStatus", arg0, arg1, arg2) @@ -2644,13 +2704,13 @@ func (m *MockFullNode) StateVerifierStatus(arg0 context.Context, arg1 address.Ad return ret0, ret1 } -// StateVerifierStatus indicates an expected call of StateVerifierStatus +// StateVerifierStatus indicates an expected call of StateVerifierStatus. func (mr *MockFullNodeMockRecorder) StateVerifierStatus(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateVerifierStatus", reflect.TypeOf((*MockFullNode)(nil).StateVerifierStatus), arg0, arg1, arg2) } -// StateWaitMsg mocks base method +// StateWaitMsg mocks base method. func (m *MockFullNode) StateWaitMsg(arg0 context.Context, arg1 cid.Cid, arg2 uint64) (*api.MsgLookup, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "StateWaitMsg", arg0, arg1, arg2) @@ -2659,13 +2719,13 @@ func (m *MockFullNode) StateWaitMsg(arg0 context.Context, arg1 cid.Cid, arg2 uin return ret0, ret1 } -// StateWaitMsg indicates an expected call of StateWaitMsg +// StateWaitMsg indicates an expected call of StateWaitMsg. func (mr *MockFullNodeMockRecorder) StateWaitMsg(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateWaitMsg", reflect.TypeOf((*MockFullNode)(nil).StateWaitMsg), arg0, arg1, arg2) } -// StateWaitMsgLimited mocks base method +// StateWaitMsgLimited mocks base method. 
func (m *MockFullNode) StateWaitMsgLimited(arg0 context.Context, arg1 cid.Cid, arg2 uint64, arg3 abi.ChainEpoch) (*api.MsgLookup, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "StateWaitMsgLimited", arg0, arg1, arg2, arg3) @@ -2674,13 +2734,13 @@ func (m *MockFullNode) StateWaitMsgLimited(arg0 context.Context, arg1 cid.Cid, a return ret0, ret1 } -// StateWaitMsgLimited indicates an expected call of StateWaitMsgLimited +// StateWaitMsgLimited indicates an expected call of StateWaitMsgLimited. func (mr *MockFullNodeMockRecorder) StateWaitMsgLimited(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateWaitMsgLimited", reflect.TypeOf((*MockFullNode)(nil).StateWaitMsgLimited), arg0, arg1, arg2, arg3) } -// SyncCheckBad mocks base method +// SyncCheckBad mocks base method. func (m *MockFullNode) SyncCheckBad(arg0 context.Context, arg1 cid.Cid) (string, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "SyncCheckBad", arg0, arg1) @@ -2689,13 +2749,13 @@ func (m *MockFullNode) SyncCheckBad(arg0 context.Context, arg1 cid.Cid) (string, return ret0, ret1 } -// SyncCheckBad indicates an expected call of SyncCheckBad +// SyncCheckBad indicates an expected call of SyncCheckBad. func (mr *MockFullNodeMockRecorder) SyncCheckBad(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SyncCheckBad", reflect.TypeOf((*MockFullNode)(nil).SyncCheckBad), arg0, arg1) } -// SyncCheckpoint mocks base method +// SyncCheckpoint mocks base method. func (m *MockFullNode) SyncCheckpoint(arg0 context.Context, arg1 types.TipSetKey) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "SyncCheckpoint", arg0, arg1) @@ -2703,13 +2763,13 @@ func (m *MockFullNode) SyncCheckpoint(arg0 context.Context, arg1 types.TipSetKey return ret0 } -// SyncCheckpoint indicates an expected call of SyncCheckpoint +// SyncCheckpoint indicates an expected call of SyncCheckpoint. 
func (mr *MockFullNodeMockRecorder) SyncCheckpoint(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SyncCheckpoint", reflect.TypeOf((*MockFullNode)(nil).SyncCheckpoint), arg0, arg1) } -// SyncIncomingBlocks mocks base method +// SyncIncomingBlocks mocks base method. func (m *MockFullNode) SyncIncomingBlocks(arg0 context.Context) (<-chan *types.BlockHeader, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "SyncIncomingBlocks", arg0) @@ -2718,13 +2778,13 @@ func (m *MockFullNode) SyncIncomingBlocks(arg0 context.Context) (<-chan *types.B return ret0, ret1 } -// SyncIncomingBlocks indicates an expected call of SyncIncomingBlocks +// SyncIncomingBlocks indicates an expected call of SyncIncomingBlocks. func (mr *MockFullNodeMockRecorder) SyncIncomingBlocks(arg0 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SyncIncomingBlocks", reflect.TypeOf((*MockFullNode)(nil).SyncIncomingBlocks), arg0) } -// SyncMarkBad mocks base method +// SyncMarkBad mocks base method. func (m *MockFullNode) SyncMarkBad(arg0 context.Context, arg1 cid.Cid) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "SyncMarkBad", arg0, arg1) @@ -2732,13 +2792,13 @@ func (m *MockFullNode) SyncMarkBad(arg0 context.Context, arg1 cid.Cid) error { return ret0 } -// SyncMarkBad indicates an expected call of SyncMarkBad +// SyncMarkBad indicates an expected call of SyncMarkBad. func (mr *MockFullNodeMockRecorder) SyncMarkBad(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SyncMarkBad", reflect.TypeOf((*MockFullNode)(nil).SyncMarkBad), arg0, arg1) } -// SyncState mocks base method +// SyncState mocks base method. 
func (m *MockFullNode) SyncState(arg0 context.Context) (*api.SyncState, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "SyncState", arg0) @@ -2747,13 +2807,13 @@ func (m *MockFullNode) SyncState(arg0 context.Context) (*api.SyncState, error) { return ret0, ret1 } -// SyncState indicates an expected call of SyncState +// SyncState indicates an expected call of SyncState. func (mr *MockFullNodeMockRecorder) SyncState(arg0 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SyncState", reflect.TypeOf((*MockFullNode)(nil).SyncState), arg0) } -// SyncSubmitBlock mocks base method +// SyncSubmitBlock mocks base method. func (m *MockFullNode) SyncSubmitBlock(arg0 context.Context, arg1 *types.BlockMsg) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "SyncSubmitBlock", arg0, arg1) @@ -2761,13 +2821,13 @@ func (m *MockFullNode) SyncSubmitBlock(arg0 context.Context, arg1 *types.BlockMs return ret0 } -// SyncSubmitBlock indicates an expected call of SyncSubmitBlock +// SyncSubmitBlock indicates an expected call of SyncSubmitBlock. func (mr *MockFullNodeMockRecorder) SyncSubmitBlock(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SyncSubmitBlock", reflect.TypeOf((*MockFullNode)(nil).SyncSubmitBlock), arg0, arg1) } -// SyncUnmarkAllBad mocks base method +// SyncUnmarkAllBad mocks base method. func (m *MockFullNode) SyncUnmarkAllBad(arg0 context.Context) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "SyncUnmarkAllBad", arg0) @@ -2775,13 +2835,13 @@ func (m *MockFullNode) SyncUnmarkAllBad(arg0 context.Context) error { return ret0 } -// SyncUnmarkAllBad indicates an expected call of SyncUnmarkAllBad +// SyncUnmarkAllBad indicates an expected call of SyncUnmarkAllBad. 
func (mr *MockFullNodeMockRecorder) SyncUnmarkAllBad(arg0 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SyncUnmarkAllBad", reflect.TypeOf((*MockFullNode)(nil).SyncUnmarkAllBad), arg0) } -// SyncUnmarkBad mocks base method +// SyncUnmarkBad mocks base method. func (m *MockFullNode) SyncUnmarkBad(arg0 context.Context, arg1 cid.Cid) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "SyncUnmarkBad", arg0, arg1) @@ -2789,13 +2849,13 @@ func (m *MockFullNode) SyncUnmarkBad(arg0 context.Context, arg1 cid.Cid) error { return ret0 } -// SyncUnmarkBad indicates an expected call of SyncUnmarkBad +// SyncUnmarkBad indicates an expected call of SyncUnmarkBad. func (mr *MockFullNodeMockRecorder) SyncUnmarkBad(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SyncUnmarkBad", reflect.TypeOf((*MockFullNode)(nil).SyncUnmarkBad), arg0, arg1) } -// SyncValidateTipset mocks base method +// SyncValidateTipset mocks base method. func (m *MockFullNode) SyncValidateTipset(arg0 context.Context, arg1 types.TipSetKey) (bool, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "SyncValidateTipset", arg0, arg1) @@ -2804,13 +2864,13 @@ func (m *MockFullNode) SyncValidateTipset(arg0 context.Context, arg1 types.TipSe return ret0, ret1 } -// SyncValidateTipset indicates an expected call of SyncValidateTipset +// SyncValidateTipset indicates an expected call of SyncValidateTipset. func (mr *MockFullNodeMockRecorder) SyncValidateTipset(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SyncValidateTipset", reflect.TypeOf((*MockFullNode)(nil).SyncValidateTipset), arg0, arg1) } -// Version mocks base method +// Version mocks base method. 
func (m *MockFullNode) Version(arg0 context.Context) (api.APIVersion, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Version", arg0) @@ -2819,13 +2879,13 @@ func (m *MockFullNode) Version(arg0 context.Context) (api.APIVersion, error) { return ret0, ret1 } -// Version indicates an expected call of Version +// Version indicates an expected call of Version. func (mr *MockFullNodeMockRecorder) Version(arg0 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Version", reflect.TypeOf((*MockFullNode)(nil).Version), arg0) } -// WalletBalance mocks base method +// WalletBalance mocks base method. func (m *MockFullNode) WalletBalance(arg0 context.Context, arg1 address.Address) (big.Int, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "WalletBalance", arg0, arg1) @@ -2834,13 +2894,13 @@ func (m *MockFullNode) WalletBalance(arg0 context.Context, arg1 address.Address) return ret0, ret1 } -// WalletBalance indicates an expected call of WalletBalance +// WalletBalance indicates an expected call of WalletBalance. func (mr *MockFullNodeMockRecorder) WalletBalance(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WalletBalance", reflect.TypeOf((*MockFullNode)(nil).WalletBalance), arg0, arg1) } -// WalletDefaultAddress mocks base method +// WalletDefaultAddress mocks base method. func (m *MockFullNode) WalletDefaultAddress(arg0 context.Context) (address.Address, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "WalletDefaultAddress", arg0) @@ -2849,13 +2909,13 @@ func (m *MockFullNode) WalletDefaultAddress(arg0 context.Context) (address.Addre return ret0, ret1 } -// WalletDefaultAddress indicates an expected call of WalletDefaultAddress +// WalletDefaultAddress indicates an expected call of WalletDefaultAddress. 
func (mr *MockFullNodeMockRecorder) WalletDefaultAddress(arg0 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WalletDefaultAddress", reflect.TypeOf((*MockFullNode)(nil).WalletDefaultAddress), arg0) } -// WalletDelete mocks base method +// WalletDelete mocks base method. func (m *MockFullNode) WalletDelete(arg0 context.Context, arg1 address.Address) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "WalletDelete", arg0, arg1) @@ -2863,13 +2923,13 @@ func (m *MockFullNode) WalletDelete(arg0 context.Context, arg1 address.Address) return ret0 } -// WalletDelete indicates an expected call of WalletDelete +// WalletDelete indicates an expected call of WalletDelete. func (mr *MockFullNodeMockRecorder) WalletDelete(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WalletDelete", reflect.TypeOf((*MockFullNode)(nil).WalletDelete), arg0, arg1) } -// WalletExport mocks base method +// WalletExport mocks base method. func (m *MockFullNode) WalletExport(arg0 context.Context, arg1 address.Address) (*types.KeyInfo, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "WalletExport", arg0, arg1) @@ -2878,13 +2938,13 @@ func (m *MockFullNode) WalletExport(arg0 context.Context, arg1 address.Address) return ret0, ret1 } -// WalletExport indicates an expected call of WalletExport +// WalletExport indicates an expected call of WalletExport. func (mr *MockFullNodeMockRecorder) WalletExport(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WalletExport", reflect.TypeOf((*MockFullNode)(nil).WalletExport), arg0, arg1) } -// WalletHas mocks base method +// WalletHas mocks base method. 
func (m *MockFullNode) WalletHas(arg0 context.Context, arg1 address.Address) (bool, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "WalletHas", arg0, arg1) @@ -2893,13 +2953,13 @@ func (m *MockFullNode) WalletHas(arg0 context.Context, arg1 address.Address) (bo return ret0, ret1 } -// WalletHas indicates an expected call of WalletHas +// WalletHas indicates an expected call of WalletHas. func (mr *MockFullNodeMockRecorder) WalletHas(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WalletHas", reflect.TypeOf((*MockFullNode)(nil).WalletHas), arg0, arg1) } -// WalletImport mocks base method +// WalletImport mocks base method. func (m *MockFullNode) WalletImport(arg0 context.Context, arg1 *types.KeyInfo) (address.Address, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "WalletImport", arg0, arg1) @@ -2908,13 +2968,13 @@ func (m *MockFullNode) WalletImport(arg0 context.Context, arg1 *types.KeyInfo) ( return ret0, ret1 } -// WalletImport indicates an expected call of WalletImport +// WalletImport indicates an expected call of WalletImport. func (mr *MockFullNodeMockRecorder) WalletImport(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WalletImport", reflect.TypeOf((*MockFullNode)(nil).WalletImport), arg0, arg1) } -// WalletList mocks base method +// WalletList mocks base method. func (m *MockFullNode) WalletList(arg0 context.Context) ([]address.Address, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "WalletList", arg0) @@ -2923,13 +2983,13 @@ func (m *MockFullNode) WalletList(arg0 context.Context) ([]address.Address, erro return ret0, ret1 } -// WalletList indicates an expected call of WalletList +// WalletList indicates an expected call of WalletList. 
func (mr *MockFullNodeMockRecorder) WalletList(arg0 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WalletList", reflect.TypeOf((*MockFullNode)(nil).WalletList), arg0) } -// WalletNew mocks base method +// WalletNew mocks base method. func (m *MockFullNode) WalletNew(arg0 context.Context, arg1 types.KeyType) (address.Address, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "WalletNew", arg0, arg1) @@ -2938,13 +2998,13 @@ func (m *MockFullNode) WalletNew(arg0 context.Context, arg1 types.KeyType) (addr return ret0, ret1 } -// WalletNew indicates an expected call of WalletNew +// WalletNew indicates an expected call of WalletNew. func (mr *MockFullNodeMockRecorder) WalletNew(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WalletNew", reflect.TypeOf((*MockFullNode)(nil).WalletNew), arg0, arg1) } -// WalletSetDefault mocks base method +// WalletSetDefault mocks base method. func (m *MockFullNode) WalletSetDefault(arg0 context.Context, arg1 address.Address) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "WalletSetDefault", arg0, arg1) @@ -2952,13 +3012,13 @@ func (m *MockFullNode) WalletSetDefault(arg0 context.Context, arg1 address.Addre return ret0 } -// WalletSetDefault indicates an expected call of WalletSetDefault +// WalletSetDefault indicates an expected call of WalletSetDefault. func (mr *MockFullNodeMockRecorder) WalletSetDefault(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WalletSetDefault", reflect.TypeOf((*MockFullNode)(nil).WalletSetDefault), arg0, arg1) } -// WalletSign mocks base method +// WalletSign mocks base method. 
func (m *MockFullNode) WalletSign(arg0 context.Context, arg1 address.Address, arg2 []byte) (*crypto.Signature, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "WalletSign", arg0, arg1, arg2) @@ -2967,13 +3027,13 @@ func (m *MockFullNode) WalletSign(arg0 context.Context, arg1 address.Address, ar return ret0, ret1 } -// WalletSign indicates an expected call of WalletSign +// WalletSign indicates an expected call of WalletSign. func (mr *MockFullNodeMockRecorder) WalletSign(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WalletSign", reflect.TypeOf((*MockFullNode)(nil).WalletSign), arg0, arg1, arg2) } -// WalletSignMessage mocks base method +// WalletSignMessage mocks base method. func (m *MockFullNode) WalletSignMessage(arg0 context.Context, arg1 address.Address, arg2 *types.Message) (*types.SignedMessage, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "WalletSignMessage", arg0, arg1, arg2) @@ -2982,13 +3042,13 @@ func (m *MockFullNode) WalletSignMessage(arg0 context.Context, arg1 address.Addr return ret0, ret1 } -// WalletSignMessage indicates an expected call of WalletSignMessage +// WalletSignMessage indicates an expected call of WalletSignMessage. func (mr *MockFullNodeMockRecorder) WalletSignMessage(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WalletSignMessage", reflect.TypeOf((*MockFullNode)(nil).WalletSignMessage), arg0, arg1, arg2) } -// WalletValidateAddress mocks base method +// WalletValidateAddress mocks base method. 
func (m *MockFullNode) WalletValidateAddress(arg0 context.Context, arg1 string) (address.Address, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "WalletValidateAddress", arg0, arg1) @@ -2997,13 +3057,13 @@ func (m *MockFullNode) WalletValidateAddress(arg0 context.Context, arg1 string) return ret0, ret1 } -// WalletValidateAddress indicates an expected call of WalletValidateAddress +// WalletValidateAddress indicates an expected call of WalletValidateAddress. func (mr *MockFullNodeMockRecorder) WalletValidateAddress(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WalletValidateAddress", reflect.TypeOf((*MockFullNode)(nil).WalletValidateAddress), arg0, arg1) } -// WalletVerify mocks base method +// WalletVerify mocks base method. func (m *MockFullNode) WalletVerify(arg0 context.Context, arg1 address.Address, arg2 []byte, arg3 *crypto.Signature) (bool, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "WalletVerify", arg0, arg1, arg2, arg3) @@ -3012,7 +3072,7 @@ func (m *MockFullNode) WalletVerify(arg0 context.Context, arg1 address.Address, return ret0, ret1 } -// WalletVerify indicates an expected call of WalletVerify +// WalletVerify indicates an expected call of WalletVerify. 
func (mr *MockFullNodeMockRecorder) WalletVerify(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WalletVerify", reflect.TypeOf((*MockFullNode)(nil).WalletVerify), arg0, arg1, arg2, arg3) diff --git a/api/v0api/v1_wrapper.go b/api/v0api/v1_wrapper.go index e977c6b67..ff4474fe5 100644 --- a/api/v0api/v1_wrapper.go +++ b/api/v0api/v1_wrapper.go @@ -3,7 +3,9 @@ package v0api import ( "context" + "github.com/filecoin-project/go-address" "github.com/filecoin-project/lotus/chain/types" + "golang.org/x/xerrors" "github.com/ipfs/go-cid" @@ -57,4 +59,129 @@ func (w *WrapperV1Full) Version(ctx context.Context) (api.APIVersion, error) { return ver, nil } +func (w *WrapperV1Full) executePrototype(ctx context.Context, p *api.MessagePrototype) (cid.Cid, error) { + sm, err := w.FullNode.MpoolPushMessage(ctx, &p.Message, nil) + if err != nil { + return cid.Undef, xerrors.Errorf("pushing message: %w", err) + } + + return sm.Cid(), nil +} +func (w *WrapperV1Full) MsigCreate(ctx context.Context, req uint64, addrs []address.Address, duration abi.ChainEpoch, val types.BigInt, src address.Address, gp types.BigInt) (cid.Cid, error) { + + p, err := w.FullNode.MsigCreate(ctx, req, addrs, duration, val, src, gp) + if err != nil { + return cid.Undef, xerrors.Errorf("creating prototype: %w", err) + } + + return w.executePrototype(ctx, p) +} + +func (w *WrapperV1Full) MsigPropose(ctx context.Context, msig address.Address, to address.Address, amt types.BigInt, src address.Address, method uint64, params []byte) (cid.Cid, error) { + + p, err := w.FullNode.MsigPropose(ctx, msig, to, amt, src, method, params) + if err != nil { + return cid.Undef, xerrors.Errorf("creating prototype: %w", err) + } + + return w.executePrototype(ctx, p) +} +func (w *WrapperV1Full) MsigApprove(ctx context.Context, msig address.Address, txID uint64, src address.Address) (cid.Cid, error) { + + p, err := w.FullNode.MsigApprove(ctx, msig, txID, 
src) + if err != nil { + return cid.Undef, xerrors.Errorf("creating prototype: %w", err) + } + + return w.executePrototype(ctx, p) +} + +func (w *WrapperV1Full) MsigApproveTxnHash(ctx context.Context, msig address.Address, txID uint64, proposer address.Address, to address.Address, amt types.BigInt, src address.Address, method uint64, params []byte) (cid.Cid, error) { + p, err := w.FullNode.MsigApproveTxnHash(ctx, msig, txID, proposer, to, amt, src, method, params) + if err != nil { + return cid.Undef, xerrors.Errorf("creating prototype: %w", err) + } + + return w.executePrototype(ctx, p) +} + +func (w *WrapperV1Full) MsigCancel(ctx context.Context, msig address.Address, txID uint64, to address.Address, amt types.BigInt, src address.Address, method uint64, params []byte) (cid.Cid, error) { + p, err := w.FullNode.MsigCancel(ctx, msig, txID, to, amt, src, method, params) + if err != nil { + return cid.Undef, xerrors.Errorf("creating prototype: %w", err) + } + + return w.executePrototype(ctx, p) +} + +func (w *WrapperV1Full) MsigAddPropose(ctx context.Context, msig address.Address, src address.Address, newAdd address.Address, inc bool) (cid.Cid, error) { + + p, err := w.FullNode.MsigAddPropose(ctx, msig, src, newAdd, inc) + if err != nil { + return cid.Undef, xerrors.Errorf("creating prototype: %w", err) + } + + return w.executePrototype(ctx, p) +} + +func (w *WrapperV1Full) MsigAddApprove(ctx context.Context, msig address.Address, src address.Address, txID uint64, proposer address.Address, newAdd address.Address, inc bool) (cid.Cid, error) { + + p, err := w.FullNode.MsigAddApprove(ctx, msig, src, txID, proposer, newAdd, inc) + if err != nil { + return cid.Undef, xerrors.Errorf("creating prototype: %w", err) + } + + return w.executePrototype(ctx, p) +} + +func (w *WrapperV1Full) MsigAddCancel(ctx context.Context, msig address.Address, src address.Address, txID uint64, newAdd address.Address, inc bool) (cid.Cid, error) { + + p, err := w.FullNode.MsigAddCancel(ctx, msig, 
src, txID, newAdd, inc) + if err != nil { + return cid.Undef, xerrors.Errorf("creating prototype: %w", err) + } + + return w.executePrototype(ctx, p) +} + +func (w *WrapperV1Full) MsigSwapPropose(ctx context.Context, msig address.Address, src address.Address, oldAdd address.Address, newAdd address.Address) (cid.Cid, error) { + + p, err := w.FullNode.MsigSwapPropose(ctx, msig, src, oldAdd, newAdd) + if err != nil { + return cid.Undef, xerrors.Errorf("creating prototype: %w", err) + } + + return w.executePrototype(ctx, p) +} + +func (w *WrapperV1Full) MsigSwapApprove(ctx context.Context, msig address.Address, src address.Address, txID uint64, proposer address.Address, oldAdd address.Address, newAdd address.Address) (cid.Cid, error) { + + p, err := w.FullNode.MsigSwapApprove(ctx, msig, src, txID, proposer, oldAdd, newAdd) + if err != nil { + return cid.Undef, xerrors.Errorf("creating prototype: %w", err) + } + + return w.executePrototype(ctx, p) +} + +func (w *WrapperV1Full) MsigSwapCancel(ctx context.Context, msig address.Address, src address.Address, txID uint64, oldAdd address.Address, newAdd address.Address) (cid.Cid, error) { + + p, err := w.FullNode.MsigSwapCancel(ctx, msig, src, txID, oldAdd, newAdd) + if err != nil { + return cid.Undef, xerrors.Errorf("creating prototype: %w", err) + } + + return w.executePrototype(ctx, p) +} + +func (w *WrapperV1Full) MsigRemoveSigner(ctx context.Context, msig address.Address, proposer address.Address, toRemove address.Address, decrease bool) (cid.Cid, error) { + + p, err := w.FullNode.MsigRemoveSigner(ctx, msig, proposer, toRemove, decrease) + if err != nil { + return cid.Undef, xerrors.Errorf("creating prototype: %w", err) + } + + return w.executePrototype(ctx, p) +} + var _ FullNode = &WrapperV1Full{} diff --git a/api/version.go b/api/version.go index 743170f04..687f5135a 100644 --- a/api/version.go +++ b/api/version.go @@ -57,8 +57,8 @@ var ( FullAPIVersion0 = newVer(1, 3, 0) FullAPIVersion1 = newVer(2, 1, 0) - 
MinerAPIVersion0 = newVer(1, 0, 1) - WorkerAPIVersion0 = newVer(1, 0, 0) + MinerAPIVersion0 = newVer(1, 2, 0) + WorkerAPIVersion0 = newVer(1, 1, 0) ) //nolint:varcheck,deadcode diff --git a/api/wrap.go b/api/wrap.go index 1ded67132..b26489a42 100644 --- a/api/wrap.go +++ b/api/wrap.go @@ -26,6 +26,27 @@ func Wrap(proxyT, wrapperT, impl interface{}) interface{} { })) } + for i := 0; i < proxy.Elem().NumField(); i++ { + if proxy.Elem().Type().Field(i).Name == "Internal" { + continue + } + + subProxy := proxy.Elem().Field(i).FieldByName("Internal") + for i := 0; i < ri.NumMethod(); i++ { + mt := ri.Type().Method(i) + if subProxy.FieldByName(mt.Name).Kind() == reflect.Invalid { + continue + } + + fn := ri.Method(i) + of := subProxy.FieldByName(mt.Name) + + subProxy.FieldByName(mt.Name).Set(reflect.MakeFunc(of.Type(), func(args []reflect.Value) (results []reflect.Value) { + return fn.Call(args) + })) + } + } + wp := reflect.New(reflect.TypeOf(wrapperT).Elem()) wp.Elem().Field(0).Set(proxy) return wp.Interface() diff --git a/blockstore/badger/blockstore.go b/blockstore/badger/blockstore.go index e03266ab7..82f0e3360 100644 --- a/blockstore/badger/blockstore.go +++ b/blockstore/badger/blockstore.go @@ -5,7 +5,7 @@ import ( "fmt" "io" "runtime" - "sync/atomic" + "sync" "github.com/dgraph-io/badger/v2" "github.com/dgraph-io/badger/v2/options" @@ -73,20 +73,16 @@ func (b *badgerLogger) Warningf(format string, args ...interface{}) { } const ( - stateOpen int64 = iota + stateOpen = iota stateClosing stateClosed ) // Blockstore is a badger-backed IPLD blockstore. -// -// NOTE: once Close() is called, methods will try their best to return -// ErrBlockstoreClosed. This will guaranteed to happen for all subsequent -// operation calls after Close() has returned, but it may not happen for -// operations in progress. Those are likely to fail with a different error. 
type Blockstore struct { - // state is accessed atomically - state int64 + stateLk sync.RWMutex + state int + viewers sync.WaitGroup DB *badger.DB @@ -97,6 +93,8 @@ type Blockstore struct { var _ blockstore.Blockstore = (*Blockstore)(nil) var _ blockstore.Viewer = (*Blockstore)(nil) +var _ blockstore.BlockstoreIterator = (*Blockstore)(nil) +var _ blockstore.BlockstoreGC = (*Blockstore)(nil) var _ io.Closer = (*Blockstore)(nil) // Open creates a new badger-backed blockstore, with the supplied options. @@ -124,53 +122,82 @@ func Open(opts Options) (*Blockstore, error) { // Close closes the store. If the store has already been closed, this noops and // returns an error, even if the first closure resulted in error. func (b *Blockstore) Close() error { - if !atomic.CompareAndSwapInt64(&b.state, stateOpen, stateClosing) { + b.stateLk.Lock() + if b.state != stateOpen { + b.stateLk.Unlock() return nil } + b.state = stateClosing + b.stateLk.Unlock() + + defer func() { + b.stateLk.Lock() + b.state = stateClosed + b.stateLk.Unlock() + }() + + // wait for all accesses to complete + b.viewers.Wait() - defer atomic.StoreInt64(&b.state, stateClosed) return b.DB.Close() } +func (b *Blockstore) access() error { + b.stateLk.RLock() + defer b.stateLk.RUnlock() + + if b.state != stateOpen { + return ErrBlockstoreClosed + } + + b.viewers.Add(1) + return nil +} + +func (b *Blockstore) isOpen() bool { + b.stateLk.RLock() + defer b.stateLk.RUnlock() + + return b.state == stateOpen +} + // CollectGarbage runs garbage collection on the value log func (b *Blockstore) CollectGarbage() error { - if atomic.LoadInt64(&b.state) != stateOpen { - return ErrBlockstoreClosed + if err := b.access(); err != nil { + return err + } + defer b.viewers.Done() + + // compact first to gather the necessary statistics for GC + nworkers := runtime.NumCPU() / 2 + if nworkers < 2 { + nworkers = 2 + } + + err := b.DB.Flatten(nworkers) + if err != nil { + return err } - var err error for err == nil { err = 
b.DB.RunValueLogGC(0.125) } if err == badger.ErrNoRewrite { - // not really an error in this case + // not really an error in this case, it signals the end of GC return nil } return err } -// Compact runs a synchronous compaction -func (b *Blockstore) Compact() error { - if atomic.LoadInt64(&b.state) != stateOpen { - return ErrBlockstoreClosed - } - - nworkers := runtime.NumCPU() / 2 - if nworkers < 2 { - nworkers = 2 - } - - return b.DB.Flatten(nworkers) -} - // View implements blockstore.Viewer, which leverages zero-copy read-only // access to values. func (b *Blockstore) View(cid cid.Cid, fn func([]byte) error) error { - if atomic.LoadInt64(&b.state) != stateOpen { - return ErrBlockstoreClosed + if err := b.access(); err != nil { + return err } + defer b.viewers.Done() k, pooled := b.PooledStorageKey(cid) if pooled { @@ -191,9 +218,10 @@ func (b *Blockstore) View(cid cid.Cid, fn func([]byte) error) error { // Has implements Blockstore.Has. func (b *Blockstore) Has(cid cid.Cid) (bool, error) { - if atomic.LoadInt64(&b.state) != stateOpen { - return false, ErrBlockstoreClosed + if err := b.access(); err != nil { + return false, err } + defer b.viewers.Done() k, pooled := b.PooledStorageKey(cid) if pooled { @@ -221,9 +249,10 @@ func (b *Blockstore) Get(cid cid.Cid) (blocks.Block, error) { return nil, blockstore.ErrNotFound } - if atomic.LoadInt64(&b.state) != stateOpen { - return nil, ErrBlockstoreClosed + if err := b.access(); err != nil { + return nil, err } + defer b.viewers.Done() k, pooled := b.PooledStorageKey(cid) if pooled { @@ -250,9 +279,10 @@ func (b *Blockstore) Get(cid cid.Cid) (blocks.Block, error) { // GetSize implements Blockstore.GetSize. 
func (b *Blockstore) GetSize(cid cid.Cid) (int, error) { - if atomic.LoadInt64(&b.state) != stateOpen { - return -1, ErrBlockstoreClosed + if err := b.access(); err != nil { + return 0, err } + defer b.viewers.Done() k, pooled := b.PooledStorageKey(cid) if pooled { @@ -279,9 +309,10 @@ func (b *Blockstore) GetSize(cid cid.Cid) (int, error) { // Put implements Blockstore.Put. func (b *Blockstore) Put(block blocks.Block) error { - if atomic.LoadInt64(&b.state) != stateOpen { - return ErrBlockstoreClosed + if err := b.access(); err != nil { + return err } + defer b.viewers.Done() k, pooled := b.PooledStorageKey(block.Cid()) if pooled { @@ -299,9 +330,10 @@ func (b *Blockstore) Put(block blocks.Block) error { // PutMany implements Blockstore.PutMany. func (b *Blockstore) PutMany(blocks []blocks.Block) error { - if atomic.LoadInt64(&b.state) != stateOpen { - return ErrBlockstoreClosed + if err := b.access(); err != nil { + return err } + defer b.viewers.Done() // toReturn tracks the byte slices to return to the pool, if we're using key // prefixing. we can't return each slice to the pool after each Set, because @@ -338,9 +370,10 @@ func (b *Blockstore) PutMany(blocks []blocks.Block) error { // DeleteBlock implements Blockstore.DeleteBlock. func (b *Blockstore) DeleteBlock(cid cid.Cid) error { - if atomic.LoadInt64(&b.state) != stateOpen { - return ErrBlockstoreClosed + if err := b.access(); err != nil { + return err } + defer b.viewers.Done() k, pooled := b.PooledStorageKey(cid) if pooled { @@ -353,9 +386,10 @@ func (b *Blockstore) DeleteBlock(cid cid.Cid) error { } func (b *Blockstore) DeleteMany(cids []cid.Cid) error { - if atomic.LoadInt64(&b.state) != stateOpen { - return ErrBlockstoreClosed + if err := b.access(); err != nil { + return err } + defer b.viewers.Done() // toReturn tracks the byte slices to return to the pool, if we're using key // prefixing. 
we can't return each slice to the pool after each Set, because @@ -392,8 +426,8 @@ func (b *Blockstore) DeleteMany(cids []cid.Cid) error { // AllKeysChan implements Blockstore.AllKeysChan. func (b *Blockstore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) { - if atomic.LoadInt64(&b.state) != stateOpen { - return nil, ErrBlockstoreClosed + if err := b.access(); err != nil { + return nil, err } txn := b.DB.NewTransaction(false) @@ -405,6 +439,7 @@ func (b *Blockstore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) { ch := make(chan cid.Cid) go func() { + defer b.viewers.Done() defer close(ch) defer iter.Close() @@ -415,7 +450,7 @@ func (b *Blockstore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) { if ctx.Err() != nil { return // context has fired. } - if atomic.LoadInt64(&b.state) != stateOpen { + if !b.isOpen() { // open iterators will run even after the database is closed... return // closing, yield. } @@ -442,6 +477,56 @@ func (b *Blockstore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) { return ch, nil } +// Implementation of BlockstoreIterator interface +func (b *Blockstore) ForEachKey(f func(cid.Cid) error) error { + if err := b.access(); err != nil { + return err + } + defer b.viewers.Done() + + txn := b.DB.NewTransaction(false) + defer txn.Discard() + + opts := badger.IteratorOptions{PrefetchSize: 100} + if b.prefixing { + opts.Prefix = b.prefix + } + + iter := txn.NewIterator(opts) + defer iter.Close() + + var buf []byte + for iter.Rewind(); iter.Valid(); iter.Next() { + if !b.isOpen() { + return ErrBlockstoreClosed + } + + k := iter.Item().Key() + if b.prefixing { + k = k[b.prefixLen:] + } + + klen := base32.RawStdEncoding.DecodedLen(len(k)) + if klen > len(buf) { + buf = make([]byte, klen) + } + + n, err := base32.RawStdEncoding.Decode(buf, k) + if err != nil { + return err + } + + c := cid.NewCidV1(cid.Raw, buf[:n]) + + err = f(c) + if err != nil { + return err + } + } + + return nil +} + // HashOnRead 
implements Blockstore.HashOnRead. It is not supported by this // blockstore. func (b *Blockstore) HashOnRead(_ bool) { diff --git a/blockstore/blockstore.go b/blockstore/blockstore.go index 23f0bd754..97f9f5f7b 100644 --- a/blockstore/blockstore.go +++ b/blockstore/blockstore.go @@ -30,6 +30,16 @@ type BatchDeleter interface { DeleteMany(cids []cid.Cid) error } +// BlockstoreIterator is a trait for efficient iteration +type BlockstoreIterator interface { + ForEachKey(func(cid.Cid) error) error +} + +// BlockstoreGC is a trait for blockstores that support online garbage collection +type BlockstoreGC interface { + CollectGarbage() error +} + // WrapIDStore wraps the underlying blockstore in an "identity" blockstore. // The ID store filters out all puts for blocks with CIDs using the "identity" // hash function. It also extracts inlined blocks from CIDs using the identity diff --git a/blockstore/discard.go b/blockstore/discard.go new file mode 100644 index 000000000..afd0651bc --- /dev/null +++ b/blockstore/discard.go @@ -0,0 +1,66 @@ +package blockstore + +import ( + "context" + "io" + + blocks "github.com/ipfs/go-block-format" + cid "github.com/ipfs/go-cid" +) + +var _ Blockstore = (*discardstore)(nil) + +type discardstore struct { + bs Blockstore +} + +func NewDiscardStore(bs Blockstore) Blockstore { + return &discardstore{bs: bs} +} + +func (b *discardstore) Has(cid cid.Cid) (bool, error) { + return b.bs.Has(cid) +} + +func (b *discardstore) HashOnRead(hor bool) { + b.bs.HashOnRead(hor) +} + +func (b *discardstore) Get(cid cid.Cid) (blocks.Block, error) { + return b.bs.Get(cid) +} + +func (b *discardstore) GetSize(cid cid.Cid) (int, error) { + return b.bs.GetSize(cid) +} + +func (b *discardstore) View(cid cid.Cid, f func([]byte) error) error { + return b.bs.View(cid, f) +} + +func (b *discardstore) Put(blk blocks.Block) error { + return nil +} + +func (b *discardstore) PutMany(blks []blocks.Block) error { + return nil +} + +func (b *discardstore) DeleteBlock(cid 
cid.Cid) error { + return nil +} + +func (b *discardstore) DeleteMany(cids []cid.Cid) error { + return nil +} + +func (b *discardstore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) { + return b.bs.AllKeysChan(ctx) +} + +func (b *discardstore) Close() error { + if c, ok := b.bs.(io.Closer); ok { + return c.Close() + } + return nil +} diff --git a/blockstore/splitstore/README.md b/blockstore/splitstore/README.md new file mode 100644 index 000000000..1c6569a34 --- /dev/null +++ b/blockstore/splitstore/README.md @@ -0,0 +1,72 @@ +# SplitStore: An actively scalable blockstore for the Filecoin chain + +The SplitStore was first introduced in lotus v1.5.1, as an experiment +in reducing the performance impact of large blockstores. + +With lotus v1.11.1, we introduce the next iteration in design and +implementation, which we call SplitStore v1. + +The new design (see [#6474](https://github.com/filecoin-project/lotus/pull/6474) +evolves the splitstore to be a freestanding compacting blockstore that +allows us to keep a small (60-100GB) working set in a hot blockstore +and reliably archive out of scope objects in a coldstore. The +coldstore can also be a discard store, whereby out of scope objects +are discarded or a regular badger blockstore (the default), which can +be periodically garbage collected according to configurable user +retention policies. + +To enable the splitstore, edit `.lotus/config.toml` and add the following: +``` +[Chainstore] + EnableSplitstore = true +``` + +If you intend to use the discard coldstore, your also need to add the following: +``` + [Chainstore.Splitstore] + ColdStoreType = "discard" +``` +In general you _should not_ have to use the discard store, unless you +are running a network booster or have very constrained hardware with +not enough disk space to maintain a coldstore, even with garbage +collection. 
+ + +## Operation + +When the splitstore is first enabled, the existing blockstore becomes +the coldstore and a fresh hotstore is initialized. + +The hotstore is warmed up on first startup so as to load all chain +headers and state roots in the current head. This allows us to +immediately gain the performance benefits of a smallerblockstore which +can be substantial for full archival nodes. + +All new writes are directed to the hotstore, while reads first hit the +hotstore, with fallback to the coldstore. + +Once 5 finalities have ellapsed, and every finality henceforth, the +blockstore _compacts_. Compaction is the process of moving all +unreachable objects within the last 4 finalities from the hotstore to +the coldstore. If the system is configured with a discard coldstore, +these objects are discarded. Note that chain headers, all the way to +genesis, are considered reachable. Stateroots and messages are +considered reachable only within the last 4 finalities, unless there +is a live reference to them. + +## Compaction + +Compaction works transactionally with the following algorithm: +- We prepare a transaction, whereby all i/o referenced objects through the API are tracked. +- We walk the chain and mark reachable objects, keeping 4 finalities of state roots and messages and all headers all the way to genesis. +- Once the chain walk is complete, we begin full transaction protection with concurrent marking; we walk and mark all references created during the chain walk. On the same time, all I/O through the API concurrently marks objects as live references. +- We collect cold objects by iterating through the hotstore and checking the mark set; if an object is not marked, then it is candidate for purge. +- When running with a coldstore, we next copy all cold objects to the coldstore. 
+- At this point we are ready to begin purging: + - We sort cold objects heaviest first, so as to never delete the consituents of a DAG before the DAG itself (which would leave dangling references) + - We delete in small batches taking a lock; each batch is checked again for marks, from the concurrent transactional mark, so as to never delete anything live +- We then end the transaction and compact/gc the hotstore. + +## Coldstore Garbage Collection + +TBD -- see [#6577](https://github.com/filecoin-project/lotus/issues/6577) diff --git a/blockstore/splitstore/debug.go b/blockstore/splitstore/debug.go new file mode 100644 index 000000000..2be85ebfe --- /dev/null +++ b/blockstore/splitstore/debug.go @@ -0,0 +1,273 @@ +package splitstore + +import ( + "crypto/sha256" + "encoding/hex" + "fmt" + "os" + "os/exec" + "path/filepath" + "runtime/debug" + "strings" + "sync" + "time" + + "go.uber.org/multierr" + "golang.org/x/xerrors" + + blocks "github.com/ipfs/go-block-format" + cid "github.com/ipfs/go-cid" +) + +type debugLog struct { + readLog, writeLog, deleteLog, stackLog *debugLogOp + + stackMx sync.Mutex + stackMap map[string]string +} + +type debugLogOp struct { + path string + mx sync.Mutex + log *os.File + count int +} + +func openDebugLog(path string) (*debugLog, error) { + basePath := filepath.Join(path, "debug") + err := os.MkdirAll(basePath, 0755) + if err != nil { + return nil, err + } + + readLog, err := openDebugLogOp(basePath, "read.log") + if err != nil { + return nil, err + } + + writeLog, err := openDebugLogOp(basePath, "write.log") + if err != nil { + _ = readLog.Close() + return nil, err + } + + deleteLog, err := openDebugLogOp(basePath, "delete.log") + if err != nil { + _ = readLog.Close() + _ = writeLog.Close() + return nil, err + } + + stackLog, err := openDebugLogOp(basePath, "stack.log") + if err != nil { + _ = readLog.Close() + _ = writeLog.Close() + _ = deleteLog.Close() + return nil, xerrors.Errorf("error opening stack log: %w", err) + } + + 
return &debugLog{ + readLog: readLog, + writeLog: writeLog, + deleteLog: deleteLog, + stackLog: stackLog, + stackMap: make(map[string]string), + }, nil +} + +func (d *debugLog) LogReadMiss(cid cid.Cid) { + if d == nil { + return + } + + stack := d.getStack() + err := d.readLog.Log("%s %s %s\n", d.timestamp(), cid, stack) + if err != nil { + log.Warnf("error writing read log: %s", err) + } +} + +func (d *debugLog) LogWrite(blk blocks.Block) { + if d == nil { + return + } + + var stack string + if enableDebugLogWriteTraces { + stack = " " + d.getStack() + } + + err := d.writeLog.Log("%s %s%s\n", d.timestamp(), blk.Cid(), stack) + if err != nil { + log.Warnf("error writing write log: %s", err) + } +} + +func (d *debugLog) LogWriteMany(blks []blocks.Block) { + if d == nil { + return + } + + var stack string + if enableDebugLogWriteTraces { + stack = " " + d.getStack() + } + + now := d.timestamp() + for _, blk := range blks { + err := d.writeLog.Log("%s %s%s\n", now, blk.Cid(), stack) + if err != nil { + log.Warnf("error writing write log: %s", err) + break + } + } +} + +func (d *debugLog) LogDelete(cids []cid.Cid) { + if d == nil { + return + } + + now := d.timestamp() + for _, c := range cids { + err := d.deleteLog.Log("%s %s\n", now, c) + if err != nil { + log.Warnf("error writing delete log: %s", err) + break + } + } +} + +func (d *debugLog) Flush() { + if d == nil { + return + } + + // rotate non-empty logs + d.readLog.Rotate() + d.writeLog.Rotate() + d.deleteLog.Rotate() + d.stackLog.Rotate() +} + +func (d *debugLog) Close() error { + if d == nil { + return nil + } + + err1 := d.readLog.Close() + err2 := d.writeLog.Close() + err3 := d.deleteLog.Close() + err4 := d.stackLog.Close() + + return multierr.Combine(err1, err2, err3, err4) +} + +func (d *debugLog) getStack() string { + sk := d.getNormalizedStackTrace() + hash := sha256.Sum256([]byte(sk)) + key := string(hash[:]) + + d.stackMx.Lock() + repr, ok := d.stackMap[key] + if !ok { + repr = 
hex.EncodeToString(hash[:]) + d.stackMap[key] = repr + + err := d.stackLog.Log("%s\n%s\n", repr, sk) + if err != nil { + log.Warnf("error writing stack trace for %s: %s", repr, err) + } + } + d.stackMx.Unlock() + + return repr +} + +func (d *debugLog) getNormalizedStackTrace() string { + sk := string(debug.Stack()) + + // Normalization for deduplication + // skip first line -- it's the goroutine + // for each line that ends in a ), remove the call args -- these are the registers + lines := strings.Split(sk, "\n")[1:] + for i, line := range lines { + if len(line) > 0 && line[len(line)-1] == ')' { + idx := strings.LastIndex(line, "(") + if idx < 0 { + continue + } + lines[i] = line[:idx] + } + } + + return strings.Join(lines, "\n") +} + +func (d *debugLog) timestamp() string { + ts, _ := time.Now().MarshalText() + return string(ts) +} + +func openDebugLogOp(basePath, name string) (*debugLogOp, error) { + path := filepath.Join(basePath, name) + file, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0644) + if err != nil { + return nil, xerrors.Errorf("error opening %s: %w", name, err) + } + + return &debugLogOp{path: path, log: file}, nil +} + +func (d *debugLogOp) Close() error { + d.mx.Lock() + defer d.mx.Unlock() + + return d.log.Close() +} + +func (d *debugLogOp) Log(template string, arg ...interface{}) error { + d.mx.Lock() + defer d.mx.Unlock() + + d.count++ + _, err := fmt.Fprintf(d.log, template, arg...) 
+ return err +} + +func (d *debugLogOp) Rotate() { + d.mx.Lock() + defer d.mx.Unlock() + + if d.count == 0 { + return + } + + err := d.log.Close() + if err != nil { + log.Warnf("error closing log (file: %s): %s", d.path, err) + return + } + + arxivPath := fmt.Sprintf("%s-%d", d.path, time.Now().Unix()) + err = os.Rename(d.path, arxivPath) + if err != nil { + log.Warnf("error moving log (file: %s): %s", d.path, err) + return + } + + go func() { + cmd := exec.Command("gzip", arxivPath) + err := cmd.Run() + if err != nil { + log.Warnf("error compressing log (file: %s): %s", arxivPath, err) + } + }() + + d.count = 0 + d.log, err = os.OpenFile(d.path, os.O_WRONLY|os.O_CREATE, 0644) + if err != nil { + log.Warnf("error opening log (file: %s): %s", d.path, err) + return + } +} diff --git a/blockstore/splitstore/markset.go b/blockstore/splitstore/markset.go index ef14a2fc6..a644e7279 100644 --- a/blockstore/splitstore/markset.go +++ b/blockstore/splitstore/markset.go @@ -1,26 +1,26 @@ package splitstore import ( - "path/filepath" + "errors" "golang.org/x/xerrors" cid "github.com/ipfs/go-cid" ) +var errMarkSetClosed = errors.New("markset closed") + // MarkSet is a utility to keep track of seen CID, and later query for them. // // * If the expected dataset is large, it can be backed by a datastore (e.g. bbolt). -// * If a probabilistic result is acceptable, it can be backed by a bloom filter (default). +// * If a probabilistic result is acceptable, it can be backed by a bloom filter type MarkSet interface { Mark(cid.Cid) error Has(cid.Cid) (bool, error) Close() error + SetConcurrent() } -// markBytes is deliberately a non-nil empty byte slice for serialization. 
-var markBytes = []byte{} - type MarkSetEnv interface { Create(name string, sizeHint int64) (MarkSet, error) Close() error @@ -28,10 +28,10 @@ type MarkSetEnv interface { func OpenMarkSetEnv(path string, mtype string) (MarkSetEnv, error) { switch mtype { - case "", "bloom": + case "bloom": return NewBloomMarkSetEnv() - case "bolt": - return NewBoltMarkSetEnv(filepath.Join(path, "markset.bolt")) + case "map": + return NewMapMarkSetEnv() default: return nil, xerrors.Errorf("unknown mark set type %s", mtype) } diff --git a/blockstore/splitstore/markset_bloom.go b/blockstore/splitstore/markset_bloom.go index c213436c8..9261de7c7 100644 --- a/blockstore/splitstore/markset_bloom.go +++ b/blockstore/splitstore/markset_bloom.go @@ -3,6 +3,7 @@ package splitstore import ( "crypto/rand" "crypto/sha256" + "sync" "golang.org/x/xerrors" @@ -21,7 +22,9 @@ var _ MarkSetEnv = (*BloomMarkSetEnv)(nil) type BloomMarkSet struct { salt []byte + mx sync.RWMutex bf *bbloom.Bloom + ts bool } var _ MarkSet = (*BloomMarkSet)(nil) @@ -64,14 +67,41 @@ func (s *BloomMarkSet) saltedKey(cid cid.Cid) []byte { } func (s *BloomMarkSet) Mark(cid cid.Cid) error { + if s.ts { + s.mx.Lock() + defer s.mx.Unlock() + } + + if s.bf == nil { + return errMarkSetClosed + } + s.bf.Add(s.saltedKey(cid)) return nil } func (s *BloomMarkSet) Has(cid cid.Cid) (bool, error) { + if s.ts { + s.mx.RLock() + defer s.mx.RUnlock() + } + + if s.bf == nil { + return false, errMarkSetClosed + } + return s.bf.Has(s.saltedKey(cid)), nil } func (s *BloomMarkSet) Close() error { + if s.ts { + s.mx.Lock() + defer s.mx.Unlock() + } + s.bf = nil return nil } + +func (s *BloomMarkSet) SetConcurrent() { + s.ts = true +} diff --git a/blockstore/splitstore/markset_bolt.go b/blockstore/splitstore/markset_bolt.go deleted file mode 100644 index cab0dd74a..000000000 --- a/blockstore/splitstore/markset_bolt.go +++ /dev/null @@ -1,81 +0,0 @@ -package splitstore - -import ( - "time" - - "golang.org/x/xerrors" - - cid "github.com/ipfs/go-cid" 
- bolt "go.etcd.io/bbolt" -) - -type BoltMarkSetEnv struct { - db *bolt.DB -} - -var _ MarkSetEnv = (*BoltMarkSetEnv)(nil) - -type BoltMarkSet struct { - db *bolt.DB - bucketId []byte -} - -var _ MarkSet = (*BoltMarkSet)(nil) - -func NewBoltMarkSetEnv(path string) (*BoltMarkSetEnv, error) { - db, err := bolt.Open(path, 0644, - &bolt.Options{ - Timeout: 1 * time.Second, - NoSync: true, - }) - if err != nil { - return nil, err - } - - return &BoltMarkSetEnv{db: db}, nil -} - -func (e *BoltMarkSetEnv) Create(name string, hint int64) (MarkSet, error) { - bucketId := []byte(name) - err := e.db.Update(func(tx *bolt.Tx) error { - _, err := tx.CreateBucketIfNotExists(bucketId) - if err != nil { - return xerrors.Errorf("error creating bolt db bucket %s: %w", name, err) - } - return nil - }) - - if err != nil { - return nil, err - } - - return &BoltMarkSet{db: e.db, bucketId: bucketId}, nil -} - -func (e *BoltMarkSetEnv) Close() error { - return e.db.Close() -} - -func (s *BoltMarkSet) Mark(cid cid.Cid) error { - return s.db.Update(func(tx *bolt.Tx) error { - b := tx.Bucket(s.bucketId) - return b.Put(cid.Hash(), markBytes) - }) -} - -func (s *BoltMarkSet) Has(cid cid.Cid) (result bool, err error) { - err = s.db.View(func(tx *bolt.Tx) error { - b := tx.Bucket(s.bucketId) - v := b.Get(cid.Hash()) - result = v != nil - return nil - }) - - return result, err -} - -func (s *BoltMarkSet) Close() error { - return s.db.Update(func(tx *bolt.Tx) error { - return tx.DeleteBucket(s.bucketId) - }) -} diff --git a/blockstore/splitstore/markset_map.go b/blockstore/splitstore/markset_map.go new file mode 100644 index 000000000..197c82424 --- /dev/null +++ b/blockstore/splitstore/markset_map.go @@ -0,0 +1,75 @@ +package splitstore + +import ( + "sync" + + cid "github.com/ipfs/go-cid" +) + +type MapMarkSetEnv struct{} + +var _ MarkSetEnv = (*MapMarkSetEnv)(nil) + +type MapMarkSet struct { + mx sync.RWMutex + set map[string]struct{} + + ts bool +} + +var _ MarkSet = (*MapMarkSet)(nil) + +func 
NewMapMarkSetEnv() (*MapMarkSetEnv, error) { + return &MapMarkSetEnv{}, nil +} + +func (e *MapMarkSetEnv) Create(name string, sizeHint int64) (MarkSet, error) { + return &MapMarkSet{ + set: make(map[string]struct{}, sizeHint), + }, nil +} + +func (e *MapMarkSetEnv) Close() error { + return nil +} + +func (s *MapMarkSet) Mark(cid cid.Cid) error { + if s.ts { + s.mx.Lock() + defer s.mx.Unlock() + } + + if s.set == nil { + return errMarkSetClosed + } + + s.set[string(cid.Hash())] = struct{}{} + return nil +} + +func (s *MapMarkSet) Has(cid cid.Cid) (bool, error) { + if s.ts { + s.mx.RLock() + defer s.mx.RUnlock() + } + + if s.set == nil { + return false, errMarkSetClosed + } + + _, ok := s.set[string(cid.Hash())] + return ok, nil +} + +func (s *MapMarkSet) Close() error { + if s.ts { + s.mx.Lock() + defer s.mx.Unlock() + } + s.set = nil + return nil +} + +func (s *MapMarkSet) SetConcurrent() { + s.ts = true +} diff --git a/blockstore/splitstore/markset_test.go b/blockstore/splitstore/markset_test.go index 367ab8d06..d5c01e220 100644 --- a/blockstore/splitstore/markset_test.go +++ b/blockstore/splitstore/markset_test.go @@ -8,8 +8,8 @@ import ( "github.com/multiformats/go-multihash" ) -func TestBoltMarkSet(t *testing.T) { - testMarkSet(t, "bolt") +func TestMapMarkSet(t *testing.T) { + testMarkSet(t, "map") } func TestBloomMarkSet(t *testing.T) { diff --git a/blockstore/splitstore/splitstore.go b/blockstore/splitstore/splitstore.go index 23b2d3427..5ac7a1c94 100644 --- a/blockstore/splitstore/splitstore.go +++ b/blockstore/splitstore/splitstore.go @@ -2,8 +2,8 @@ package splitstore import ( "context" - "encoding/binary" "errors" + "os" "sync" "sync/atomic" "time" @@ -17,41 +17,13 @@ import ( logging "github.com/ipfs/go-log/v2" "github.com/filecoin-project/go-state-types/abi" - bstore "github.com/filecoin-project/lotus/blockstore" - "github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/metrics" 
"go.opencensus.io/stats" ) -var ( - // CompactionThreshold is the number of epochs that need to have elapsed - // from the previously compacted epoch to trigger a new compaction. - // - // |················· CompactionThreshold ··················| - // | | - // =======‖≡≡≡≡≡≡≡‖-----------------------|------------------------» - // | | | chain --> ↑__ current epoch - // |·······| | - // ↑________ CompactionCold ↑________ CompactionBoundary - // - // === :: cold (already archived) - // ≡≡≡ :: to be archived in this compaction - // --- :: hot - CompactionThreshold = 5 * build.Finality - - // CompactionCold is the number of epochs that will be archived to the - // cold store on compaction. See diagram on CompactionThreshold for a - // better sense. - CompactionCold = build.Finality - - // CompactionBoundary is the number of epochs from the current epoch at which - // we will walk the chain for live objects - CompactionBoundary = 2 * build.Finality -) - var ( // baseEpochKey stores the base epoch (last compaction epoch) in the // metadata store. @@ -66,37 +38,39 @@ var ( // this is first computed at warmup and updated in every compaction markSetSizeKey = dstore.NewKey("/splitstore/markSetSize") + // compactionIndexKey stores the compaction index (serial number) + compactionIndexKey = dstore.NewKey("/splitstore/compactionIndex") + log = logging.Logger("splitstore") + + // set this to true if you are debugging the splitstore to enable debug logging + enableDebugLog = false + // set this to true if you want to track origin stack traces in the write log + enableDebugLogWriteTraces = false ) -const ( - batchSize = 16384 +func init() { + if os.Getenv("LOTUS_SPLITSTORE_DEBUG_LOG") == "1" { + enableDebugLog = true + } - defaultColdPurgeSize = 7_000_000 - defaultDeadPurgeSize = 1_000_000 -) + if os.Getenv("LOTUS_SPLITSTORE_DEBUG_LOG_WRITE_TRACES") == "1" { + enableDebugLogWriteTraces = true + } +} type Config struct { - // TrackingStore is the type of tracking store to use. 
- // - // Supported values are: "bolt" (default if omitted), "mem" (for tests and readonly access). - TrackingStoreType string - // MarkSetType is the type of mark set to use. // - // Supported values are: "bloom" (default if omitted), "bolt". + // Only current sane value is "map", but we may add an option for a disk-backed + // markset for memory-constrained situations. MarkSetType string - // perform full reachability analysis (expensive) for compaction - // You should enable this option if you plan to use the splitstore without a backing coldstore - EnableFullCompaction bool - // EXPERIMENTAL enable pruning of unreachable objects. - // This has not been sufficiently tested yet; only enable if you know what you are doing. - // Only applies if you enable full compaction. - EnableGC bool - // full archival nodes should enable this if EnableFullCompaction is enabled - // do NOT enable this if you synced from a snapshot. - // Only applies if you enabled full compaction - Archival bool + + // DiscardColdBlocks indicates whether to skip moving cold blocks to the coldstore. + // If the splitstore is running with a noop coldstore then this option is set to true + // which skips moving (as it is a noop, but still takes time to read all the cold objects) + // and directly purges cold blocks. + DiscardColdBlocks bool } // ChainAccessor allows the Splitstore to access the chain. It will most likely @@ -105,37 +79,58 @@ type ChainAccessor interface { GetTipsetByHeight(context.Context, abi.ChainEpoch, *types.TipSet, bool) (*types.TipSet, error) GetHeaviestTipSet() *types.TipSet SubscribeHeadChanges(change func(revert []*types.TipSet, apply []*types.TipSet) error) - WalkSnapshot(context.Context, *types.TipSet, abi.ChainEpoch, bool, bool, func(cid.Cid) error) error +} + +// hotstore is the interface that must be satisfied by the hot blockstore; it is an extension +// of the Blockstore interface with the traits we need for compaction. 
+type hotstore interface { + bstore.Blockstore + bstore.BlockstoreIterator } type SplitStore struct { - compacting int32 // compaction (or warmp up) in progress - critsection int32 // compaction critical section - closing int32 // the split store is closing + compacting int32 // compaction/prune/warmup in progress + closing int32 // the splitstore is closing - fullCompaction bool - enableGC bool - skipOldMsgs bool - skipMsgReceipts bool + cfg *Config - baseEpoch abi.ChainEpoch - warmupEpoch abi.ChainEpoch + mx sync.Mutex + warmupEpoch abi.ChainEpoch // protected by mx + baseEpoch abi.ChainEpoch // protected by compaction lock + + headChangeMx sync.Mutex coldPurgeSize int - deadPurgeSize int - mx sync.Mutex - curTs *types.TipSet - - chain ChainAccessor - ds dstore.Datastore - hot bstore.Blockstore - cold bstore.Blockstore - tracker TrackingStore - - env MarkSetEnv + chain ChainAccessor + ds dstore.Datastore + cold bstore.Blockstore + hot hotstore + markSetEnv MarkSetEnv markSetSize int64 + + compactionIndex int64 + + ctx context.Context + cancel func() + + debug *debugLog + + // transactional protection for concurrent read/writes during compaction + txnLk sync.RWMutex + txnViewsMx sync.Mutex + txnViewsCond sync.Cond + txnViews int + txnViewsWaiting bool + txnActive bool + txnProtect MarkSet + txnRefsMx sync.Mutex + txnRefs map[cid.Cid]struct{} + txnMissing map[cid.Cid]struct{} + + // registered protectors + protectors []func(func(cid.Cid) error) error } var _ bstore.Blockstore = (*SplitStore)(nil) @@ -144,37 +139,42 @@ var _ bstore.Blockstore = (*SplitStore)(nil) // is backed by the provided hot and cold stores. The returned SplitStore MUST be // attached to the ChainStore with Start in order to trigger compaction. 
func Open(path string, ds dstore.Datastore, hot, cold bstore.Blockstore, cfg *Config) (*SplitStore, error) { - // the tracking store - tracker, err := OpenTrackingStore(path, cfg.TrackingStoreType) - if err != nil { - return nil, err + // hot blockstore must support the hotstore interface + hots, ok := hot.(hotstore) + if !ok { + // be specific about what is missing + if _, ok := hot.(bstore.BlockstoreIterator); !ok { + return nil, xerrors.Errorf("hot blockstore does not support efficient iteration: %T", hot) + } + + return nil, xerrors.Errorf("hot blockstore does not support the necessary traits: %T", hot) } // the markset env - env, err := OpenMarkSetEnv(path, cfg.MarkSetType) + markSetEnv, err := OpenMarkSetEnv(path, cfg.MarkSetType) if err != nil { - _ = tracker.Close() return nil, err } // and now we can make a SplitStore ss := &SplitStore{ - ds: ds, - hot: hot, - cold: cold, - tracker: tracker, - env: env, - - fullCompaction: cfg.EnableFullCompaction, - enableGC: cfg.EnableGC, - skipOldMsgs: !(cfg.EnableFullCompaction && cfg.Archival), - skipMsgReceipts: !(cfg.EnableFullCompaction && cfg.Archival), + cfg: cfg, + ds: ds, + cold: cold, + hot: hots, + markSetEnv: markSetEnv, coldPurgeSize: defaultColdPurgeSize, } - if cfg.EnableGC { - ss.deadPurgeSize = defaultDeadPurgeSize + ss.txnViewsCond.L = &ss.txnViewsMx + ss.ctx, ss.cancel = context.WithCancel(context.Background()) + + if enableDebugLog { + ss.debug, err = openDebugLog(path) + if err != nil { + return nil, err + } } return ss, nil @@ -192,26 +192,56 @@ func (s *SplitStore) DeleteMany(_ []cid.Cid) error { } func (s *SplitStore) Has(cid cid.Cid) (bool, error) { + if isIdentiyCid(cid) { + return true, nil + } + + s.txnLk.RLock() + defer s.txnLk.RUnlock() + has, err := s.hot.Has(cid) - if err != nil || has { + if err != nil { return has, err } + if has { + s.trackTxnRef(cid) + return true, nil + } + return s.cold.Has(cid) } func (s *SplitStore) Get(cid cid.Cid) (blocks.Block, error) { + if isIdentiyCid(cid) { 
+ data, err := decodeIdentityCid(cid) + if err != nil { + return nil, err + } + + return blocks.NewBlockWithCid(data, cid) + } + + s.txnLk.RLock() + defer s.txnLk.RUnlock() + blk, err := s.hot.Get(cid) switch err { case nil: + s.trackTxnRef(cid) return blk, nil case bstore.ErrNotFound: + if s.isWarm() { + s.debug.LogReadMiss(cid) + } + blk, err = s.cold.Get(cid) if err == nil { - stats.Record(context.Background(), metrics.SplitstoreMiss.M(1)) + stats.Record(s.ctx, metrics.SplitstoreMiss.M(1)) + } return blk, err @@ -221,16 +251,33 @@ func (s *SplitStore) Get(cid cid.Cid) (blocks.Block, error) { } func (s *SplitStore) GetSize(cid cid.Cid) (int, error) { + if isIdentiyCid(cid) { + data, err := decodeIdentityCid(cid) + if err != nil { + return 0, err + } + + return len(data), nil + } + + s.txnLk.RLock() + defer s.txnLk.RUnlock() + size, err := s.hot.GetSize(cid) switch err { case nil: + s.trackTxnRef(cid) return size, nil case bstore.ErrNotFound: + if s.isWarm() { + s.debug.LogReadMiss(cid) + } + size, err = s.cold.GetSize(cid) if err == nil { - stats.Record(context.Background(), metrics.SplitstoreMiss.M(1)) + stats.Record(s.ctx, metrics.SplitstoreMiss.M(1)) } return size, err @@ -240,46 +287,67 @@ func (s *SplitStore) GetSize(cid cid.Cid) (int, error) { } func (s *SplitStore) Put(blk blocks.Block) error { - s.mx.Lock() - if s.curTs == nil { - s.mx.Unlock() - return s.cold.Put(blk) + if isIdentiyCid(blk.Cid()) { + return nil } - epoch := s.curTs.Height() - s.mx.Unlock() + s.txnLk.RLock() + defer s.txnLk.RUnlock() - err := s.tracker.Put(blk.Cid(), epoch) + err := s.hot.Put(blk) if err != nil { - log.Errorf("error tracking CID in hotstore: %s; falling back to coldstore", err) - return s.cold.Put(blk) + return err } - return s.hot.Put(blk) + s.debug.LogWrite(blk) + + s.trackTxnRef(blk.Cid()) + return nil } func (s *SplitStore) PutMany(blks []blocks.Block) error { - s.mx.Lock() - if s.curTs == nil { - s.mx.Unlock() - return s.cold.PutMany(blks) + // filter identites + 
idcids := 0 + for _, blk := range blks { + if isIdentiyCid(blk.Cid()) { + idcids++ + } } - epoch := s.curTs.Height() - s.mx.Unlock() + if idcids > 0 { + if idcids == len(blks) { + // it's all identities + return nil + } + + filtered := make([]blocks.Block, 0, len(blks)-idcids) + for _, blk := range blks { + if isIdentiyCid(blk.Cid()) { + continue + } + filtered = append(filtered, blk) + } + + blks = filtered + } batch := make([]cid.Cid, 0, len(blks)) for _, blk := range blks { batch = append(batch, blk.Cid()) } - err := s.tracker.PutBatch(batch, epoch) + s.txnLk.RLock() + defer s.txnLk.RUnlock() + + err := s.hot.PutMany(blks) if err != nil { - log.Errorf("error tracking CIDs in hotstore: %s; falling back to coldstore", err) - return s.cold.PutMany(blks) + return err } - return s.hot.PutMany(blks) + s.debug.LogWriteMany(blks) + + s.trackTxnRefMany(batch) + return nil } func (s *SplitStore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) { @@ -297,15 +365,21 @@ func (s *SplitStore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) { return nil, err } - ch := make(chan cid.Cid) + seen := cid.NewSet() + ch := make(chan cid.Cid, 8) // buffer is arbitrary, just enough to avoid context switches go func() { defer cancel() defer close(ch) for _, in := range []<-chan cid.Cid{chHot, chCold} { - for cid := range in { + for c := range in { + // ensure we only emit each key once + if !seen.Visit(c) { + continue + } + select { - case ch <- cid: + case ch <- c: case <-ctx.Done(): return } @@ -322,20 +396,57 @@ func (s *SplitStore) HashOnRead(enabled bool) { } func (s *SplitStore) View(cid cid.Cid, cb func([]byte) error) error { + if isIdentiyCid(cid) { + data, err := decodeIdentityCid(cid) + if err != nil { + return err + } + + return cb(data) + } + + // views are (optimistically) protected two-fold: + // - if there is an active transaction, then the reference is protected. 
+ // - if there is no active transaction, active views are tracked in a + // wait group and compaction is inhibited from starting until they + // have all completed. this is necessary to ensure that a (very) long-running + // view can't have its data pointer deleted, which would be catastrophic. + // Note that we can't just RLock for the duration of the view, as this could + // lead to deadlock with recursive views. + s.protectView(cid) + defer s.viewDone() + err := s.hot.View(cid, cb) switch err { case bstore.ErrNotFound: - return s.cold.View(cid, cb) + if s.isWarm() { + s.debug.LogReadMiss(cid) + } + + err = s.cold.View(cid, cb) + if err == nil { + stats.Record(s.ctx, metrics.SplitstoreMiss.M(1)) + } + return err default: return err } } +func (s *SplitStore) isWarm() bool { + s.mx.Lock() + defer s.mx.Unlock() + return s.warmupEpoch > 0 +} + // State tracking func (s *SplitStore) Start(chain ChainAccessor) error { s.chain = chain - s.curTs = chain.GetHeaviestTipSet() + curTs := chain.GetHeaviestTipSet() + + // should we warmup + warmup := false // load base epoch from metadata ds // if none, then use current epoch because it's a fresh start @@ -345,12 +456,12 @@ func (s *SplitStore) Start(chain ChainAccessor) error { s.baseEpoch = bytesToEpoch(bs) case dstore.ErrNotFound: - if s.curTs == nil { + if curTs == nil { // this can happen in some tests break } - err = s.setBaseEpoch(s.curTs.Height()) + err = s.setBaseEpoch(curTs.Height()) if err != nil { return xerrors.Errorf("error saving base epoch: %w", err) } @@ -360,20 +471,19 @@ func (s *SplitStore) Start(chain ChainAccessor) error { } // load warmup epoch from metadata ds - // if none, then the splitstore will warm up the hotstore at first head change notif - // by walking the current tipset bs, err = s.ds.Get(warmupEpochKey) switch err { case nil: s.warmupEpoch = bytesToEpoch(bs) case dstore.ErrNotFound: + warmup = true + default: return xerrors.Errorf("error loading warmup epoch: %w", err) } - // load 
markSetSize from metadata ds - // if none, the splitstore will compute it during warmup and update in every compaction + // load markSetSize from metadata ds to provide a size hint for marksets bs, err = s.ds.Get(markSetSizeKey) switch err { case nil: @@ -384,663 +494,62 @@ func (s *SplitStore) Start(chain ChainAccessor) error { return xerrors.Errorf("error loading mark set size: %w", err) } + // load compactionIndex from metadata ds to provide a hint as to when to perform moving gc + bs, err = s.ds.Get(compactionIndexKey) + switch err { + case nil: + s.compactionIndex = bytesToInt64(bs) + + case dstore.ErrNotFound: + // this is potentially an upgrade from splitstore v0; schedule a warmup as v0 has + // some issues with hot references leaking into the coldstore. + warmup = true + default: + return xerrors.Errorf("error loading compaction index: %w", err) + } + log.Infow("starting splitstore", "baseEpoch", s.baseEpoch, "warmupEpoch", s.warmupEpoch) + if warmup { + err = s.warmup(curTs) + if err != nil { + return xerrors.Errorf("error starting warmup: %w", err) + } + } + // watch the chain chain.SubscribeHeadChanges(s.HeadChange) return nil } -func (s *SplitStore) Close() error { - atomic.StoreInt32(&s.closing, 1) +func (s *SplitStore) AddProtector(protector func(func(cid.Cid) error) error) { + s.mx.Lock() + defer s.mx.Unlock() - if atomic.LoadInt32(&s.critsection) == 1 { - log.Warn("ongoing compaction in critical section; waiting for it to finish...") - for atomic.LoadInt32(&s.critsection) == 1 { + s.protectors = append(s.protectors, protector) +} + +func (s *SplitStore) Close() error { + if !atomic.CompareAndSwapInt32(&s.closing, 0, 1) { + // already closing + return nil + } + + if atomic.LoadInt32(&s.compacting) == 1 { + log.Warn("close with ongoing compaction in progress; waiting for it to finish...") + for atomic.LoadInt32(&s.compacting) == 1 { time.Sleep(time.Second) } } - return multierr.Combine(s.tracker.Close(), s.env.Close()) + s.cancel() + return 
multierr.Combine(s.markSetEnv.Close(), s.debug.Close()) } -func (s *SplitStore) HeadChange(_, apply []*types.TipSet) error { - s.mx.Lock() - curTs := apply[len(apply)-1] - epoch := curTs.Height() - s.curTs = curTs - s.mx.Unlock() - - if !atomic.CompareAndSwapInt32(&s.compacting, 0, 1) { - // we are currently compacting, do nothing and wait for the next head change - return nil - } - - if s.warmupEpoch == 0 { - // splitstore needs to warm up - go func() { - defer atomic.StoreInt32(&s.compacting, 0) - - log.Info("warming up hotstore") - start := time.Now() - - s.warmup(curTs) - - log.Infow("warm up done", "took", time.Since(start)) - }() - - return nil - } - - if epoch-s.baseEpoch > CompactionThreshold { - // it's time to compact - go func() { - defer atomic.StoreInt32(&s.compacting, 0) - - log.Info("compacting splitstore") - start := time.Now() - - s.compact(curTs) - - log.Infow("compaction done", "took", time.Since(start)) - }() - } else { - // no compaction necessary - atomic.StoreInt32(&s.compacting, 0) - } - - return nil -} - -func (s *SplitStore) warmup(curTs *types.TipSet) { - epoch := curTs.Height() - - batchHot := make([]blocks.Block, 0, batchSize) - batchSnoop := make([]cid.Cid, 0, batchSize) - - count := int64(0) - err := s.chain.WalkSnapshot(context.Background(), curTs, 1, s.skipOldMsgs, s.skipMsgReceipts, - func(cid cid.Cid) error { - count++ - - has, err := s.hot.Has(cid) - if err != nil { - return err - } - - if has { - return nil - } - - blk, err := s.cold.Get(cid) - if err != nil { - return err - } - - batchHot = append(batchHot, blk) - batchSnoop = append(batchSnoop, cid) - - if len(batchHot) == batchSize { - err = s.tracker.PutBatch(batchSnoop, epoch) - if err != nil { - return err - } - batchSnoop = batchSnoop[:0] - - err = s.hot.PutMany(batchHot) - if err != nil { - return err - } - batchHot = batchHot[:0] - } - - return nil - }) - - if err != nil { - log.Errorf("error warming up splitstore: %s", err) - return - } - - if len(batchHot) > 0 { - err 
= s.tracker.PutBatch(batchSnoop, epoch) - if err != nil { - log.Errorf("error warming up splitstore: %s", err) - return - } - - err = s.hot.PutMany(batchHot) - if err != nil { - log.Errorf("error warming up splitstore: %s", err) - return - } - } - - if count > s.markSetSize { - s.markSetSize = count + count>>2 // overestimate a bit - } - - // save the warmup epoch - s.warmupEpoch = epoch - err = s.ds.Put(warmupEpochKey, epochToBytes(epoch)) - if err != nil { - log.Errorf("error saving warmup epoch: %s", err) - } - - err = s.ds.Put(markSetSizeKey, int64ToBytes(s.markSetSize)) - if err != nil { - log.Errorf("error saving mark set size: %s", err) - } -} - -// Compaction/GC Algorithm -func (s *SplitStore) compact(curTs *types.TipSet) { - var err error - if s.markSetSize == 0 { - start := time.Now() - log.Info("estimating mark set size") - err = s.estimateMarkSetSize(curTs) - if err != nil { - log.Errorf("error estimating mark set size: %s; aborting compaction", err) - return - } - log.Infow("estimating mark set size done", "took", time.Since(start), "size", s.markSetSize) - } else { - log.Infow("current mark set size estimate", "size", s.markSetSize) - } - - start := time.Now() - if s.fullCompaction { - err = s.compactFull(curTs) - } else { - err = s.compactSimple(curTs) - } - took := time.Since(start).Milliseconds() - stats.Record(context.Background(), metrics.SplitstoreCompactionTimeSeconds.M(float64(took)/1e3)) - - if err != nil { - log.Errorf("COMPACTION ERROR: %s", err) - } -} - -func (s *SplitStore) estimateMarkSetSize(curTs *types.TipSet) error { - var count int64 - err := s.chain.WalkSnapshot(context.Background(), curTs, 1, s.skipOldMsgs, s.skipMsgReceipts, - func(cid cid.Cid) error { - count++ - return nil - }) - - if err != nil { - return err - } - - s.markSetSize = count + count>>2 // overestimate a bit - return nil -} - -func (s *SplitStore) compactSimple(curTs *types.TipSet) error { - coldEpoch := s.baseEpoch + CompactionCold - currentEpoch := 
curTs.Height() - boundaryEpoch := currentEpoch - CompactionBoundary - - log.Infow("running simple compaction", "currentEpoch", currentEpoch, "baseEpoch", s.baseEpoch, "coldEpoch", coldEpoch, "boundaryEpoch", boundaryEpoch) - - coldSet, err := s.env.Create("cold", s.markSetSize) - if err != nil { - return xerrors.Errorf("error creating mark set: %w", err) - } - defer coldSet.Close() //nolint:errcheck - - // 1. mark reachable cold objects by looking at the objects reachable only from the cold epoch - log.Infow("marking reachable cold blocks", "boundaryEpoch", boundaryEpoch) - startMark := time.Now() - - boundaryTs, err := s.chain.GetTipsetByHeight(context.Background(), boundaryEpoch, curTs, true) - if err != nil { - return xerrors.Errorf("error getting tipset at boundary epoch: %w", err) - } - - var count int64 - err = s.chain.WalkSnapshot(context.Background(), boundaryTs, 1, s.skipOldMsgs, s.skipMsgReceipts, - func(cid cid.Cid) error { - count++ - return coldSet.Mark(cid) - }) - - if err != nil { - return xerrors.Errorf("error marking cold blocks: %w", err) - } - - if count > s.markSetSize { - s.markSetSize = count + count>>2 // overestimate a bit - } - - log.Infow("marking done", "took", time.Since(startMark)) - - // 2. move cold unreachable objects to the coldstore - log.Info("collecting cold objects") - startCollect := time.Now() - - cold := make([]cid.Cid, 0, s.coldPurgeSize) - - // some stats for logging - var hotCnt, coldCnt int - - // 2.1 iterate through the tracking store and collect unreachable cold objects - err = s.tracker.ForEach(func(cid cid.Cid, writeEpoch abi.ChainEpoch) error { - // is the object still hot? 
- if writeEpoch > coldEpoch { - // yes, stay in the hotstore - hotCnt++ - return nil - } - - // check whether it is reachable in the cold boundary - mark, err := coldSet.Has(cid) - if err != nil { - return xerrors.Errorf("error checkiing cold set for %s: %w", cid, err) - } - - if mark { - hotCnt++ - return nil - } - - // it's cold, mark it for move - cold = append(cold, cid) - coldCnt++ - return nil - }) - - if err != nil { - return xerrors.Errorf("error collecting cold objects: %w", err) - } - - if coldCnt > 0 { - s.coldPurgeSize = coldCnt + coldCnt>>2 // overestimate a bit - } - - log.Infow("collection done", "took", time.Since(startCollect)) - log.Infow("compaction stats", "hot", hotCnt, "cold", coldCnt) - stats.Record(context.Background(), metrics.SplitstoreCompactionHot.M(int64(hotCnt))) - stats.Record(context.Background(), metrics.SplitstoreCompactionCold.M(int64(coldCnt))) - - // Enter critical section - atomic.StoreInt32(&s.critsection, 1) - defer atomic.StoreInt32(&s.critsection, 0) - - // check to see if we are closing first; if that's the case just return +func (s *SplitStore) checkClosing() error { if atomic.LoadInt32(&s.closing) == 1 { - log.Info("splitstore is closing; aborting compaction") - return xerrors.Errorf("compaction aborted") - } - - // 2.2 copy the cold objects to the coldstore - log.Info("moving cold blocks to the coldstore") - startMove := time.Now() - err = s.moveColdBlocks(cold) - if err != nil { - return xerrors.Errorf("error moving cold blocks: %w", err) - } - log.Infow("moving done", "took", time.Since(startMove)) - - // 2.3 delete cold objects from the hotstore - log.Info("purging cold objects from the hotstore") - startPurge := time.Now() - err = s.purgeBlocks(cold) - if err != nil { - return xerrors.Errorf("error purging cold blocks: %w", err) - } - log.Infow("purging cold from hotstore done", "took", time.Since(startPurge)) - - // 2.4 remove the tracker tracking for cold objects - startPurge = time.Now() - log.Info("purging cold 
objects from tracker") - err = s.purgeTracking(cold) - if err != nil { - return xerrors.Errorf("error purging tracking for cold blocks: %w", err) - } - log.Infow("purging cold from tracker done", "took", time.Since(startPurge)) - - // we are done; do some housekeeping - err = s.tracker.Sync() - if err != nil { - return xerrors.Errorf("error syncing tracker: %w", err) - } - - s.gcHotstore() - - err = s.setBaseEpoch(coldEpoch) - if err != nil { - return xerrors.Errorf("error saving base epoch: %w", err) - } - - err = s.ds.Put(markSetSizeKey, int64ToBytes(s.markSetSize)) - if err != nil { - return xerrors.Errorf("error saving mark set size: %w", err) - } - - return nil -} - -func (s *SplitStore) moveColdBlocks(cold []cid.Cid) error { - batch := make([]blocks.Block, 0, batchSize) - - for _, cid := range cold { - blk, err := s.hot.Get(cid) - if err != nil { - if err == dstore.ErrNotFound { - // this can happen if the node is killed after we have deleted the block from the hotstore - // but before we have deleted it from the tracker; just delete the tracker. 
- err = s.tracker.Delete(cid) - if err != nil { - return xerrors.Errorf("error deleting unreachable cid %s from tracker: %w", cid, err) - } - } else { - return xerrors.Errorf("error retrieving tracked block %s from hotstore: %w", cid, err) - } - - continue - } - - batch = append(batch, blk) - if len(batch) == batchSize { - err = s.cold.PutMany(batch) - if err != nil { - return xerrors.Errorf("error putting batch to coldstore: %w", err) - } - batch = batch[:0] - } - } - - if len(batch) > 0 { - err := s.cold.PutMany(batch) - if err != nil { - return xerrors.Errorf("error putting cold to coldstore: %w", err) - } - } - - return nil -} - -func (s *SplitStore) purgeBatch(cids []cid.Cid, deleteBatch func([]cid.Cid) error) error { - if len(cids) == 0 { - return nil - } - - // don't delete one giant batch of 7M objects, but rather do smaller batches - done := false - for i := 0; !done; i++ { - start := i * batchSize - end := start + batchSize - if end >= len(cids) { - end = len(cids) - done = true - } - - err := deleteBatch(cids[start:end]) - if err != nil { - return xerrors.Errorf("error deleting batch: %w", err) - } - } - - return nil -} - -func (s *SplitStore) purgeBlocks(cids []cid.Cid) error { - return s.purgeBatch(cids, s.hot.DeleteMany) -} - -func (s *SplitStore) purgeTracking(cids []cid.Cid) error { - return s.purgeBatch(cids, s.tracker.DeleteBatch) -} - -func (s *SplitStore) gcHotstore() { - if compact, ok := s.hot.(interface{ Compact() error }); ok { - log.Infof("compacting hotstore") - startCompact := time.Now() - err := compact.Compact() - if err != nil { - log.Warnf("error compacting hotstore: %s", err) - return - } - log.Infow("hotstore compaction done", "took", time.Since(startCompact)) - } - - if gc, ok := s.hot.(interface{ CollectGarbage() error }); ok { - log.Infof("garbage collecting hotstore") - startGC := time.Now() - err := gc.CollectGarbage() - if err != nil { - log.Warnf("error garbage collecting hotstore: %s", err) - return - } - log.Infow("hotstore 
garbage collection done", "took", time.Since(startGC)) - } -} - -func (s *SplitStore) compactFull(curTs *types.TipSet) error { - currentEpoch := curTs.Height() - coldEpoch := s.baseEpoch + CompactionCold - boundaryEpoch := currentEpoch - CompactionBoundary - - log.Infow("running full compaction", "currentEpoch", currentEpoch, "baseEpoch", s.baseEpoch, "coldEpoch", coldEpoch, "boundaryEpoch", boundaryEpoch) - - // create two mark sets, one for marking the cold finality region - // and one for marking the hot region - hotSet, err := s.env.Create("hot", s.markSetSize) - if err != nil { - return xerrors.Errorf("error creating hot mark set: %w", err) - } - defer hotSet.Close() //nolint:errcheck - - coldSet, err := s.env.Create("cold", s.markSetSize) - if err != nil { - return xerrors.Errorf("error creating cold mark set: %w", err) - } - defer coldSet.Close() //nolint:errcheck - - // Phase 1: marking - log.Info("marking live blocks") - startMark := time.Now() - - // Phase 1a: mark all reachable CIDs in the hot range - boundaryTs, err := s.chain.GetTipsetByHeight(context.Background(), boundaryEpoch, curTs, true) - if err != nil { - return xerrors.Errorf("error getting tipset at boundary epoch: %w", err) - } - - count := int64(0) - err = s.chain.WalkSnapshot(context.Background(), boundaryTs, boundaryEpoch-coldEpoch, s.skipOldMsgs, s.skipMsgReceipts, - func(cid cid.Cid) error { - count++ - return hotSet.Mark(cid) - }) - - if err != nil { - return xerrors.Errorf("error marking hot blocks: %w", err) - } - - if count > s.markSetSize { - s.markSetSize = count + count>>2 // overestimate a bit - } - - // Phase 1b: mark all reachable CIDs in the cold range - coldTs, err := s.chain.GetTipsetByHeight(context.Background(), coldEpoch, curTs, true) - if err != nil { - return xerrors.Errorf("error getting tipset at cold epoch: %w", err) - } - - count = 0 - err = s.chain.WalkSnapshot(context.Background(), coldTs, CompactionCold, s.skipOldMsgs, s.skipMsgReceipts, - func(cid cid.Cid) error 
{ - count++ - return coldSet.Mark(cid) - }) - - if err != nil { - return xerrors.Errorf("error marking cold blocks: %w", err) - } - - if count > s.markSetSize { - s.markSetSize = count + count>>2 // overestimate a bit - } - - log.Infow("marking done", "took", time.Since(startMark)) - - // Phase 2: sweep cold objects: - // - If a cold object is reachable in the hot range, it stays in the hotstore. - // - If a cold object is reachable in the cold range, it is moved to the coldstore. - // - If a cold object is unreachable, it is deleted if GC is enabled, otherwise moved to the coldstore. - log.Info("collecting cold objects") - startCollect := time.Now() - - // some stats for logging - var hotCnt, coldCnt, deadCnt int - - cold := make([]cid.Cid, 0, s.coldPurgeSize) - dead := make([]cid.Cid, 0, s.deadPurgeSize) - - // 2.1 iterate through the tracker and collect cold and dead objects - err = s.tracker.ForEach(func(cid cid.Cid, wrEpoch abi.ChainEpoch) error { - // is the object stil hot? - if wrEpoch > coldEpoch { - // yes, stay in the hotstore - hotCnt++ - return nil - } - - // the object is cold -- check whether it is reachable in the hot range - mark, err := hotSet.Has(cid) - if err != nil { - return xerrors.Errorf("error checking live mark for %s: %w", cid, err) - } - - if mark { - // the object is reachable in the hot range, stay in the hotstore - hotCnt++ - return nil - } - - // check whether it is reachable in the cold range - mark, err = coldSet.Has(cid) - if err != nil { - return xerrors.Errorf("error checkiing cold set for %s: %w", cid, err) - } - - if s.enableGC { - if mark { - // the object is reachable in the cold range, move it to the cold store - cold = append(cold, cid) - coldCnt++ - } else { - // the object is dead and will be deleted - dead = append(dead, cid) - deadCnt++ - } - } else { - // if GC is disabled, we move both cold and dead objects to the coldstore - cold = append(cold, cid) - if mark { - coldCnt++ - } else { - deadCnt++ - } - } - - return 
nil - }) - - if err != nil { - return xerrors.Errorf("error collecting cold objects: %w", err) - } - - if coldCnt > 0 { - s.coldPurgeSize = coldCnt + coldCnt>>2 // overestimate a bit - } - if deadCnt > 0 { - s.deadPurgeSize = deadCnt + deadCnt>>2 // overestimate a bit - } - - log.Infow("collection done", "took", time.Since(startCollect)) - log.Infow("compaction stats", "hot", hotCnt, "cold", coldCnt, "dead", deadCnt) - stats.Record(context.Background(), metrics.SplitstoreCompactionHot.M(int64(hotCnt))) - stats.Record(context.Background(), metrics.SplitstoreCompactionCold.M(int64(coldCnt))) - stats.Record(context.Background(), metrics.SplitstoreCompactionDead.M(int64(deadCnt))) - - // Enter critical section - atomic.StoreInt32(&s.critsection, 1) - defer atomic.StoreInt32(&s.critsection, 0) - - // check to see if we are closing first; if that's the case just return - if atomic.LoadInt32(&s.closing) == 1 { - log.Info("splitstore is closing; aborting compaction") - return xerrors.Errorf("compaction aborted") - } - - // 2.2 copy the cold objects to the coldstore - log.Info("moving cold objects to the coldstore") - startMove := time.Now() - err = s.moveColdBlocks(cold) - if err != nil { - return xerrors.Errorf("error moving cold blocks: %w", err) - } - log.Infow("moving done", "took", time.Since(startMove)) - - // 2.3 delete cold objects from the hotstore - log.Info("purging cold objects from the hotstore") - startPurge := time.Now() - err = s.purgeBlocks(cold) - if err != nil { - return xerrors.Errorf("error purging cold blocks: %w", err) - } - log.Infow("purging cold from hotstore done", "took", time.Since(startPurge)) - - // 2.4 remove the tracker tracking for cold objects - startPurge = time.Now() - log.Info("purging cold objects from tracker") - err = s.purgeTracking(cold) - if err != nil { - return xerrors.Errorf("error purging tracking for cold blocks: %w", err) - } - log.Infow("purging cold from tracker done", "took", time.Since(startPurge)) - - // 3. 
if we have dead objects, delete them from the hotstore and remove the tracking - if len(dead) > 0 { - log.Info("deleting dead objects") - err = s.purgeBlocks(dead) - if err != nil { - return xerrors.Errorf("error purging dead blocks: %w", err) - } - - // remove the tracker tracking - startPurge := time.Now() - log.Info("purging dead objects from tracker") - err = s.purgeTracking(dead) - if err != nil { - return xerrors.Errorf("error purging tracking for dead blocks: %w", err) - } - log.Infow("purging dead from tracker done", "took", time.Since(startPurge)) - } - - // we are done; do some housekeeping - err = s.tracker.Sync() - if err != nil { - return xerrors.Errorf("error syncing tracker: %w", err) - } - - s.gcHotstore() - - err = s.setBaseEpoch(coldEpoch) - if err != nil { - return xerrors.Errorf("error saving base epoch: %w", err) - } - - err = s.ds.Put(markSetSizeKey, int64ToBytes(s.markSetSize)) - if err != nil { - return xerrors.Errorf("error saving mark set size: %w", err) + return xerrors.Errorf("splitstore is closing") } return nil @@ -1048,33 +557,5 @@ func (s *SplitStore) compactFull(curTs *types.TipSet) error { func (s *SplitStore) setBaseEpoch(epoch abi.ChainEpoch) error { s.baseEpoch = epoch - // write to datastore return s.ds.Put(baseEpochKey, epochToBytes(epoch)) } - -func epochToBytes(epoch abi.ChainEpoch) []byte { - return uint64ToBytes(uint64(epoch)) -} - -func bytesToEpoch(buf []byte) abi.ChainEpoch { - return abi.ChainEpoch(bytesToUint64(buf)) -} - -func int64ToBytes(i int64) []byte { - return uint64ToBytes(uint64(i)) -} - -func bytesToInt64(buf []byte) int64 { - return int64(bytesToUint64(buf)) -} - -func uint64ToBytes(i uint64) []byte { - buf := make([]byte, 16) - n := binary.PutUvarint(buf, i) - return buf[:n] -} - -func bytesToUint64(buf []byte) uint64 { - i, _ := binary.Uvarint(buf) - return i -} diff --git a/blockstore/splitstore/splitstore_compact.go b/blockstore/splitstore/splitstore_compact.go new file mode 100644 index 
000000000..d3d87bca1 --- /dev/null +++ b/blockstore/splitstore/splitstore_compact.go @@ -0,0 +1,1126 @@ +package splitstore + +import ( + "bytes" + "errors" + "runtime" + "sort" + "sync/atomic" + "time" + + "golang.org/x/sync/errgroup" + "golang.org/x/xerrors" + + blocks "github.com/ipfs/go-block-format" + cid "github.com/ipfs/go-cid" + cbg "github.com/whyrusleeping/cbor-gen" + + "github.com/filecoin-project/go-state-types/abi" + bstore "github.com/filecoin-project/lotus/blockstore" + "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/metrics" + + "go.opencensus.io/stats" +) + +var ( + // CompactionThreshold is the number of epochs that need to have elapsed + // from the previously compacted epoch to trigger a new compaction. + // + // |················· CompactionThreshold ··················| + // | | + // =======‖≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡‖------------------------» + // | | chain --> ↑__ current epoch + // | archived epochs ___↑ + // ↑________ CompactionBoundary + // + // === :: cold (already archived) + // ≡≡≡ :: to be archived in this compaction + // --- :: hot + CompactionThreshold = 5 * build.Finality + + // CompactionBoundary is the number of epochs from the current epoch at which + // we will walk the chain for live objects. + CompactionBoundary = 4 * build.Finality + + // SyncGapTime is the time delay from a tipset's min timestamp before we decide + // there is a sync gap + SyncGapTime = time.Minute +) + +var ( + // used to signal end of walk + errStopWalk = errors.New("stop walk") +) + +const ( + batchSize = 16384 + + defaultColdPurgeSize = 7_000_000 +) + +func (s *SplitStore) HeadChange(_, apply []*types.TipSet) error { + s.headChangeMx.Lock() + defer s.headChangeMx.Unlock() + + // Revert only. 
+ if len(apply) == 0 { + return nil + } + + curTs := apply[len(apply)-1] + epoch := curTs.Height() + + // NOTE: there is an implicit invariant assumption that HeadChange is invoked + // synchronously and no other HeadChange can be invoked while one is in + // progress. + // this is guaranteed by the chainstore, and it is pervasive in all lotus + // -- if that ever changes then all hell will break loose in general and + // we will have a rance to protectTipSets here. + // Reagrdless, we put a mutex in HeadChange just to be safe + + if !atomic.CompareAndSwapInt32(&s.compacting, 0, 1) { + // we are currently compacting -- protect the new tipset(s) + s.protectTipSets(apply) + return nil + } + + // check if we are actually closing first + if atomic.LoadInt32(&s.closing) == 1 { + atomic.StoreInt32(&s.compacting, 0) + return nil + } + + timestamp := time.Unix(int64(curTs.MinTimestamp()), 0) + if time.Since(timestamp) > SyncGapTime { + // don't attempt compaction before we have caught up syncing + atomic.StoreInt32(&s.compacting, 0) + return nil + } + + if epoch-s.baseEpoch > CompactionThreshold { + // it's time to compact -- prepare the transaction and go! + s.beginTxnProtect() + go func() { + defer atomic.StoreInt32(&s.compacting, 0) + defer s.endTxnProtect() + + log.Info("compacting splitstore") + start := time.Now() + + s.compact(curTs) + + log.Infow("compaction done", "took", time.Since(start)) + }() + } else { + // no compaction necessary + atomic.StoreInt32(&s.compacting, 0) + } + + return nil +} + +// transactionally protect incoming tipsets +func (s *SplitStore) protectTipSets(apply []*types.TipSet) { + s.txnLk.RLock() + defer s.txnLk.RUnlock() + + if !s.txnActive { + return + } + + var cids []cid.Cid + for _, ts := range apply { + cids = append(cids, ts.Cids()...) 
+ } + + s.trackTxnRefMany(cids) +} + +// transactionally protect a view +func (s *SplitStore) protectView(c cid.Cid) { + s.txnLk.RLock() + defer s.txnLk.RUnlock() + + if s.txnActive { + s.trackTxnRef(c) + } + + s.txnViewsMx.Lock() + s.txnViews++ + s.txnViewsMx.Unlock() +} + +func (s *SplitStore) viewDone() { + s.txnViewsMx.Lock() + defer s.txnViewsMx.Unlock() + + s.txnViews-- + if s.txnViews == 0 && s.txnViewsWaiting { + s.txnViewsCond.Broadcast() + } +} + +func (s *SplitStore) viewWait() { + s.txnViewsMx.Lock() + defer s.txnViewsMx.Unlock() + + s.txnViewsWaiting = true + for s.txnViews > 0 { + s.txnViewsCond.Wait() + } + s.txnViewsWaiting = false +} + +// transactionally protect a reference to an object +func (s *SplitStore) trackTxnRef(c cid.Cid) { + if !s.txnActive { + // not compacting + return + } + + if isUnitaryObject(c) { + return + } + + if s.txnProtect != nil { + mark, err := s.txnProtect.Has(c) + if err != nil { + log.Warnf("error checking markset: %s", err) + // track it anyways + } else if mark { + return + } + } + + s.txnRefsMx.Lock() + s.txnRefs[c] = struct{}{} + s.txnRefsMx.Unlock() +} + +// transactionally protect a batch of references +func (s *SplitStore) trackTxnRefMany(cids []cid.Cid) { + if !s.txnActive { + // not compacting + return + } + + s.txnRefsMx.Lock() + defer s.txnRefsMx.Unlock() + + quiet := false + for _, c := range cids { + if isUnitaryObject(c) { + continue + } + + if s.txnProtect != nil { + mark, err := s.txnProtect.Has(c) + if err != nil { + if !quiet { + quiet = true + log.Warnf("error checking markset: %s", err) + } + // track it anyways + } + + if mark { + continue + } + } + + s.txnRefs[c] = struct{}{} + } + + return +} + +// protect all pending transactional references +func (s *SplitStore) protectTxnRefs(markSet MarkSet) error { + for { + var txnRefs map[cid.Cid]struct{} + + s.txnRefsMx.Lock() + if len(s.txnRefs) > 0 { + txnRefs = s.txnRefs + s.txnRefs = make(map[cid.Cid]struct{}) + } + s.txnRefsMx.Unlock() + + if 
len(txnRefs) == 0 { + return nil + } + + log.Infow("protecting transactional references", "refs", len(txnRefs)) + count := 0 + workch := make(chan cid.Cid, len(txnRefs)) + startProtect := time.Now() + + for c := range txnRefs { + mark, err := markSet.Has(c) + if err != nil { + return xerrors.Errorf("error checking markset: %w", err) + } + + if mark { + continue + } + + workch <- c + count++ + } + close(workch) + + if count == 0 { + return nil + } + + workers := runtime.NumCPU() / 2 + if workers < 2 { + workers = 2 + } + if workers > count { + workers = count + } + + worker := func() error { + for c := range workch { + err := s.doTxnProtect(c, markSet) + if err != nil { + return xerrors.Errorf("error protecting transactional references to %s: %w", c, err) + } + } + return nil + } + + g := new(errgroup.Group) + for i := 0; i < workers; i++ { + g.Go(worker) + } + + if err := g.Wait(); err != nil { + return err + } + + log.Infow("protecting transactional refs done", "took", time.Since(startProtect), "protected", count) + } +} + +// transactionally protect a reference by walking the object and marking. +// concurrent markings are short circuited by checking the markset. +func (s *SplitStore) doTxnProtect(root cid.Cid, markSet MarkSet) error { + if err := s.checkClosing(); err != nil { + return err + } + + // Note: cold objects are deleted heaviest first, so the consituents of an object + // cannot be deleted before the object itself. 
+ return s.walkObjectIncomplete(root, cid.NewSet(), + func(c cid.Cid) error { + if isUnitaryObject(c) { + return errStopWalk + } + + mark, err := markSet.Has(c) + if err != nil { + return xerrors.Errorf("error checking markset: %w", err) + } + + // it's marked, nothing to do + if mark { + return errStopWalk + } + + return markSet.Mark(c) + }, + func(c cid.Cid) error { + if s.txnMissing != nil { + log.Warnf("missing object reference %s in %s", c, root) + s.txnRefsMx.Lock() + s.txnMissing[c] = struct{}{} + s.txnRefsMx.Unlock() + } + return errStopWalk + }) +} + +func (s *SplitStore) applyProtectors() error { + s.mx.Lock() + defer s.mx.Unlock() + + count := 0 + for _, protect := range s.protectors { + err := protect(func(c cid.Cid) error { + s.trackTxnRef(c) + count++ + return nil + }) + + if err != nil { + return xerrors.Errorf("error applynig protector: %w", err) + } + } + + if count > 0 { + log.Infof("protected %d references through %d protectors", count, len(s.protectors)) + } + + return nil +} + +// --- Compaction --- +// Compaction works transactionally with the following algorithm: +// - We prepare a transaction, whereby all i/o referenced objects through the API are tracked. +// - We walk the chain and mark reachable objects, keeping 4 finalities of state roots and messages and all headers all the way to genesis. +// - Once the chain walk is complete, we begin full transaction protection with concurrent marking; we walk and mark all references created during the chain walk. On the same time, all I/O through the API concurrently marks objects as live references. +// - We collect cold objects by iterating through the hotstore and checking the mark set; if an object is not marked, then it is candidate for purge. +// - When running with a coldstore, we next copy all cold objects to the coldstore. 
+// - At this point we are ready to begin purging: +// - We sort cold objects heaviest first, so as to never delete the consituents of a DAG before the DAG itself (which would leave dangling references) +// - We delete in small batches taking a lock; each batch is checked again for marks, from the concurrent transactional mark, so as to never delete anything live +// - We then end the transaction and compact/gc the hotstore. +func (s *SplitStore) compact(curTs *types.TipSet) { + log.Info("waiting for active views to complete") + start := time.Now() + s.viewWait() + log.Infow("waiting for active views done", "took", time.Since(start)) + + start = time.Now() + err := s.doCompact(curTs) + took := time.Since(start).Milliseconds() + stats.Record(s.ctx, metrics.SplitstoreCompactionTimeSeconds.M(float64(took)/1e3)) + + if err != nil { + log.Errorf("COMPACTION ERROR: %s", err) + } +} + +func (s *SplitStore) doCompact(curTs *types.TipSet) error { + currentEpoch := curTs.Height() + boundaryEpoch := currentEpoch - CompactionBoundary + + log.Infow("running compaction", "currentEpoch", currentEpoch, "baseEpoch", s.baseEpoch, "boundaryEpoch", boundaryEpoch, "compactionIndex", s.compactionIndex) + + markSet, err := s.markSetEnv.Create("live", s.markSetSize) + if err != nil { + return xerrors.Errorf("error creating mark set: %w", err) + } + defer markSet.Close() //nolint:errcheck + defer s.debug.Flush() + + if err := s.checkClosing(); err != nil { + return err + } + + // we are ready for concurrent marking + s.beginTxnMarking(markSet) + + // 0. track all protected references at beginning of compaction; anything added later should + // be transactionally protected by the write + log.Info("protecting references with registered protectors") + err = s.applyProtectors() + if err != nil { + return err + } + + // 1. mark reachable objects by walking the chain from the current epoch; we keep state roots + // and messages until the boundary epoch. 
+ log.Info("marking reachable objects") + startMark := time.Now() + + var count int64 + err = s.walkChain(curTs, boundaryEpoch, true, + func(c cid.Cid) error { + if isUnitaryObject(c) { + return errStopWalk + } + + count++ + return markSet.Mark(c) + }) + + if err != nil { + return xerrors.Errorf("error marking: %w", err) + } + + s.markSetSize = count + count>>2 // overestimate a bit + + log.Infow("marking done", "took", time.Since(startMark), "marked", count) + + if err := s.checkClosing(); err != nil { + return err + } + + // 1.1 protect transactional refs + err = s.protectTxnRefs(markSet) + if err != nil { + return xerrors.Errorf("error protecting transactional refs: %w", err) + } + + if err := s.checkClosing(); err != nil { + return err + } + + // 2. iterate through the hotstore to collect cold objects + log.Info("collecting cold objects") + startCollect := time.Now() + + // some stats for logging + var hotCnt, coldCnt int + + cold := make([]cid.Cid, 0, s.coldPurgeSize) + err = s.hot.ForEachKey(func(c cid.Cid) error { + // was it marked? 
+ mark, err := markSet.Has(c) + if err != nil { + return xerrors.Errorf("error checking mark set for %s: %w", c, err) + } + + if mark { + hotCnt++ + return nil + } + + // it's cold, mark it as candidate for move + cold = append(cold, c) + coldCnt++ + + return nil + }) + + if err != nil { + return xerrors.Errorf("error collecting cold objects: %w", err) + } + + log.Infow("cold collection done", "took", time.Since(startCollect)) + + if coldCnt > 0 { + s.coldPurgeSize = coldCnt + coldCnt>>2 // overestimate a bit + } + + log.Infow("compaction stats", "hot", hotCnt, "cold", coldCnt) + stats.Record(s.ctx, metrics.SplitstoreCompactionHot.M(int64(hotCnt))) + stats.Record(s.ctx, metrics.SplitstoreCompactionCold.M(int64(coldCnt))) + + if err := s.checkClosing(); err != nil { + return err + } + + // now that we have collected cold objects, check for missing references from transactional i/o + // and disable further collection of such references (they will not be acted upon as we can't + // possibly delete objects we didn't have when we were collecting cold objects) + s.waitForMissingRefs(markSet) + + if err := s.checkClosing(); err != nil { + return err + } + + // 3. copy the cold objects to the coldstore -- if we have one + if !s.cfg.DiscardColdBlocks { + log.Info("moving cold objects to the coldstore") + startMove := time.Now() + err = s.moveColdBlocks(cold) + if err != nil { + return xerrors.Errorf("error moving cold objects: %w", err) + } + log.Infow("moving done", "took", time.Since(startMove)) + + if err := s.checkClosing(); err != nil { + return err + } + } + + // 4. sort cold objects so that the dags with most references are deleted first + // this ensures that we can't refer to a dag with its consituents already deleted, ie + // we lave no dangling references. 
+ log.Info("sorting cold objects") + startSort := time.Now() + err = s.sortObjects(cold) + if err != nil { + return xerrors.Errorf("error sorting objects: %w", err) + } + log.Infow("sorting done", "took", time.Since(startSort)) + + // 4.1 protect transactional refs once more + // strictly speaking, this is not necessary as purge will do it before deleting each + // batch. however, there is likely a largish number of references accumulated during + // ths sort and this protects before entering pruge context. + err = s.protectTxnRefs(markSet) + if err != nil { + return xerrors.Errorf("error protecting transactional refs: %w", err) + } + + if err := s.checkClosing(); err != nil { + return err + } + + // 5. purge cold objects from the hotstore, taking protected references into account + log.Info("purging cold objects from the hotstore") + startPurge := time.Now() + err = s.purge(cold, markSet) + if err != nil { + return xerrors.Errorf("error purging cold blocks: %w", err) + } + log.Infow("purging cold objects from hotstore done", "took", time.Since(startPurge)) + + // we are done; do some housekeeping + s.endTxnProtect() + s.gcHotstore() + + err = s.setBaseEpoch(boundaryEpoch) + if err != nil { + return xerrors.Errorf("error saving base epoch: %w", err) + } + + err = s.ds.Put(markSetSizeKey, int64ToBytes(s.markSetSize)) + if err != nil { + return xerrors.Errorf("error saving mark set size: %w", err) + } + + s.compactionIndex++ + err = s.ds.Put(compactionIndexKey, int64ToBytes(s.compactionIndex)) + if err != nil { + return xerrors.Errorf("error saving compaction index: %w", err) + } + + return nil +} + +func (s *SplitStore) beginTxnProtect() { + log.Info("preparing compaction transaction") + + s.txnLk.Lock() + defer s.txnLk.Unlock() + + s.txnActive = true + s.txnRefs = make(map[cid.Cid]struct{}) + s.txnMissing = make(map[cid.Cid]struct{}) +} + +func (s *SplitStore) beginTxnMarking(markSet MarkSet) { + markSet.SetConcurrent() + + s.txnLk.Lock() + s.txnProtect = markSet + 
s.txnLk.Unlock() +} + +func (s *SplitStore) endTxnProtect() { + s.txnLk.Lock() + defer s.txnLk.Unlock() + + if !s.txnActive { + return + } + + // release markset memory + if s.txnProtect != nil { + _ = s.txnProtect.Close() + } + + s.txnActive = false + s.txnProtect = nil + s.txnRefs = nil + s.txnMissing = nil +} + +func (s *SplitStore) walkChain(ts *types.TipSet, boundary abi.ChainEpoch, inclMsgs bool, + f func(cid.Cid) error) error { + visited := cid.NewSet() + walked := cid.NewSet() + toWalk := ts.Cids() + walkCnt := 0 + scanCnt := 0 + + walkBlock := func(c cid.Cid) error { + if !visited.Visit(c) { + return nil + } + + walkCnt++ + + if err := f(c); err != nil { + return err + } + + var hdr types.BlockHeader + err := s.view(c, func(data []byte) error { + return hdr.UnmarshalCBOR(bytes.NewBuffer(data)) + }) + + if err != nil { + return xerrors.Errorf("error unmarshaling block header (cid: %s): %w", c, err) + } + + // we only scan the block if it is at or above the boundary + if hdr.Height >= boundary || hdr.Height == 0 { + scanCnt++ + if inclMsgs && hdr.Height > 0 { + if err := s.walkObject(hdr.Messages, walked, f); err != nil { + return xerrors.Errorf("error walking messages (cid: %s): %w", hdr.Messages, err) + } + + if err := s.walkObject(hdr.ParentMessageReceipts, walked, f); err != nil { + return xerrors.Errorf("error walking message receipts (cid: %s): %w", hdr.ParentMessageReceipts, err) + } + } + + if err := s.walkObject(hdr.ParentStateRoot, walked, f); err != nil { + return xerrors.Errorf("error walking state root (cid: %s): %w", hdr.ParentStateRoot, err) + } + } + + if hdr.Height > 0 { + toWalk = append(toWalk, hdr.Parents...) 
+ } + + return nil + } + + for len(toWalk) > 0 { + // walking can take a while, so check this with every opportunity + if err := s.checkClosing(); err != nil { + return err + } + + walking := toWalk + toWalk = nil + for _, c := range walking { + if err := walkBlock(c); err != nil { + return xerrors.Errorf("error walking block (cid: %s): %w", c, err) + } + } + } + + log.Infow("chain walk done", "walked", walkCnt, "scanned", scanCnt) + + return nil +} + +func (s *SplitStore) walkObject(c cid.Cid, walked *cid.Set, f func(cid.Cid) error) error { + if !walked.Visit(c) { + return nil + } + + if err := f(c); err != nil { + if err == errStopWalk { + return nil + } + + return err + } + + if c.Prefix().Codec != cid.DagCBOR { + return nil + } + + // check this before recursing + if err := s.checkClosing(); err != nil { + return err + } + + var links []cid.Cid + err := s.view(c, func(data []byte) error { + return cbg.ScanForLinks(bytes.NewReader(data), func(c cid.Cid) { + links = append(links, c) + }) + }) + + if err != nil { + return xerrors.Errorf("error scanning linked block (cid: %s): %w", c, err) + } + + for _, c := range links { + err := s.walkObject(c, walked, f) + if err != nil { + return xerrors.Errorf("error walking link (cid: %s): %w", c, err) + } + } + + return nil +} + +// like walkObject, but the object may be potentially incomplete (references missing) +func (s *SplitStore) walkObjectIncomplete(c cid.Cid, walked *cid.Set, f, missing func(cid.Cid) error) error { + if !walked.Visit(c) { + return nil + } + + // occurs check -- only for DAGs + if c.Prefix().Codec == cid.DagCBOR { + has, err := s.has(c) + if err != nil { + return xerrors.Errorf("error occur checking %s: %w", c, err) + } + + if !has { + err = missing(c) + if err == errStopWalk { + return nil + } + + return err + } + } + + if err := f(c); err != nil { + if err == errStopWalk { + return nil + } + + return err + } + + if c.Prefix().Codec != cid.DagCBOR { + return nil + } + + // check this before 
recursing + if err := s.checkClosing(); err != nil { + return err + } + + var links []cid.Cid + err := s.view(c, func(data []byte) error { + return cbg.ScanForLinks(bytes.NewReader(data), func(c cid.Cid) { + links = append(links, c) + }) + }) + + if err != nil { + return xerrors.Errorf("error scanning linked block (cid: %s): %w", c, err) + } + + for _, c := range links { + err := s.walkObjectIncomplete(c, walked, f, missing) + if err != nil { + return xerrors.Errorf("error walking link (cid: %s): %w", c, err) + } + } + + return nil +} + +// internal version used by walk +func (s *SplitStore) view(c cid.Cid, cb func([]byte) error) error { + if isIdentiyCid(c) { + data, err := decodeIdentityCid(c) + if err != nil { + return err + } + + return cb(data) + } + + err := s.hot.View(c, cb) + switch err { + case bstore.ErrNotFound: + return s.cold.View(c, cb) + + default: + return err + } +} + +func (s *SplitStore) has(c cid.Cid) (bool, error) { + if isIdentiyCid(c) { + return true, nil + } + + has, err := s.hot.Has(c) + + if has || err != nil { + return has, err + } + + return s.cold.Has(c) +} + +func (s *SplitStore) moveColdBlocks(cold []cid.Cid) error { + batch := make([]blocks.Block, 0, batchSize) + + for _, c := range cold { + if err := s.checkClosing(); err != nil { + return err + } + + blk, err := s.hot.Get(c) + if err != nil { + if err == bstore.ErrNotFound { + log.Warnf("hotstore missing block %s", c) + continue + } + + return xerrors.Errorf("error retrieving block %s from hotstore: %w", c, err) + } + + batch = append(batch, blk) + if len(batch) == batchSize { + err = s.cold.PutMany(batch) + if err != nil { + return xerrors.Errorf("error putting batch to coldstore: %w", err) + } + batch = batch[:0] + } + } + + if len(batch) > 0 { + err := s.cold.PutMany(batch) + if err != nil { + return xerrors.Errorf("error putting batch to coldstore: %w", err) + } + } + + return nil +} + +// sorts a slice of objects heaviest first -- it's a little expensive but worth the +// 
guarantee that we don't leave dangling references behind, e.g. if we die in the middle +// of a purge. +func (s *SplitStore) sortObjects(cids []cid.Cid) error { + // we cache the keys to avoid making a gazillion of strings + keys := make(map[cid.Cid]string) + key := func(c cid.Cid) string { + s, ok := keys[c] + if !ok { + s = string(c.Hash()) + keys[c] = s + } + return s + } + + // compute sorting weights as the cumulative number of DAG links + weights := make(map[string]int) + for _, c := range cids { + // this can take quite a while, so check for shutdown with every opportunity + if err := s.checkClosing(); err != nil { + return err + } + + w := s.getObjectWeight(c, weights, key) + weights[key(c)] = w + } + + // sort! + sort.Slice(cids, func(i, j int) bool { + wi := weights[key(cids[i])] + wj := weights[key(cids[j])] + if wi == wj { + return bytes.Compare(cids[i].Hash(), cids[j].Hash()) > 0 + } + + return wi > wj + }) + + return nil +} + +func (s *SplitStore) getObjectWeight(c cid.Cid, weights map[string]int, key func(cid.Cid) string) int { + w, ok := weights[key(c)] + if ok { + return w + } + + // we treat block headers specially to avoid walking the entire chain + var hdr types.BlockHeader + err := s.view(c, func(data []byte) error { + return hdr.UnmarshalCBOR(bytes.NewBuffer(data)) + }) + if err == nil { + w1 := s.getObjectWeight(hdr.ParentStateRoot, weights, key) + weights[key(hdr.ParentStateRoot)] = w1 + + w2 := s.getObjectWeight(hdr.Messages, weights, key) + weights[key(hdr.Messages)] = w2 + + return 1 + w1 + w2 + } + + var links []cid.Cid + err = s.view(c, func(data []byte) error { + return cbg.ScanForLinks(bytes.NewReader(data), func(c cid.Cid) { + links = append(links, c) + }) + }) + if err != nil { + return 1 + } + + w = 1 + for _, c := range links { + // these are internal refs, so dags will be dags + if c.Prefix().Codec != cid.DagCBOR { + w++ + continue + } + + wc := s.getObjectWeight(c, weights, key) + weights[key(c)] = wc + + w += wc + } + + return 
w +} + +func (s *SplitStore) purgeBatch(cids []cid.Cid, deleteBatch func([]cid.Cid) error) error { + if len(cids) == 0 { + return nil + } + + // we don't delete one giant batch of millions of objects, but rather do smaller batches + // so that we don't stop the world for an extended period of time + done := false + for i := 0; !done; i++ { + start := i * batchSize + end := start + batchSize + if end >= len(cids) { + end = len(cids) + done = true + } + + err := deleteBatch(cids[start:end]) + if err != nil { + return xerrors.Errorf("error deleting batch: %w", err) + } + } + + return nil +} + +func (s *SplitStore) purge(cids []cid.Cid, markSet MarkSet) error { + deadCids := make([]cid.Cid, 0, batchSize) + var purgeCnt, liveCnt int + defer func() { + log.Infow("purged cold objects", "purged", purgeCnt, "live", liveCnt) + }() + + return s.purgeBatch(cids, + func(cids []cid.Cid) error { + deadCids := deadCids[:0] + + for { + if err := s.checkClosing(); err != nil { + return err + } + + s.txnLk.Lock() + if len(s.txnRefs) == 0 { + // keep the lock! + break + } + + // unlock and protect + s.txnLk.Unlock() + + err := s.protectTxnRefs(markSet) + if err != nil { + return xerrors.Errorf("error protecting transactional refs: %w", err) + } + } + + defer s.txnLk.Unlock() + + for _, c := range cids { + live, err := markSet.Has(c) + if err != nil { + return xerrors.Errorf("error checking for liveness: %w", err) + } + + if live { + liveCnt++ + continue + } + + deadCids = append(deadCids, c) + } + + err := s.hot.DeleteMany(deadCids) + if err != nil { + return xerrors.Errorf("error purging cold objects: %w", err) + } + + s.debug.LogDelete(deadCids) + + purgeCnt += len(deadCids) + return nil + }) +} + +// I really don't like having this code, but we seem to have some occasional DAG references with +// missing constituents. During testing in mainnet *some* of these references *sometimes* appeared +// after a little bit. 
+// We need to figure out where they are coming from and eliminate that vector, but until then we +// have this gem[TM]. +// My best guess is that they are parent message receipts or yet to be computed state roots; magik +// thinks the cause may be block validation. +func (s *SplitStore) waitForMissingRefs(markSet MarkSet) { + s.txnLk.Lock() + missing := s.txnMissing + s.txnMissing = nil + s.txnLk.Unlock() + + if len(missing) == 0 { + return + } + + log.Info("waiting for missing references") + start := time.Now() + count := 0 + defer func() { + log.Infow("waiting for missing references done", "took", time.Since(start), "marked", count) + }() + + for i := 0; i < 3 && len(missing) > 0; i++ { + if err := s.checkClosing(); err != nil { + return + } + + wait := time.Duration(i) * time.Minute + log.Infof("retrying for %d missing references in %s (attempt: %d)", len(missing), wait, i+1) + if wait > 0 { + time.Sleep(wait) + } + + towalk := missing + walked := cid.NewSet() + missing = make(map[cid.Cid]struct{}) + + for c := range towalk { + err := s.walkObjectIncomplete(c, walked, + func(c cid.Cid) error { + if isUnitaryObject(c) { + return errStopWalk + } + + mark, err := markSet.Has(c) + if err != nil { + return xerrors.Errorf("error checking markset for %s: %w", c, err) + } + + if mark { + return errStopWalk + } + + count++ + return markSet.Mark(c) + }, + func(c cid.Cid) error { + missing[c] = struct{}{} + return errStopWalk + }) + + if err != nil { + log.Warnf("error marking: %s", err) + } + } + } + + if len(missing) > 0 { + log.Warnf("still missing %d references", len(missing)) + for c := range missing { + log.Warnf("unresolved missing reference: %s", c) + } + } +} diff --git a/blockstore/splitstore/splitstore_expose.go b/blockstore/splitstore/splitstore_expose.go new file mode 100644 index 000000000..1065e460c --- /dev/null +++ b/blockstore/splitstore/splitstore_expose.go @@ -0,0 +1,114 @@ +package splitstore + +import ( + "context" + "errors" + + blocks 
"github.com/ipfs/go-block-format" + cid "github.com/ipfs/go-cid" + + bstore "github.com/filecoin-project/lotus/blockstore" +) + +type exposedSplitStore struct { + s *SplitStore +} + +var _ bstore.Blockstore = (*exposedSplitStore)(nil) + +func (s *SplitStore) Expose() bstore.Blockstore { + return &exposedSplitStore{s: s} +} + +func (es *exposedSplitStore) DeleteBlock(_ cid.Cid) error { + return errors.New("DeleteBlock: operation not supported") +} + +func (es *exposedSplitStore) DeleteMany(_ []cid.Cid) error { + return errors.New("DeleteMany: operation not supported") +} + +func (es *exposedSplitStore) Has(c cid.Cid) (bool, error) { + if isIdentiyCid(c) { + return true, nil + } + + has, err := es.s.hot.Has(c) + if has || err != nil { + return has, err + } + + return es.s.cold.Has(c) +} + +func (es *exposedSplitStore) Get(c cid.Cid) (blocks.Block, error) { + if isIdentiyCid(c) { + data, err := decodeIdentityCid(c) + if err != nil { + return nil, err + } + + return blocks.NewBlockWithCid(data, c) + } + + blk, err := es.s.hot.Get(c) + switch err { + case bstore.ErrNotFound: + return es.s.cold.Get(c) + default: + return blk, err + } +} + +func (es *exposedSplitStore) GetSize(c cid.Cid) (int, error) { + if isIdentiyCid(c) { + data, err := decodeIdentityCid(c) + if err != nil { + return 0, err + } + + return len(data), nil + } + + size, err := es.s.hot.GetSize(c) + switch err { + case bstore.ErrNotFound: + return es.s.cold.GetSize(c) + default: + return size, err + } +} + +func (es *exposedSplitStore) Put(blk blocks.Block) error { + return es.s.Put(blk) +} + +func (es *exposedSplitStore) PutMany(blks []blocks.Block) error { + return es.s.PutMany(blks) +} + +func (es *exposedSplitStore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) { + return es.s.AllKeysChan(ctx) +} + +func (es *exposedSplitStore) HashOnRead(enabled bool) {} + +func (es *exposedSplitStore) View(c cid.Cid, f func([]byte) error) error { + if isIdentiyCid(c) { + data, err := decodeIdentityCid(c) + 
if err != nil { + return err + } + + return f(data) + } + + err := es.s.hot.View(c, f) + switch err { + case bstore.ErrNotFound: + return es.s.cold.View(c, f) + + default: + return err + } +} diff --git a/blockstore/splitstore/splitstore_gc.go b/blockstore/splitstore/splitstore_gc.go new file mode 100644 index 000000000..46668167c --- /dev/null +++ b/blockstore/splitstore/splitstore_gc.go @@ -0,0 +1,30 @@ +package splitstore + +import ( + "fmt" + "time" + + bstore "github.com/filecoin-project/lotus/blockstore" +) + +func (s *SplitStore) gcHotstore() { + if err := s.gcBlockstoreOnline(s.hot); err != nil { + log.Warnf("error garbage collecting hostore: %s", err) + } +} + +func (s *SplitStore) gcBlockstoreOnline(b bstore.Blockstore) error { + if gc, ok := b.(bstore.BlockstoreGC); ok { + log.Info("garbage collecting blockstore") + startGC := time.Now() + + if err := gc.CollectGarbage(); err != nil { + return err + } + + log.Infow("garbage collecting hotstore done", "took", time.Since(startGC)) + return nil + } + + return fmt.Errorf("blockstore doesn't support online gc: %T", b) +} diff --git a/blockstore/splitstore/splitstore_test.go b/blockstore/splitstore/splitstore_test.go index e5314b80f..26e5c3cc0 100644 --- a/blockstore/splitstore/splitstore_test.go +++ b/blockstore/splitstore/splitstore_test.go @@ -2,6 +2,7 @@ package splitstore import ( "context" + "errors" "fmt" "sync" "sync/atomic" @@ -13,6 +14,7 @@ import ( "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/types/mock" + blocks "github.com/ipfs/go-block-format" cid "github.com/ipfs/go-cid" datastore "github.com/ipfs/go-datastore" dssync "github.com/ipfs/go-datastore/sync" @@ -21,22 +23,34 @@ import ( func init() { CompactionThreshold = 5 - CompactionCold = 1 CompactionBoundary = 2 logging.SetLogLevel("splitstore", "DEBUG") } func testSplitStore(t *testing.T, cfg *Config) { - chain := &mockChain{} - // genesis - genBlock := mock.MkBlock(nil, 0, 0) - genTs := 
mock.TipSet(genBlock) - chain.push(genTs) + chain := &mockChain{t: t} // the myriads of stores ds := dssync.MutexWrap(datastore.NewMapDatastore()) - hot := blockstore.NewMemorySync() - cold := blockstore.NewMemorySync() + hot := newMockStore() + cold := newMockStore() + + // this is necessary to avoid the garbage mock puts in the blocks + garbage := blocks.NewBlock([]byte{1, 2, 3}) + err := cold.Put(garbage) + if err != nil { + t.Fatal(err) + } + + // genesis + genBlock := mock.MkBlock(nil, 0, 0) + genBlock.Messages = garbage.Cid() + genBlock.ParentMessageReceipts = garbage.Cid() + genBlock.ParentStateRoot = garbage.Cid() + genBlock.Timestamp = uint64(time.Now().Unix()) + + genTs := mock.TipSet(genBlock) + chain.push(genTs) // put the genesis block to cold store blk, err := genBlock.ToStorageBlock() @@ -49,6 +63,20 @@ func testSplitStore(t *testing.T, cfg *Config) { t.Fatal(err) } + // create a garbage block that is protected with a rgistered protector + protected := blocks.NewBlock([]byte("protected!")) + err = hot.Put(protected) + if err != nil { + t.Fatal(err) + } + + // and another one that is not protected + unprotected := blocks.NewBlock([]byte("unprotected!")) + err = hot.Put(unprotected) + if err != nil { + t.Fatal(err) + } + // open the splitstore ss, err := Open("", ds, hot, cold, cfg) if err != nil { @@ -56,18 +84,33 @@ func testSplitStore(t *testing.T, cfg *Config) { } defer ss.Close() //nolint + // register our protector + ss.AddProtector(func(protect func(cid.Cid) error) error { + return protect(protected.Cid()) + }) + err = ss.Start(chain) if err != nil { t.Fatal(err) } // make some tipsets, but not enough to cause compaction - mkBlock := func(curTs *types.TipSet, i int) *types.TipSet { + mkBlock := func(curTs *types.TipSet, i int, stateRoot blocks.Block) *types.TipSet { blk := mock.MkBlock(curTs, uint64(i), uint64(i)) + + blk.Messages = garbage.Cid() + blk.ParentMessageReceipts = garbage.Cid() + blk.ParentStateRoot = stateRoot.Cid() + blk.Timestamp 
= uint64(time.Now().Unix()) + sblk, err := blk.ToStorageBlock() if err != nil { t.Fatal(err) } + err = ss.Put(stateRoot) + if err != nil { + t.Fatal(err) + } err = ss.Put(sblk) if err != nil { t.Fatal(err) @@ -78,18 +121,6 @@ func testSplitStore(t *testing.T, cfg *Config) { return ts } - mkGarbageBlock := func(curTs *types.TipSet, i int) { - blk := mock.MkBlock(curTs, uint64(i), uint64(i)) - sblk, err := blk.ToStorageBlock() - if err != nil { - t.Fatal(err) - } - err = ss.Put(sblk) - if err != nil { - t.Fatal(err) - } - } - waitForCompaction := func() { for atomic.LoadInt32(&ss.compacting) == 1 { time.Sleep(100 * time.Millisecond) @@ -98,100 +129,92 @@ func testSplitStore(t *testing.T, cfg *Config) { curTs := genTs for i := 1; i < 5; i++ { - curTs = mkBlock(curTs, i) + stateRoot := blocks.NewBlock([]byte{byte(i), 3, 3, 7}) + curTs = mkBlock(curTs, i, stateRoot) waitForCompaction() } - mkGarbageBlock(genTs, 1) - // count objects in the cold and hot stores - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - countBlocks := func(bs blockstore.Blockstore) int { count := 0 - ch, err := bs.AllKeysChan(ctx) - if err != nil { - t.Fatal(err) - } - for range ch { + _ = bs.(blockstore.BlockstoreIterator).ForEachKey(func(_ cid.Cid) error { count++ - } + return nil + }) return count } coldCnt := countBlocks(cold) hotCnt := countBlocks(hot) - if coldCnt != 1 { - t.Errorf("expected %d blocks, but got %d", 1, coldCnt) + if coldCnt != 2 { + t.Errorf("expected %d blocks, but got %d", 2, coldCnt) } - if hotCnt != 5 { - t.Errorf("expected %d blocks, but got %d", 5, hotCnt) + if hotCnt != 12 { + t.Errorf("expected %d blocks, but got %d", 12, hotCnt) } // trigger a compaction for i := 5; i < 10; i++ { - curTs = mkBlock(curTs, i) + stateRoot := blocks.NewBlock([]byte{byte(i), 3, 3, 7}) + curTs = mkBlock(curTs, i, stateRoot) waitForCompaction() } coldCnt = countBlocks(cold) hotCnt = countBlocks(hot) - if !cfg.EnableFullCompaction { - if coldCnt != 5 { - 
t.Errorf("expected %d cold blocks, but got %d", 5, coldCnt) - } - - if hotCnt != 5 { - t.Errorf("expected %d hot blocks, but got %d", 5, hotCnt) - } + if coldCnt != 6 { + t.Errorf("expected %d cold blocks, but got %d", 6, coldCnt) } - if cfg.EnableFullCompaction && !cfg.EnableGC { - if coldCnt != 3 { - t.Errorf("expected %d cold blocks, but got %d", 3, coldCnt) - } - - if hotCnt != 7 { - t.Errorf("expected %d hot blocks, but got %d", 7, hotCnt) - } + if hotCnt != 18 { + t.Errorf("expected %d hot blocks, but got %d", 18, hotCnt) } - if cfg.EnableFullCompaction && cfg.EnableGC { - if coldCnt != 2 { - t.Errorf("expected %d cold blocks, but got %d", 2, coldCnt) - } - - if hotCnt != 7 { - t.Errorf("expected %d hot blocks, but got %d", 7, hotCnt) - } + // ensure our protected block is still there + has, err := hot.Has(protected.Cid()) + if err != nil { + t.Fatal(err) } + + if !has { + t.Fatal("protected block is missing from hotstore") + } + + // ensure our unprotected block is in the coldstore now + has, err = hot.Has(unprotected.Cid()) + if err != nil { + t.Fatal(err) + } + + if has { + t.Fatal("unprotected block is still in hotstore") + } + + has, err = cold.Has(unprotected.Cid()) + if err != nil { + t.Fatal(err) + } + + if !has { + t.Fatal("unprotected block is missing from coldstore") + } + + // Make sure we can revert without panicking. 
+ chain.revert(2) } -func TestSplitStoreSimpleCompaction(t *testing.T) { - testSplitStore(t, &Config{TrackingStoreType: "mem"}) -} - -func TestSplitStoreFullCompactionWithoutGC(t *testing.T) { - testSplitStore(t, &Config{ - TrackingStoreType: "mem", - EnableFullCompaction: true, - }) -} - -func TestSplitStoreFullCompactionWithGC(t *testing.T) { - testSplitStore(t, &Config{ - TrackingStoreType: "mem", - EnableFullCompaction: true, - EnableGC: true, - }) +func TestSplitStoreCompaction(t *testing.T) { + testSplitStore(t, &Config{MarkSetType: "map"}) } type mockChain struct { + t testing.TB + sync.Mutex + genesis *types.BlockHeader tipsets []*types.TipSet listener func(revert []*types.TipSet, apply []*types.TipSet) error } @@ -199,12 +222,34 @@ type mockChain struct { func (c *mockChain) push(ts *types.TipSet) { c.Lock() c.tipsets = append(c.tipsets, ts) + if c.genesis == nil { + c.genesis = ts.Blocks()[0] + } c.Unlock() if c.listener != nil { err := c.listener(nil, []*types.TipSet{ts}) if err != nil { - log.Errorf("mockchain: error dispatching listener: %s", err) + c.t.Errorf("mockchain: error dispatching listener: %s", err) + } + } +} + +func (c *mockChain) revert(count int) { + c.Lock() + revert := make([]*types.TipSet, count) + if count > len(c.tipsets) { + c.Unlock() + c.t.Fatalf("not enough tipsets to revert") + } + copy(revert, c.tipsets[len(c.tipsets)-count:]) + c.tipsets = c.tipsets[:len(c.tipsets)-count] + c.Unlock() + + if c.listener != nil { + err := c.listener(revert, nil) + if err != nil { + c.t.Errorf("mockchain: error dispatching listener: %s", err) } } } @@ -218,7 +263,7 @@ func (c *mockChain) GetTipsetByHeight(_ context.Context, epoch abi.ChainEpoch, _ return nil, fmt.Errorf("bad epoch %d", epoch) } - return c.tipsets[iEpoch-1], nil + return c.tipsets[iEpoch], nil } func (c *mockChain) GetHeaviestTipSet() *types.TipSet { @@ -232,24 +277,105 @@ func (c *mockChain) SubscribeHeadChanges(change func(revert []*types.TipSet, app c.listener = change } -func 
(c *mockChain) WalkSnapshot(_ context.Context, ts *types.TipSet, epochs abi.ChainEpoch, _ bool, _ bool, f func(cid.Cid) error) error { - c.Lock() - defer c.Unlock() +type mockStore struct { + mx sync.Mutex + set map[cid.Cid]blocks.Block +} - start := int(ts.Height()) - 1 - end := start - int(epochs) - if end < 0 { - end = -1 +func newMockStore() *mockStore { + return &mockStore{set: make(map[cid.Cid]blocks.Block)} +} + +func (b *mockStore) Has(cid cid.Cid) (bool, error) { + b.mx.Lock() + defer b.mx.Unlock() + _, ok := b.set[cid] + return ok, nil +} + +func (b *mockStore) HashOnRead(hor bool) {} + +func (b *mockStore) Get(cid cid.Cid) (blocks.Block, error) { + b.mx.Lock() + defer b.mx.Unlock() + + blk, ok := b.set[cid] + if !ok { + return nil, blockstore.ErrNotFound } - for i := start; i > end; i-- { - ts := c.tipsets[i] - for _, cid := range ts.Cids() { - err := f(cid) - if err != nil { - return err - } - } + return blk, nil +} + +func (b *mockStore) GetSize(cid cid.Cid) (int, error) { + blk, err := b.Get(cid) + if err != nil { + return 0, err } + return len(blk.RawData()), nil +} + +func (b *mockStore) View(cid cid.Cid, f func([]byte) error) error { + blk, err := b.Get(cid) + if err != nil { + return err + } + return f(blk.RawData()) +} + +func (b *mockStore) Put(blk blocks.Block) error { + b.mx.Lock() + defer b.mx.Unlock() + + b.set[blk.Cid()] = blk + return nil +} + +func (b *mockStore) PutMany(blks []blocks.Block) error { + b.mx.Lock() + defer b.mx.Unlock() + + for _, blk := range blks { + b.set[blk.Cid()] = blk + } + return nil +} + +func (b *mockStore) DeleteBlock(cid cid.Cid) error { + b.mx.Lock() + defer b.mx.Unlock() + + delete(b.set, cid) + return nil +} + +func (b *mockStore) DeleteMany(cids []cid.Cid) error { + b.mx.Lock() + defer b.mx.Unlock() + + for _, c := range cids { + delete(b.set, c) + } + return nil +} + +func (b *mockStore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) { + return nil, errors.New("not implemented") +} + +func (b 
*mockStore) ForEachKey(f func(cid.Cid) error) error { + b.mx.Lock() + defer b.mx.Unlock() + + for c := range b.set { + err := f(c) + if err != nil { + return err + } + } + return nil +} + +func (b *mockStore) Close() error { return nil } diff --git a/blockstore/splitstore/splitstore_util.go b/blockstore/splitstore/splitstore_util.go new file mode 100644 index 000000000..aef845832 --- /dev/null +++ b/blockstore/splitstore/splitstore_util.go @@ -0,0 +1,67 @@ +package splitstore + +import ( + "encoding/binary" + + "golang.org/x/xerrors" + + cid "github.com/ipfs/go-cid" + mh "github.com/multiformats/go-multihash" + + "github.com/filecoin-project/go-state-types/abi" +) + +func epochToBytes(epoch abi.ChainEpoch) []byte { + return uint64ToBytes(uint64(epoch)) +} + +func bytesToEpoch(buf []byte) abi.ChainEpoch { + return abi.ChainEpoch(bytesToUint64(buf)) +} + +func int64ToBytes(i int64) []byte { + return uint64ToBytes(uint64(i)) +} + +func bytesToInt64(buf []byte) int64 { + return int64(bytesToUint64(buf)) +} + +func uint64ToBytes(i uint64) []byte { + buf := make([]byte, 16) + n := binary.PutUvarint(buf, i) + return buf[:n] +} + +func bytesToUint64(buf []byte) uint64 { + i, _ := binary.Uvarint(buf) + return i +} + +func isUnitaryObject(c cid.Cid) bool { + pre := c.Prefix() + switch pre.Codec { + case cid.FilCommitmentSealed, cid.FilCommitmentUnsealed: + return true + default: + return pre.MhType == mh.IDENTITY + } +} + +func isIdentiyCid(c cid.Cid) bool { + return c.Prefix().MhType == mh.IDENTITY +} + +func decodeIdentityCid(c cid.Cid) ([]byte, error) { + dmh, err := mh.Decode(c.Hash()) + if err != nil { + return nil, xerrors.Errorf("error decoding identity cid %s: %w", c, err) + } + + // sanity check + if dmh.Code != mh.IDENTITY { + return nil, xerrors.Errorf("error decoding identity cid %s: hash type is not identity", c) + } + + return dmh.Digest, nil +} diff --git a/blockstore/splitstore/splitstore_warmup.go b/blockstore/splitstore/splitstore_warmup.go new file mode 
100644 index 000000000..fa5192587 --- /dev/null +++ b/blockstore/splitstore/splitstore_warmup.go @@ -0,0 +1,126 @@ +package splitstore + +import ( + "sync/atomic" + "time" + + "golang.org/x/xerrors" + + blocks "github.com/ipfs/go-block-format" + cid "github.com/ipfs/go-cid" + + bstore "github.com/filecoin-project/lotus/blockstore" + "github.com/filecoin-project/lotus/chain/types" +) + +// warmup acuiqres the compaction lock and spawns a goroutine to warm up the hotstore; +// this is necessary when we sync from a snapshot or when we enable the splitstore +// on top of an existing blockstore (which becomes the coldstore). +func (s *SplitStore) warmup(curTs *types.TipSet) error { + if !atomic.CompareAndSwapInt32(&s.compacting, 0, 1) { + return xerrors.Errorf("error locking compaction") + } + + go func() { + defer atomic.StoreInt32(&s.compacting, 0) + + log.Info("warming up hotstore") + start := time.Now() + + err := s.doWarmup(curTs) + if err != nil { + log.Errorf("error warming up hotstore: %s", err) + return + } + + log.Infow("warm up done", "took", time.Since(start)) + }() + + return nil +} + +// the actual warmup procedure; it walks the chain loading all state roots at the boundary +// and headers all the way up to genesis. +// objects are written in batches so as to minimize overhead. 
+func (s *SplitStore) doWarmup(curTs *types.TipSet) error { + epoch := curTs.Height() + batchHot := make([]blocks.Block, 0, batchSize) + count := int64(0) + xcount := int64(0) + missing := int64(0) + err := s.walkChain(curTs, epoch, false, + func(c cid.Cid) error { + if isUnitaryObject(c) { + return errStopWalk + } + + count++ + + has, err := s.hot.Has(c) + if err != nil { + return err + } + + if has { + return nil + } + + blk, err := s.cold.Get(c) + if err != nil { + if err == bstore.ErrNotFound { + missing++ + return nil + } + return err + } + + xcount++ + + batchHot = append(batchHot, blk) + if len(batchHot) == batchSize { + err = s.hot.PutMany(batchHot) + if err != nil { + return err + } + batchHot = batchHot[:0] + } + + return nil + }) + + if err != nil { + return err + } + + if len(batchHot) > 0 { + err = s.hot.PutMany(batchHot) + if err != nil { + return err + } + } + + log.Infow("warmup stats", "visited", count, "warm", xcount, "missing", missing) + + s.markSetSize = count + count>>2 // overestimate a bit + err = s.ds.Put(markSetSizeKey, int64ToBytes(s.markSetSize)) + if err != nil { + log.Warnf("error saving mark set size: %s", err) + } + + // save the warmup epoch + err = s.ds.Put(warmupEpochKey, epochToBytes(epoch)) + if err != nil { + return xerrors.Errorf("error saving warm up epoch: %w", err) + } + s.mx.Lock() + s.warmupEpoch = epoch + s.mx.Unlock() + + // also save the compactionIndex, as this is used as an indicator of warmup for upgraded nodes + err = s.ds.Put(compactionIndexKey, int64ToBytes(s.compactionIndex)) + if err != nil { + return xerrors.Errorf("error saving compaction index: %w", err) + } + + return nil +} diff --git a/blockstore/splitstore/tracking.go b/blockstore/splitstore/tracking.go deleted file mode 100644 index d57fd45ef..000000000 --- a/blockstore/splitstore/tracking.go +++ /dev/null @@ -1,109 +0,0 @@ -package splitstore - -import ( - "path/filepath" - "sync" - - "golang.org/x/xerrors" - - 
"github.com/filecoin-project/go-state-types/abi" - cid "github.com/ipfs/go-cid" -) - -// TrackingStore is a persistent store that tracks blocks that are added -// to the hotstore, tracking the epoch at which they are written. -type TrackingStore interface { - Put(cid.Cid, abi.ChainEpoch) error - PutBatch([]cid.Cid, abi.ChainEpoch) error - Get(cid.Cid) (abi.ChainEpoch, error) - Delete(cid.Cid) error - DeleteBatch([]cid.Cid) error - ForEach(func(cid.Cid, abi.ChainEpoch) error) error - Sync() error - Close() error -} - -// OpenTrackingStore opens a tracking store of the specified type in the -// specified path. -func OpenTrackingStore(path string, ttype string) (TrackingStore, error) { - switch ttype { - case "", "bolt": - return OpenBoltTrackingStore(filepath.Join(path, "tracker.bolt")) - case "mem": - return NewMemTrackingStore(), nil - default: - return nil, xerrors.Errorf("unknown tracking store type %s", ttype) - } -} - -// NewMemTrackingStore creates an in-memory tracking store. -// This is only useful for test or situations where you don't want to open the -// real tracking store (eg concurrent read only access on a node's datastore) -func NewMemTrackingStore() *MemTrackingStore { - return &MemTrackingStore{tab: make(map[cid.Cid]abi.ChainEpoch)} -} - -// MemTrackingStore is a simple in-memory tracking store -type MemTrackingStore struct { - sync.Mutex - tab map[cid.Cid]abi.ChainEpoch -} - -var _ TrackingStore = (*MemTrackingStore)(nil) - -func (s *MemTrackingStore) Put(cid cid.Cid, epoch abi.ChainEpoch) error { - s.Lock() - defer s.Unlock() - s.tab[cid] = epoch - return nil -} - -func (s *MemTrackingStore) PutBatch(cids []cid.Cid, epoch abi.ChainEpoch) error { - s.Lock() - defer s.Unlock() - for _, cid := range cids { - s.tab[cid] = epoch - } - return nil -} - -func (s *MemTrackingStore) Get(cid cid.Cid) (abi.ChainEpoch, error) { - s.Lock() - defer s.Unlock() - epoch, ok := s.tab[cid] - if ok { - return epoch, nil - } - return 0, xerrors.Errorf("missing 
tracking epoch for %s", cid) -} - -func (s *MemTrackingStore) Delete(cid cid.Cid) error { - s.Lock() - defer s.Unlock() - delete(s.tab, cid) - return nil -} - -func (s *MemTrackingStore) DeleteBatch(cids []cid.Cid) error { - s.Lock() - defer s.Unlock() - for _, cid := range cids { - delete(s.tab, cid) - } - return nil -} - -func (s *MemTrackingStore) ForEach(f func(cid.Cid, abi.ChainEpoch) error) error { - s.Lock() - defer s.Unlock() - for cid, epoch := range s.tab { - err := f(cid, epoch) - if err != nil { - return err - } - } - return nil -} - -func (s *MemTrackingStore) Sync() error { return nil } -func (s *MemTrackingStore) Close() error { return nil } diff --git a/blockstore/splitstore/tracking_bolt.go b/blockstore/splitstore/tracking_bolt.go deleted file mode 100644 index c5c451e15..000000000 --- a/blockstore/splitstore/tracking_bolt.go +++ /dev/null @@ -1,120 +0,0 @@ -package splitstore - -import ( - "time" - - "golang.org/x/xerrors" - - cid "github.com/ipfs/go-cid" - bolt "go.etcd.io/bbolt" - - "github.com/filecoin-project/go-state-types/abi" -) - -type BoltTrackingStore struct { - db *bolt.DB - bucketId []byte -} - -var _ TrackingStore = (*BoltTrackingStore)(nil) - -func OpenBoltTrackingStore(path string) (*BoltTrackingStore, error) { - opts := &bolt.Options{ - Timeout: 1 * time.Second, - NoSync: true, - } - db, err := bolt.Open(path, 0644, opts) - if err != nil { - return nil, err - } - - bucketId := []byte("tracker") - err = db.Update(func(tx *bolt.Tx) error { - _, err := tx.CreateBucketIfNotExists(bucketId) - if err != nil { - return xerrors.Errorf("error creating bolt db bucket %s: %w", string(bucketId), err) - } - return nil - }) - - if err != nil { - _ = db.Close() - return nil, err - } - - return &BoltTrackingStore{db: db, bucketId: bucketId}, nil -} - -func (s *BoltTrackingStore) Put(cid cid.Cid, epoch abi.ChainEpoch) error { - val := epochToBytes(epoch) - return s.db.Batch(func(tx *bolt.Tx) error { - b := tx.Bucket(s.bucketId) - return 
b.Put(cid.Hash(), val) - }) -} - -func (s *BoltTrackingStore) PutBatch(cids []cid.Cid, epoch abi.ChainEpoch) error { - val := epochToBytes(epoch) - return s.db.Batch(func(tx *bolt.Tx) error { - b := tx.Bucket(s.bucketId) - for _, cid := range cids { - err := b.Put(cid.Hash(), val) - if err != nil { - return err - } - } - return nil - }) -} - -func (s *BoltTrackingStore) Get(cid cid.Cid) (epoch abi.ChainEpoch, err error) { - err = s.db.View(func(tx *bolt.Tx) error { - b := tx.Bucket(s.bucketId) - val := b.Get(cid.Hash()) - if val == nil { - return xerrors.Errorf("missing tracking epoch for %s", cid) - } - epoch = bytesToEpoch(val) - return nil - }) - return epoch, err -} - -func (s *BoltTrackingStore) Delete(cid cid.Cid) error { - return s.db.Batch(func(tx *bolt.Tx) error { - b := tx.Bucket(s.bucketId) - return b.Delete(cid.Hash()) - }) -} - -func (s *BoltTrackingStore) DeleteBatch(cids []cid.Cid) error { - return s.db.Batch(func(tx *bolt.Tx) error { - b := tx.Bucket(s.bucketId) - for _, cid := range cids { - err := b.Delete(cid.Hash()) - if err != nil { - return xerrors.Errorf("error deleting %s", cid) - } - } - return nil - }) -} - -func (s *BoltTrackingStore) ForEach(f func(cid.Cid, abi.ChainEpoch) error) error { - return s.db.View(func(tx *bolt.Tx) error { - b := tx.Bucket(s.bucketId) - return b.ForEach(func(k, v []byte) error { - cid := cid.NewCidV1(cid.Raw, k) - epoch := bytesToEpoch(v) - return f(cid, epoch) - }) - }) -} - -func (s *BoltTrackingStore) Sync() error { - return s.db.Sync() -} - -func (s *BoltTrackingStore) Close() error { - return s.db.Close() -} diff --git a/blockstore/splitstore/tracking_test.go b/blockstore/splitstore/tracking_test.go deleted file mode 100644 index afd475da5..000000000 --- a/blockstore/splitstore/tracking_test.go +++ /dev/null @@ -1,130 +0,0 @@ -package splitstore - -import ( - "io/ioutil" - "testing" - - cid "github.com/ipfs/go-cid" - "github.com/multiformats/go-multihash" - - "github.com/filecoin-project/go-state-types/abi" 
-) - -func TestBoltTrackingStore(t *testing.T) { - testTrackingStore(t, "bolt") -} - -func testTrackingStore(t *testing.T, tsType string) { - t.Helper() - - makeCid := func(key string) cid.Cid { - h, err := multihash.Sum([]byte(key), multihash.SHA2_256, -1) - if err != nil { - t.Fatal(err) - } - - return cid.NewCidV1(cid.Raw, h) - } - - mustHave := func(s TrackingStore, cid cid.Cid, epoch abi.ChainEpoch) { - val, err := s.Get(cid) - if err != nil { - t.Fatal(err) - } - - if val != epoch { - t.Fatal("epoch mismatch") - } - } - - mustNotHave := func(s TrackingStore, cid cid.Cid) { - _, err := s.Get(cid) - if err == nil { - t.Fatal("expected error") - } - } - - path, err := ioutil.TempDir("", "snoop-test.*") - if err != nil { - t.Fatal(err) - } - - s, err := OpenTrackingStore(path, tsType) - if err != nil { - t.Fatal(err) - } - - k1 := makeCid("a") - k2 := makeCid("b") - k3 := makeCid("c") - k4 := makeCid("d") - - s.Put(k1, 1) //nolint - s.Put(k2, 2) //nolint - s.Put(k3, 3) //nolint - s.Put(k4, 4) //nolint - - mustHave(s, k1, 1) - mustHave(s, k2, 2) - mustHave(s, k3, 3) - mustHave(s, k4, 4) - - s.Delete(k1) // nolint - s.Delete(k2) // nolint - - mustNotHave(s, k1) - mustNotHave(s, k2) - mustHave(s, k3, 3) - mustHave(s, k4, 4) - - s.PutBatch([]cid.Cid{k1}, 1) //nolint - s.PutBatch([]cid.Cid{k2}, 2) //nolint - - mustHave(s, k1, 1) - mustHave(s, k2, 2) - mustHave(s, k3, 3) - mustHave(s, k4, 4) - - allKeys := map[string]struct{}{ - k1.String(): {}, - k2.String(): {}, - k3.String(): {}, - k4.String(): {}, - } - - err = s.ForEach(func(k cid.Cid, _ abi.ChainEpoch) error { - _, ok := allKeys[k.String()] - if !ok { - t.Fatal("unexpected key") - } - - delete(allKeys, k.String()) - return nil - }) - - if err != nil { - t.Fatal(err) - } - - if len(allKeys) != 0 { - t.Fatal("not all keys were returned") - } - - // no close and reopen and ensure the keys still exist - err = s.Close() - if err != nil { - t.Fatal(err) - } - - s, err = OpenTrackingStore(path, tsType) - if err != nil { 
- t.Fatal(err) - } - - mustHave(s, k1, 1) - mustHave(s, k2, 2) - mustHave(s, k3, 3) - mustHave(s, k4, 4) - - s.Close() //nolint:errcheck -} diff --git a/build/bootstrap.go b/build/bootstrap.go index cd72cfd1b..98fa2e2f9 100644 --- a/build/bootstrap.go +++ b/build/bootstrap.go @@ -2,28 +2,32 @@ package build import ( "context" + "embed" + "path" "strings" "github.com/filecoin-project/lotus/lib/addrutil" - rice "github.com/GeertJohan/go.rice" "github.com/libp2p/go-libp2p-core/peer" ) +//go:embed bootstrap +var bootstrapfs embed.FS + func BuiltinBootstrap() ([]peer.AddrInfo, error) { if DisableBuiltinAssets { return nil, nil } - - b := rice.MustFindBox("bootstrap") - if BootstrappersFile != "" { - spi := b.MustString(BootstrappersFile) - if spi == "" { + spi, err := bootstrapfs.ReadFile(path.Join("bootstrap", BootstrappersFile)) + if err != nil { + return nil, err + } + if len(spi) == 0 { return nil, nil } - return addrutil.ParseAddresses(context.TODO(), strings.Split(strings.TrimSpace(spi), "\n")) + return addrutil.ParseAddresses(context.TODO(), strings.Split(strings.TrimSpace(string(spi)), "\n")) } return nil, nil diff --git a/build/bootstrap/butterflynet.pi b/build/bootstrap/butterflynet.pi index 0de3043a3..cc4ce4f1d 100644 --- a/build/bootstrap/butterflynet.pi +++ b/build/bootstrap/butterflynet.pi @@ -1,2 +1,2 @@ -/dns4/bootstrap-0.butterfly.fildev.network/tcp/1347/p2p/12D3KooWRkaF18SR3E6qL6dkGrozT8QJUV5VbhE9E7BZtPmHqdWJ -/dns4/bootstrap-1.butterfly.fildev.network/tcp/1347/p2p/12D3KooWJcJUc23WJjJHGSboGcU3t76z9Lb7CghrH2tiBiDCY4ux +/dns4/bootstrap-0.butterfly.fildev.network/tcp/1347/p2p/12D3KooWBbZd7Su9XfLUQ12RynGQ3ZmGY1nGqFntmqop9pLNJE6g +/dns4/bootstrap-1.butterfly.fildev.network/tcp/1347/p2p/12D3KooWGKRzEY4tJFTmAmrYUpa1CVVohmV9YjJbC9v5XWY2gUji diff --git a/build/bootstrap/interopnet.pi b/build/bootstrap/interopnet.pi new file mode 100644 index 000000000..923653d94 --- /dev/null +++ b/build/bootstrap/interopnet.pi @@ -0,0 +1,2 @@ 
+/dns4/bootstrap-0.interop.fildev.network/tcp/1347/p2p/12D3KooWLGPq9JL1xwL6gHok7HSNxtK1Q5kyfg4Hk69ifRPghn4i +/dns4/bootstrap-1.interop.fildev.network/tcp/1347/p2p/12D3KooWFYS1f31zafv8mqqYu8U3hEqYvaZ6avWzYU3BmZdpyH3h diff --git a/build/genesis.go b/build/genesis.go index 812f5a9df..6d94b38cf 100644 --- a/build/genesis.go +++ b/build/genesis.go @@ -1,23 +1,23 @@ package build import ( - rice "github.com/GeertJohan/go.rice" + "embed" + "path" + logging "github.com/ipfs/go-log/v2" ) // moved from now-defunct build/paramfetch.go var log = logging.Logger("build") +//go:embed genesis +var genesisfs embed.FS + func MaybeGenesis() []byte { - builtinGen, err := rice.FindBox("genesis") + genBytes, err := genesisfs.ReadFile(path.Join("genesis", GenesisFile)) if err != nil { log.Warnf("loading built-in genesis: %s", err) return nil } - genBytes, err := builtinGen.Bytes(GenesisFile) - if err != nil { - log.Warnf("loading built-in genesis: %s", err) - } - return genBytes } diff --git a/build/genesis/butterflynet.car b/build/genesis/butterflynet.car index 6654d7195..7c2d19251 100644 Binary files a/build/genesis/butterflynet.car and b/build/genesis/butterflynet.car differ diff --git a/build/genesis/interopnet.car b/build/genesis/interopnet.car new file mode 100644 index 000000000..2c7c2a498 Binary files /dev/null and b/build/genesis/interopnet.car differ diff --git a/build/openrpc.go b/build/openrpc.go index 0f514c8aa..ac951c172 100644 --- a/build/openrpc.go +++ b/build/openrpc.go @@ -3,13 +3,15 @@ package build import ( "bytes" "compress/gzip" + "embed" "encoding/json" - rice "github.com/GeertJohan/go.rice" - apitypes "github.com/filecoin-project/lotus/api/types" ) +//go:embed openrpc +var openrpcfs embed.FS + func mustReadGzippedOpenRPCDocument(data []byte) apitypes.OpenRPCDocument { zr, err := gzip.NewReader(bytes.NewBuffer(data)) if err != nil { @@ -28,16 +30,25 @@ func mustReadGzippedOpenRPCDocument(data []byte) apitypes.OpenRPCDocument { } func OpenRPCDiscoverJSON_Full() 
apitypes.OpenRPCDocument { - data := rice.MustFindBox("openrpc").MustBytes("full.json.gz") + data, err := openrpcfs.ReadFile("openrpc/full.json.gz") + if err != nil { + panic(err) + } return mustReadGzippedOpenRPCDocument(data) } func OpenRPCDiscoverJSON_Miner() apitypes.OpenRPCDocument { - data := rice.MustFindBox("openrpc").MustBytes("miner.json.gz") + data, err := openrpcfs.ReadFile("openrpc/miner.json.gz") + if err != nil { + panic(err) + } return mustReadGzippedOpenRPCDocument(data) } func OpenRPCDiscoverJSON_Worker() apitypes.OpenRPCDocument { - data := rice.MustFindBox("openrpc").MustBytes("worker.json.gz") + data, err := openrpcfs.ReadFile("openrpc/worker.json.gz") + if err != nil { + panic(err) + } return mustReadGzippedOpenRPCDocument(data) } diff --git a/build/openrpc/full.json.gz b/build/openrpc/full.json.gz index c75dfefc6..2c804bac2 100644 Binary files a/build/openrpc/full.json.gz and b/build/openrpc/full.json.gz differ diff --git a/build/openrpc/miner.json.gz b/build/openrpc/miner.json.gz index 13d73f069..56f7b21d4 100644 Binary files a/build/openrpc/miner.json.gz and b/build/openrpc/miner.json.gz differ diff --git a/build/openrpc/worker.json.gz b/build/openrpc/worker.json.gz index 5ae5cdb69..514b75335 100644 Binary files a/build/openrpc/worker.json.gz and b/build/openrpc/worker.json.gz differ diff --git a/build/parameters.go b/build/parameters.go index b70dad1c1..9e60f12a6 100644 --- a/build/parameters.go +++ b/build/parameters.go @@ -1,11 +1,19 @@ package build -import rice "github.com/GeertJohan/go.rice" +import ( + _ "embed" +) + +//go:embed proof-params/parameters.json +var params []byte + +//go:embed proof-params/srs-inner-product.json +var srs []byte func ParametersJSON() []byte { - return rice.MustFindBox("proof-params").MustBytes("parameters.json") + return params } func SrsJSON() []byte { - return rice.MustFindBox("proof-params").MustBytes("srs-inner-product.json") + return srs } diff --git a/build/params_2k.go b/build/params_2k.go index 
8ead218f5..387d2da0b 100644 --- a/build/params_2k.go +++ b/build/params_2k.go @@ -24,29 +24,29 @@ var UpgradeIgnitionHeight = abi.ChainEpoch(-2) var UpgradeRefuelHeight = abi.ChainEpoch(-3) var UpgradeTapeHeight = abi.ChainEpoch(-4) -var UpgradeAssemblyHeight = abi.ChainEpoch(5) -var UpgradeLiftoffHeight = abi.ChainEpoch(-5) +var UpgradeAssemblyHeight = abi.ChainEpoch(-5) +var UpgradeLiftoffHeight = abi.ChainEpoch(-6) -var UpgradeKumquatHeight = abi.ChainEpoch(6) -var UpgradeCalicoHeight = abi.ChainEpoch(7) -var UpgradePersianHeight = abi.ChainEpoch(8) -var UpgradeOrangeHeight = abi.ChainEpoch(9) -var UpgradeClausHeight = abi.ChainEpoch(10) +var UpgradeKumquatHeight = abi.ChainEpoch(-7) +var UpgradeCalicoHeight = abi.ChainEpoch(-8) +var UpgradePersianHeight = abi.ChainEpoch(-9) +var UpgradeOrangeHeight = abi.ChainEpoch(-10) +var UpgradeClausHeight = abi.ChainEpoch(-11) -var UpgradeTrustHeight = abi.ChainEpoch(11) +var UpgradeTrustHeight = abi.ChainEpoch(-12) -var UpgradeNorwegianHeight = abi.ChainEpoch(12) +var UpgradeNorwegianHeight = abi.ChainEpoch(-13) -var UpgradeTurboHeight = abi.ChainEpoch(13) +var UpgradeTurboHeight = abi.ChainEpoch(-14) -var UpgradeHyperdriveHeight = abi.ChainEpoch(14) +var UpgradeHyperdriveHeight = abi.ChainEpoch(-15) var DrandSchedule = map[abi.ChainEpoch]DrandEnum{ 0: DrandMainnet, } func init() { - policy.SetSupportedProofTypes(abi.RegisteredSealProof_StackedDrg2KiBV1) + policy.SetSupportedProofTypes(abi.RegisteredSealProof_StackedDrg2KiBV1, abi.RegisteredSealProof_StackedDrg8MiBV1) policy.SetConsensusMinerMinPower(abi.NewStoragePower(2048)) policy.SetMinVerifiedDealSize(abi.NewStoragePower(256)) policy.SetPreCommitChallengeDelay(abi.ChainEpoch(10)) diff --git a/build/params_interop.go b/build/params_interop.go new file mode 100644 index 000000000..73cc1c7d9 --- /dev/null +++ b/build/params_interop.go @@ -0,0 +1,104 @@ +// +build interopnet + +package build + +import ( + "os" + "strconv" + + "github.com/ipfs/go-cid" + + 
"github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin" + + "github.com/filecoin-project/lotus/chain/actors/policy" +) + +const BootstrappersFile = "interopnet.pi" +const GenesisFile = "interopnet.car" + +var UpgradeBreezeHeight = abi.ChainEpoch(-1) + +const BreezeGasTampingDuration = 0 + +var UpgradeSmokeHeight = abi.ChainEpoch(-1) +var UpgradeIgnitionHeight = abi.ChainEpoch(-2) +var UpgradeRefuelHeight = abi.ChainEpoch(-3) +var UpgradeTapeHeight = abi.ChainEpoch(-4) + +var UpgradeAssemblyHeight = abi.ChainEpoch(-5) +var UpgradeLiftoffHeight = abi.ChainEpoch(-6) + +var UpgradeKumquatHeight = abi.ChainEpoch(-7) +var UpgradeCalicoHeight = abi.ChainEpoch(-8) +var UpgradePersianHeight = abi.ChainEpoch(-9) +var UpgradeOrangeHeight = abi.ChainEpoch(-10) +var UpgradeClausHeight = abi.ChainEpoch(-11) + +var UpgradeTrustHeight = abi.ChainEpoch(-12) + +var UpgradeNorwegianHeight = abi.ChainEpoch(-13) + +var UpgradeTurboHeight = abi.ChainEpoch(-14) + +var UpgradeHyperdriveHeight = abi.ChainEpoch(-15) + +var DrandSchedule = map[abi.ChainEpoch]DrandEnum{ + 0: DrandMainnet, +} + +func init() { + policy.SetSupportedProofTypes( + abi.RegisteredSealProof_StackedDrg2KiBV1, + abi.RegisteredSealProof_StackedDrg8MiBV1, + abi.RegisteredSealProof_StackedDrg512MiBV1, + ) + policy.SetConsensusMinerMinPower(abi.NewStoragePower(2048)) + policy.SetMinVerifiedDealSize(abi.NewStoragePower(256)) + policy.SetPreCommitChallengeDelay(abi.ChainEpoch(10)) + + getUpgradeHeight := func(ev string, def abi.ChainEpoch) abi.ChainEpoch { + hs, found := os.LookupEnv(ev) + if found { + h, err := strconv.Atoi(hs) + if err != nil { + log.Panicf("failed to parse %s env var", ev) + } + + return abi.ChainEpoch(h) + } + + return def + } + + UpgradeBreezeHeight = getUpgradeHeight("LOTUS_BREEZE_HEIGHT", UpgradeBreezeHeight) + UpgradeSmokeHeight = getUpgradeHeight("LOTUS_SMOKE_HEIGHT", UpgradeSmokeHeight) + 
UpgradeIgnitionHeight = getUpgradeHeight("LOTUS_IGNITION_HEIGHT", UpgradeIgnitionHeight) + UpgradeRefuelHeight = getUpgradeHeight("LOTUS_REFUEL_HEIGHT", UpgradeRefuelHeight) + UpgradeTapeHeight = getUpgradeHeight("LOTUS_TAPE_HEIGHT", UpgradeTapeHeight) + UpgradeAssemblyHeight = getUpgradeHeight("LOTUS_ACTORSV2_HEIGHT", UpgradeAssemblyHeight) + UpgradeLiftoffHeight = getUpgradeHeight("LOTUS_LIFTOFF_HEIGHT", UpgradeLiftoffHeight) + UpgradeKumquatHeight = getUpgradeHeight("LOTUS_KUMQUAT_HEIGHT", UpgradeKumquatHeight) + UpgradeCalicoHeight = getUpgradeHeight("LOTUS_CALICO_HEIGHT", UpgradeCalicoHeight) + UpgradePersianHeight = getUpgradeHeight("LOTUS_PERSIAN_HEIGHT", UpgradePersianHeight) + UpgradeOrangeHeight = getUpgradeHeight("LOTUS_ORANGE_HEIGHT", UpgradeOrangeHeight) + UpgradeClausHeight = getUpgradeHeight("LOTUS_CLAUS_HEIGHT", UpgradeClausHeight) + UpgradeTrustHeight = getUpgradeHeight("LOTUS_ACTORSV3_HEIGHT", UpgradeTrustHeight) + UpgradeNorwegianHeight = getUpgradeHeight("LOTUS_NORWEGIAN_HEIGHT", UpgradeNorwegianHeight) + UpgradeTurboHeight = getUpgradeHeight("LOTUS_ACTORSV4_HEIGHT", UpgradeTurboHeight) + UpgradeHyperdriveHeight = getUpgradeHeight("LOTUS_HYPERDRIVE_HEIGHT", UpgradeHyperdriveHeight) + + BuildType |= BuildInteropnet + SetAddressNetwork(address.Testnet) + Devnet = true +} + +const BlockDelaySecs = uint64(builtin2.EpochDurationSeconds) + +const PropagationDelaySecs = uint64(6) + +// BootstrapPeerThreshold is the minimum number peers we need to track for a sync worker to start +const BootstrapPeerThreshold = 2 + +var WhitelistedBlock = cid.Undef diff --git a/build/params_mainnet.go b/build/params_mainnet.go index d639cbf58..1c9b69462 100644 --- a/build/params_mainnet.go +++ b/build/params_mainnet.go @@ -4,6 +4,7 @@ // +build !calibnet // +build !nerpanet // +build !butterflynet +// +build !interopnet package build @@ -13,7 +14,6 @@ import ( "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" - 
"github.com/filecoin-project/lotus/chain/actors/policy" builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin" ) @@ -66,8 +66,6 @@ const UpgradeTurboHeight = 712320 var UpgradeHyperdriveHeight = abi.ChainEpoch(892800) func init() { - policy.SetConsensusMinerMinPower(abi.NewStoragePower(10 << 40)) - if os.Getenv("LOTUS_USE_TEST_ADDRESSES") != "1" { SetAddressNetwork(address.Mainnet) } diff --git a/build/tools.go b/build/tools.go index ad45397bb..57b6e7d1f 100644 --- a/build/tools.go +++ b/build/tools.go @@ -6,4 +6,5 @@ import ( _ "github.com/GeertJohan/go.rice/rice" _ "github.com/golang/mock/mockgen" _ "github.com/whyrusleeping/bencher" + _ "golang.org/x/tools/cmd/stringer" ) diff --git a/build/version.go b/build/version.go index 824e4241f..c6a1be3e2 100644 --- a/build/version.go +++ b/build/version.go @@ -1,14 +1,17 @@ package build +import "os" + var CurrentCommit string var BuildType int const ( - BuildDefault = 0 - BuildMainnet = 0x1 - Build2k = 0x2 - BuildDebug = 0x3 - BuildCalibnet = 0x4 + BuildDefault = 0 + BuildMainnet = 0x1 + Build2k = 0x2 + BuildDebug = 0x3 + BuildCalibnet = 0x4 + BuildInteropnet = 0x5 ) func buildType() string { @@ -23,14 +26,20 @@ func buildType() string { return "+debug" case BuildCalibnet: return "+calibnet" + case BuildInteropnet: + return "+interopnet" default: return "+huh?" 
} } // BuildVersion is the local build version, set by build system -const BuildVersion = "1.10.1" +const BuildVersion = "1.11.1-dev" func UserVersion() string { + if os.Getenv("LOTUS_VERSION_IGNORE_COMMIT") == "1" { + return BuildVersion + } + return BuildVersion + buildType() + CurrentCommit } diff --git a/chain/actors/agen/main.go b/chain/actors/agen/main.go index d34b43ca8..9a3b8fd20 100644 --- a/chain/actors/agen/main.go +++ b/chain/actors/agen/main.go @@ -6,34 +6,26 @@ import ( "io/ioutil" "os" "path/filepath" + "strconv" "text/template" + lotusactors "github.com/filecoin-project/lotus/chain/actors" + "golang.org/x/xerrors" ) -var latestVersion = 5 - -var versions = []int{0, 2, 3, 4, latestVersion} - -var versionImports = map[int]string{ - 0: "/", - 2: "/v2/", - 3: "/v3/", - 4: "/v4/", - latestVersion: "/v5/", -} - var actors = map[string][]int{ - "account": versions, - "cron": versions, - "init": versions, - "market": versions, - "miner": versions, - "multisig": versions, - "paych": versions, - "power": versions, - "reward": versions, - "verifreg": versions, + "account": lotusactors.Versions, + "cron": lotusactors.Versions, + "init": lotusactors.Versions, + "market": lotusactors.Versions, + "miner": lotusactors.Versions, + "multisig": lotusactors.Versions, + "paych": lotusactors.Versions, + "power": lotusactors.Versions, + "system": lotusactors.Versions, + "reward": lotusactors.Versions, + "verifreg": lotusactors.Versions, } func main() { @@ -72,14 +64,14 @@ func generateAdapters() error { } tpl := template.Must(template.New("").Funcs(template.FuncMap{ - "import": func(v int) string { return versionImports[v] }, + "import": func(v int) string { return getVersionImports()[v] }, }).Parse(string(af))) var b bytes.Buffer err = tpl.Execute(&b, map[string]interface{}{ "versions": versions, - "latestVersion": latestVersion, + "latestVersion": lotusactors.LatestVersion, }) if err != nil { return err @@ -104,14 +96,14 @@ func generateState(actDir string) error { 
return xerrors.Errorf("loading state adapter template: %w", err) } - for _, version := range versions { + for _, version := range lotusactors.Versions { tpl := template.Must(template.New("").Funcs(template.FuncMap{}).Parse(string(af))) var b bytes.Buffer err := tpl.Execute(&b, map[string]interface{}{ "v": version, - "import": versionImports[version], + "import": getVersionImports()[version], }) if err != nil { return err @@ -135,14 +127,14 @@ func generateMessages(actDir string) error { return xerrors.Errorf("loading message adapter template: %w", err) } - for _, version := range versions { + for _, version := range lotusactors.Versions { tpl := template.Must(template.New("").Funcs(template.FuncMap{}).Parse(string(af))) var b bytes.Buffer err := tpl.Execute(&b, map[string]interface{}{ "v": version, - "import": versionImports[version], + "import": getVersionImports()[version], }) if err != nil { return err @@ -168,13 +160,13 @@ func generatePolicy(policyPath string) error { } tpl := template.Must(template.New("").Funcs(template.FuncMap{ - "import": func(v int) string { return versionImports[v] }, + "import": func(v int) string { return getVersionImports()[v] }, }).Parse(string(pf))) var b bytes.Buffer err = tpl.Execute(&b, map[string]interface{}{ - "versions": versions, - "latestVersion": latestVersion, + "versions": lotusactors.Versions, + "latestVersion": lotusactors.LatestVersion, }) if err != nil { return err @@ -199,13 +191,13 @@ func generateBuiltin(builtinPath string) error { } tpl := template.Must(template.New("").Funcs(template.FuncMap{ - "import": func(v int) string { return versionImports[v] }, + "import": func(v int) string { return getVersionImports()[v] }, }).Parse(string(bf))) var b bytes.Buffer err = tpl.Execute(&b, map[string]interface{}{ - "versions": versions, - "latestVersion": latestVersion, + "versions": lotusactors.Versions, + "latestVersion": lotusactors.LatestVersion, }) if err != nil { return err @@ -217,3 +209,16 @@ func 
generateBuiltin(builtinPath string) error { return nil } + +func getVersionImports() map[int]string { + versionImports := make(map[int]string, lotusactors.LatestVersion) + for _, v := range lotusactors.Versions { + if v == 0 { + versionImports[v] = "/" + } else { + versionImports[v] = "/v" + strconv.Itoa(v) + "/" + } + } + + return versionImports +} diff --git a/chain/actors/builtin/account/account.go b/chain/actors/builtin/account/account.go index 7ac8f62d0..04c82b340 100644 --- a/chain/actors/builtin/account/account.go +++ b/chain/actors/builtin/account/account.go @@ -1,6 +1,7 @@ package account import ( + "github.com/filecoin-project/lotus/chain/actors" "golang.org/x/xerrors" "github.com/filecoin-project/go-address" @@ -69,8 +70,54 @@ func Load(store adt.Store, act *types.Actor) (State, error) { return nil, xerrors.Errorf("unknown actor code %s", act.Code) } +func MakeState(store adt.Store, av actors.Version, addr address.Address) (State, error) { + switch av { + + case actors.Version0: + return make0(store, addr) + + case actors.Version2: + return make2(store, addr) + + case actors.Version3: + return make3(store, addr) + + case actors.Version4: + return make4(store, addr) + + case actors.Version5: + return make5(store, addr) + + } + return nil, xerrors.Errorf("unknown actor version %d", av) +} + +func GetActorCodeID(av actors.Version) (cid.Cid, error) { + switch av { + + case actors.Version0: + return builtin0.AccountActorCodeID, nil + + case actors.Version2: + return builtin2.AccountActorCodeID, nil + + case actors.Version3: + return builtin3.AccountActorCodeID, nil + + case actors.Version4: + return builtin4.AccountActorCodeID, nil + + case actors.Version5: + return builtin5.AccountActorCodeID, nil + + } + + return cid.Undef, xerrors.Errorf("unknown actor version %d", av) +} + type State interface { cbor.Marshaler PubkeyAddress() (address.Address, error) + GetState() interface{} } diff --git a/chain/actors/builtin/account/actor.go.template 
b/chain/actors/builtin/account/actor.go.template index f75af3dfb..53962cc94 100644 --- a/chain/actors/builtin/account/actor.go.template +++ b/chain/actors/builtin/account/actor.go.template @@ -1,6 +1,7 @@ package account import ( + "github.com/filecoin-project/lotus/chain/actors" "golang.org/x/xerrors" "github.com/filecoin-project/go-address" @@ -34,8 +35,30 @@ func Load(store adt.Store, act *types.Actor) (State, error) { return nil, xerrors.Errorf("unknown actor code %s", act.Code) } +func MakeState(store adt.Store, av actors.Version, addr address.Address) (State, error) { + switch av { +{{range .versions}} + case actors.Version{{.}}: + return make{{.}}(store, addr) +{{end}} +} + return nil, xerrors.Errorf("unknown actor version %d", av) +} + +func GetActorCodeID(av actors.Version) (cid.Cid, error) { + switch av { +{{range .versions}} + case actors.Version{{.}}: + return builtin{{.}}.AccountActorCodeID, nil +{{end}} + } + + return cid.Undef, xerrors.Errorf("unknown actor version %d", av) +} + type State interface { cbor.Marshaler PubkeyAddress() (address.Address, error) + GetState() interface{} } diff --git a/chain/actors/builtin/account/state.go.template b/chain/actors/builtin/account/state.go.template index 65d874c80..5be262ece 100644 --- a/chain/actors/builtin/account/state.go.template +++ b/chain/actors/builtin/account/state.go.template @@ -20,6 +20,12 @@ func load{{.v}}(store adt.Store, root cid.Cid) (State, error) { return &out, nil } +func make{{.v}}(store adt.Store, addr address.Address) (State, error) { + out := state{{.v}}{store: store} + out.State = account{{.v}}.State{Address:addr} + return &out, nil +} + type state{{.v}} struct { account{{.v}}.State store adt.Store @@ -28,3 +34,7 @@ type state{{.v}} struct { func (s *state{{.v}}) PubkeyAddress() (address.Address, error) { return s.Address, nil } + +func (s *state{{.v}}) GetState() interface{} { + return &s.State +} \ No newline at end of file diff --git a/chain/actors/builtin/account/v0.go 
b/chain/actors/builtin/account/v0.go index 67c555c5d..bdfca2fd7 100644 --- a/chain/actors/builtin/account/v0.go +++ b/chain/actors/builtin/account/v0.go @@ -20,6 +20,12 @@ func load0(store adt.Store, root cid.Cid) (State, error) { return &out, nil } +func make0(store adt.Store, addr address.Address) (State, error) { + out := state0{store: store} + out.State = account0.State{Address: addr} + return &out, nil +} + type state0 struct { account0.State store adt.Store @@ -28,3 +34,7 @@ type state0 struct { func (s *state0) PubkeyAddress() (address.Address, error) { return s.Address, nil } + +func (s *state0) GetState() interface{} { + return &s.State +} diff --git a/chain/actors/builtin/account/v2.go b/chain/actors/builtin/account/v2.go index 2664631bc..66618e06a 100644 --- a/chain/actors/builtin/account/v2.go +++ b/chain/actors/builtin/account/v2.go @@ -20,6 +20,12 @@ func load2(store adt.Store, root cid.Cid) (State, error) { return &out, nil } +func make2(store adt.Store, addr address.Address) (State, error) { + out := state2{store: store} + out.State = account2.State{Address: addr} + return &out, nil +} + type state2 struct { account2.State store adt.Store @@ -28,3 +34,7 @@ type state2 struct { func (s *state2) PubkeyAddress() (address.Address, error) { return s.Address, nil } + +func (s *state2) GetState() interface{} { + return &s.State +} diff --git a/chain/actors/builtin/account/v3.go b/chain/actors/builtin/account/v3.go index 16b489a3e..dbe100a4f 100644 --- a/chain/actors/builtin/account/v3.go +++ b/chain/actors/builtin/account/v3.go @@ -20,6 +20,12 @@ func load3(store adt.Store, root cid.Cid) (State, error) { return &out, nil } +func make3(store adt.Store, addr address.Address) (State, error) { + out := state3{store: store} + out.State = account3.State{Address: addr} + return &out, nil +} + type state3 struct { account3.State store adt.Store @@ -28,3 +34,7 @@ type state3 struct { func (s *state3) PubkeyAddress() (address.Address, error) { return s.Address, nil 
} + +func (s *state3) GetState() interface{} { + return &s.State +} diff --git a/chain/actors/builtin/account/v4.go b/chain/actors/builtin/account/v4.go index 1a24007d8..53f71dcc5 100644 --- a/chain/actors/builtin/account/v4.go +++ b/chain/actors/builtin/account/v4.go @@ -20,6 +20,12 @@ func load4(store adt.Store, root cid.Cid) (State, error) { return &out, nil } +func make4(store adt.Store, addr address.Address) (State, error) { + out := state4{store: store} + out.State = account4.State{Address: addr} + return &out, nil +} + type state4 struct { account4.State store adt.Store @@ -28,3 +34,7 @@ type state4 struct { func (s *state4) PubkeyAddress() (address.Address, error) { return s.Address, nil } + +func (s *state4) GetState() interface{} { + return &s.State +} diff --git a/chain/actors/builtin/account/v5.go b/chain/actors/builtin/account/v5.go index e2df5904d..538f56987 100644 --- a/chain/actors/builtin/account/v5.go +++ b/chain/actors/builtin/account/v5.go @@ -20,6 +20,12 @@ func load5(store adt.Store, root cid.Cid) (State, error) { return &out, nil } +func make5(store adt.Store, addr address.Address) (State, error) { + out := state5{store: store} + out.State = account5.State{Address: addr} + return &out, nil +} + type state5 struct { account5.State store adt.Store @@ -28,3 +34,7 @@ type state5 struct { func (s *state5) PubkeyAddress() (address.Address, error) { return s.Address, nil } + +func (s *state5) GetState() interface{} { + return &s.State +} diff --git a/chain/actors/builtin/builtin.go.template b/chain/actors/builtin/builtin.go.template index 9b89b13f5..031c05182 100644 --- a/chain/actors/builtin/builtin.go.template +++ b/chain/actors/builtin/builtin.go.template @@ -6,9 +6,9 @@ import ( "golang.org/x/xerrors" {{range .versions}} - builtin{{.}} "github.com/filecoin-project/specs-actors{{import .}}actors/builtin" - smoothing{{.}} "github.com/filecoin-project/specs-actors{{import .}}actors/util/smoothing" - {{end}} + builtin{{.}} 
"github.com/filecoin-project/specs-actors{{import .}}actors/builtin" + smoothing{{.}} "github.com/filecoin-project/specs-actors{{import .}}actors/util/smoothing" + {{end}} "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/cbor" @@ -17,7 +17,7 @@ import ( "github.com/filecoin-project/lotus/chain/types" miner{{.latestVersion}} "github.com/filecoin-project/specs-actors{{import .latestVersion}}actors/builtin/miner" - proof{{.latestVersion}} "github.com/filecoin-project/specs-actors{{import .latestVersion}}actors/runtime/proof" + proof{{.latestVersion}} "github.com/filecoin-project/specs-actors{{import .latestVersion}}actors/runtime/proof" ) var SystemActorAddr = builtin{{.latestVersion}}.SystemActorAddr @@ -53,13 +53,13 @@ func QAPowerForWeight(size abi.SectorSize, duration abi.ChainEpoch, dealWeight, } {{range .versions}} - func FromV{{.}}FilterEstimate(v{{.}} smoothing{{.}}.FilterEstimate) FilterEstimate { + func FromV{{.}}FilterEstimate(v{{.}} smoothing{{.}}.FilterEstimate) FilterEstimate { {{if (eq . 
0)}} - return (FilterEstimate)(v{{.}}) //nolint:unconvert - {{else}} - return (FilterEstimate)(v{{.}}) - {{end}} - } + return (FilterEstimate)(v{{.}}) //nolint:unconvert + {{else}} + return (FilterEstimate)(v{{.}}) + {{end}} + } {{end}} type ActorStateLoader func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) @@ -80,58 +80,58 @@ func Load(store adt.Store, act *types.Actor) (cbor.Marshaler, error) { func ActorNameByCode(c cid.Cid) string { switch { - {{range .versions}} - case builtin{{.}}.IsBuiltinActor(c): - return builtin{{.}}.ActorNameByCode(c) - {{end}} + {{range .versions}} + case builtin{{.}}.IsBuiltinActor(c): + return builtin{{.}}.ActorNameByCode(c) + {{end}} default: return "" } } func IsBuiltinActor(c cid.Cid) bool { - {{range .versions}} - if builtin{{.}}.IsBuiltinActor(c) { - return true - } - {{end}} - return false + {{range .versions}} + if builtin{{.}}.IsBuiltinActor(c) { + return true + } + {{end}} + return false } func IsAccountActor(c cid.Cid) bool { - {{range .versions}} - if c == builtin{{.}}.AccountActorCodeID { - return true - } - {{end}} - return false + {{range .versions}} + if c == builtin{{.}}.AccountActorCodeID { + return true + } + {{end}} + return false } func IsStorageMinerActor(c cid.Cid) bool { - {{range .versions}} - if c == builtin{{.}}.StorageMinerActorCodeID { - return true - } - {{end}} - return false + {{range .versions}} + if c == builtin{{.}}.StorageMinerActorCodeID { + return true + } + {{end}} + return false } func IsMultisigActor(c cid.Cid) bool { - {{range .versions}} - if c == builtin{{.}}.MultisigActorCodeID { - return true - } - {{end}} - return false + {{range .versions}} + if c == builtin{{.}}.MultisigActorCodeID { + return true + } + {{end}} + return false } func IsPaymentChannelActor(c cid.Cid) bool { - {{range .versions}} - if c == builtin{{.}}.PaymentChannelActorCodeID { - return true - } - {{end}} - return false + {{range .versions}} + if c == builtin{{.}}.PaymentChannelActorCodeID { + return true + } + 
{{end}} + return false } func makeAddress(addr string) address.Address { diff --git a/chain/actors/builtin/cron/actor.go.template b/chain/actors/builtin/cron/actor.go.template index 6b7449617..d73808556 100644 --- a/chain/actors/builtin/cron/actor.go.template +++ b/chain/actors/builtin/cron/actor.go.template @@ -1,10 +1,42 @@ package cron import ( - builtin{{.latestVersion}} "github.com/filecoin-project/specs-actors{{import .latestVersion}}actors/builtin" + "github.com/filecoin-project/lotus/chain/actors" + "github.com/filecoin-project/lotus/chain/actors/adt" + "golang.org/x/xerrors" + "github.com/ipfs/go-cid" +{{range .versions}} + builtin{{.}} "github.com/filecoin-project/specs-actors{{import .}}actors/builtin" +{{end}} ) +func MakeState(store adt.Store, av actors.Version) (State, error) { + switch av { +{{range .versions}} + case actors.Version{{.}}: + return make{{.}}(store) +{{end}} +} + return nil, xerrors.Errorf("unknown actor version %d", av) +} + +func GetActorCodeID(av actors.Version) (cid.Cid, error) { + switch av { +{{range .versions}} + case actors.Version{{.}}: + return builtin{{.}}.CronActorCodeID, nil +{{end}} + } + + return cid.Undef, xerrors.Errorf("unknown actor version %d", av) +} + var ( Address = builtin{{.latestVersion}}.CronActorAddr Methods = builtin{{.latestVersion}}.MethodsCron ) + + +type State interface { + GetState() interface{} +} diff --git a/chain/actors/builtin/cron/cron.go b/chain/actors/builtin/cron/cron.go index a601f2b1e..2275e747f 100644 --- a/chain/actors/builtin/cron/cron.go +++ b/chain/actors/builtin/cron/cron.go @@ -1,10 +1,72 @@ package cron import ( + "github.com/filecoin-project/lotus/chain/actors" + "github.com/filecoin-project/lotus/chain/actors/adt" + "github.com/ipfs/go-cid" + "golang.org/x/xerrors" + + builtin0 "github.com/filecoin-project/specs-actors/actors/builtin" + + builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin" + + builtin3 "github.com/filecoin-project/specs-actors/v3/actors/builtin" + 
+ builtin4 "github.com/filecoin-project/specs-actors/v4/actors/builtin" + builtin5 "github.com/filecoin-project/specs-actors/v5/actors/builtin" ) +func MakeState(store adt.Store, av actors.Version) (State, error) { + switch av { + + case actors.Version0: + return make0(store) + + case actors.Version2: + return make2(store) + + case actors.Version3: + return make3(store) + + case actors.Version4: + return make4(store) + + case actors.Version5: + return make5(store) + + } + return nil, xerrors.Errorf("unknown actor version %d", av) +} + +func GetActorCodeID(av actors.Version) (cid.Cid, error) { + switch av { + + case actors.Version0: + return builtin0.CronActorCodeID, nil + + case actors.Version2: + return builtin2.CronActorCodeID, nil + + case actors.Version3: + return builtin3.CronActorCodeID, nil + + case actors.Version4: + return builtin4.CronActorCodeID, nil + + case actors.Version5: + return builtin5.CronActorCodeID, nil + + } + + return cid.Undef, xerrors.Errorf("unknown actor version %d", av) +} + var ( Address = builtin5.CronActorAddr Methods = builtin5.MethodsCron ) + +type State interface { + GetState() interface{} +} diff --git a/chain/actors/builtin/cron/state.go.template b/chain/actors/builtin/cron/state.go.template new file mode 100644 index 000000000..99a06d7f8 --- /dev/null +++ b/chain/actors/builtin/cron/state.go.template @@ -0,0 +1,35 @@ +package cron + +import ( + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/lotus/chain/actors/adt" + + cron{{.v}} "github.com/filecoin-project/specs-actors{{.import}}actors/builtin/cron" +) + +var _ State = (*state{{.v}})(nil) + +func load{{.v}}(store adt.Store, root cid.Cid) (State, error) { + out := state{{.v}}{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make{{.v}}(store adt.Store) (State, error) { + out := state{{.v}}{store: store} + out.State = *cron{{.v}}.ConstructState(cron{{.v}}.BuiltInEntries()) + return &out, 
nil +} + +type state{{.v}} struct { + cron{{.v}}.State + store adt.Store +} + +func (s *state{{.v}}) GetState() interface{} { + return &s.State +} \ No newline at end of file diff --git a/chain/actors/builtin/cron/v0.go b/chain/actors/builtin/cron/v0.go new file mode 100644 index 000000000..6147b858c --- /dev/null +++ b/chain/actors/builtin/cron/v0.go @@ -0,0 +1,35 @@ +package cron + +import ( + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/lotus/chain/actors/adt" + + cron0 "github.com/filecoin-project/specs-actors/actors/builtin/cron" +) + +var _ State = (*state0)(nil) + +func load0(store adt.Store, root cid.Cid) (State, error) { + out := state0{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make0(store adt.Store) (State, error) { + out := state0{store: store} + out.State = *cron0.ConstructState(cron0.BuiltInEntries()) + return &out, nil +} + +type state0 struct { + cron0.State + store adt.Store +} + +func (s *state0) GetState() interface{} { + return &s.State +} diff --git a/chain/actors/builtin/cron/v2.go b/chain/actors/builtin/cron/v2.go new file mode 100644 index 000000000..51ca179d9 --- /dev/null +++ b/chain/actors/builtin/cron/v2.go @@ -0,0 +1,35 @@ +package cron + +import ( + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/lotus/chain/actors/adt" + + cron2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/cron" +) + +var _ State = (*state2)(nil) + +func load2(store adt.Store, root cid.Cid) (State, error) { + out := state2{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make2(store adt.Store) (State, error) { + out := state2{store: store} + out.State = *cron2.ConstructState(cron2.BuiltInEntries()) + return &out, nil +} + +type state2 struct { + cron2.State + store adt.Store +} + +func (s *state2) GetState() interface{} { + return &s.State +} diff --git 
a/chain/actors/builtin/cron/v3.go b/chain/actors/builtin/cron/v3.go new file mode 100644 index 000000000..ff74d511d --- /dev/null +++ b/chain/actors/builtin/cron/v3.go @@ -0,0 +1,35 @@ +package cron + +import ( + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/lotus/chain/actors/adt" + + cron3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/cron" +) + +var _ State = (*state3)(nil) + +func load3(store adt.Store, root cid.Cid) (State, error) { + out := state3{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make3(store adt.Store) (State, error) { + out := state3{store: store} + out.State = *cron3.ConstructState(cron3.BuiltInEntries()) + return &out, nil +} + +type state3 struct { + cron3.State + store adt.Store +} + +func (s *state3) GetState() interface{} { + return &s.State +} diff --git a/chain/actors/builtin/cron/v4.go b/chain/actors/builtin/cron/v4.go new file mode 100644 index 000000000..1cff8cc28 --- /dev/null +++ b/chain/actors/builtin/cron/v4.go @@ -0,0 +1,35 @@ +package cron + +import ( + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/lotus/chain/actors/adt" + + cron4 "github.com/filecoin-project/specs-actors/v4/actors/builtin/cron" +) + +var _ State = (*state4)(nil) + +func load4(store adt.Store, root cid.Cid) (State, error) { + out := state4{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make4(store adt.Store) (State, error) { + out := state4{store: store} + out.State = *cron4.ConstructState(cron4.BuiltInEntries()) + return &out, nil +} + +type state4 struct { + cron4.State + store adt.Store +} + +func (s *state4) GetState() interface{} { + return &s.State +} diff --git a/chain/actors/builtin/cron/v5.go b/chain/actors/builtin/cron/v5.go new file mode 100644 index 000000000..2bb00dc21 --- /dev/null +++ b/chain/actors/builtin/cron/v5.go @@ -0,0 +1,35 @@ 
+package cron + +import ( + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/lotus/chain/actors/adt" + + cron5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/cron" +) + +var _ State = (*state5)(nil) + +func load5(store adt.Store, root cid.Cid) (State, error) { + out := state5{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make5(store adt.Store) (State, error) { + out := state5{store: store} + out.State = *cron5.ConstructState(cron5.BuiltInEntries()) + return &out, nil +} + +type state5 struct { + cron5.State + store adt.Store +} + +func (s *state5) GetState() interface{} { + return &s.State +} diff --git a/chain/actors/builtin/init/actor.go.template b/chain/actors/builtin/init/actor.go.template index 5b700cec8..f825eb9fa 100644 --- a/chain/actors/builtin/init/actor.go.template +++ b/chain/actors/builtin/init/actor.go.template @@ -1,6 +1,7 @@ package init import ( + "github.com/filecoin-project/lotus/chain/actors" "golang.org/x/xerrors" "github.com/filecoin-project/go-address" @@ -39,6 +40,27 @@ func Load(store adt.Store, act *types.Actor) (State, error) { return nil, xerrors.Errorf("unknown actor code %s", act.Code) } +func MakeState(store adt.Store, av actors.Version, networkName string) (State, error) { + switch av { +{{range .versions}} + case actors.Version{{.}}: + return make{{.}}(store, networkName) +{{end}} +} + return nil, xerrors.Errorf("unknown actor version %d", av) +} + +func GetActorCodeID(av actors.Version) (cid.Cid, error) { + switch av { +{{range .versions}} + case actors.Version{{.}}: + return builtin{{.}}.InitActorCodeID, nil +{{end}} + } + + return cid.Undef, xerrors.Errorf("unknown actor version %d", av) +} + type State interface { cbor.Marshaler @@ -56,5 +78,12 @@ type State interface { // Sets the network's name. This should only be used on upgrade/fork. 
SetNetworkName(name string) error - addressMap() (adt.Map, error) + // Sets the next ID for the init actor. This should only be used for testing. + SetNextID(id abi.ActorID) error + + // Sets the address map for the init actor. This should only be used for testing. + SetAddressMap(mcid cid.Cid) error + + AddressMap() (adt.Map, error) + GetState() interface{} } diff --git a/chain/actors/builtin/init/diff.go b/chain/actors/builtin/init/diff.go index 593171322..5eb8f3c75 100644 --- a/chain/actors/builtin/init/diff.go +++ b/chain/actors/builtin/init/diff.go @@ -11,12 +11,12 @@ import ( ) func DiffAddressMap(pre, cur State) (*AddressMapChanges, error) { - prem, err := pre.addressMap() + prem, err := pre.AddressMap() if err != nil { return nil, err } - curm, err := cur.addressMap() + curm, err := cur.AddressMap() if err != nil { return nil, err } diff --git a/chain/actors/builtin/init/init.go b/chain/actors/builtin/init/init.go index 605f2d103..e1bd6f371 100644 --- a/chain/actors/builtin/init/init.go +++ b/chain/actors/builtin/init/init.go @@ -1,6 +1,7 @@ package init import ( + "github.com/filecoin-project/lotus/chain/actors" "golang.org/x/xerrors" "github.com/filecoin-project/go-address" @@ -74,6 +75,51 @@ func Load(store adt.Store, act *types.Actor) (State, error) { return nil, xerrors.Errorf("unknown actor code %s", act.Code) } +func MakeState(store adt.Store, av actors.Version, networkName string) (State, error) { + switch av { + + case actors.Version0: + return make0(store, networkName) + + case actors.Version2: + return make2(store, networkName) + + case actors.Version3: + return make3(store, networkName) + + case actors.Version4: + return make4(store, networkName) + + case actors.Version5: + return make5(store, networkName) + + } + return nil, xerrors.Errorf("unknown actor version %d", av) +} + +func GetActorCodeID(av actors.Version) (cid.Cid, error) { + switch av { + + case actors.Version0: + return builtin0.InitActorCodeID, nil + + case actors.Version2: + 
return builtin2.InitActorCodeID, nil + + case actors.Version3: + return builtin3.InitActorCodeID, nil + + case actors.Version4: + return builtin4.InitActorCodeID, nil + + case actors.Version5: + return builtin5.InitActorCodeID, nil + + } + + return cid.Undef, xerrors.Errorf("unknown actor version %d", av) +} + type State interface { cbor.Marshaler @@ -91,5 +137,12 @@ type State interface { // Sets the network's name. This should only be used on upgrade/fork. SetNetworkName(name string) error - addressMap() (adt.Map, error) + // Sets the next ID for the init actor. This should only be used for testing. + SetNextID(id abi.ActorID) error + + // Sets the address map for the init actor. This should only be used for testing. + SetAddressMap(mcid cid.Cid) error + + AddressMap() (adt.Map, error) + GetState() interface{} } diff --git a/chain/actors/builtin/init/state.go.template b/chain/actors/builtin/init/state.go.template index 95f052bda..482ad4df5 100644 --- a/chain/actors/builtin/init/state.go.template +++ b/chain/actors/builtin/init/state.go.template @@ -29,6 +29,26 @@ func load{{.v}}(store adt.Store, root cid.Cid) (State, error) { return &out, nil } +func make{{.v}}(store adt.Store, networkName string) (State, error) { + out := state{{.v}}{store: store} + {{if (le .v 2)}} + mr, err := adt{{.v}}.MakeEmptyMap(store).Root() + if err != nil { + return nil, err + } + + out.State = *init{{.v}}.ConstructState(mr, networkName) + {{else}} + s, err := init{{.v}}.ConstructState(store, networkName) + if err != nil { + return nil, err + } + + out.State = *s + {{end}} + return &out, nil +} + type state{{.v}} struct { init{{.v}}.State store adt.Store @@ -66,6 +86,11 @@ func (s *state{{.v}}) SetNetworkName(name string) error { return nil } +func (s *state{{.v}}) SetNextID(id abi.ActorID) error { + s.State.NextID = id + return nil +} + func (s *state{{.v}}) Remove(addrs ...address.Address) (err error) { m, err := adt{{.v}}.AsMap(s.store, s.State.AddressMap{{if (ge .v 3)}}, 
builtin{{.v}}.DefaultHamtBitwidth{{end}}) if err != nil { @@ -84,6 +109,15 @@ func (s *state{{.v}}) Remove(addrs ...address.Address) (err error) { return nil } -func (s *state{{.v}}) addressMap() (adt.Map, error) { - return adt{{.v}}.AsMap(s.store, s.AddressMap{{if (ge .v 3)}}, builtin{{.v}}.DefaultHamtBitwidth{{end}}) +func (s *state{{.v}}) SetAddressMap(mcid cid.Cid) error { + s.State.AddressMap = mcid + return nil } + +func (s *state{{.v}}) AddressMap() (adt.Map, error) { + return adt{{.v}}.AsMap(s.store, s.State.AddressMap{{if (ge .v 3)}}, builtin{{.v}}.DefaultHamtBitwidth{{end}}) +} + +func (s *state{{.v}}) GetState() interface{} { + return &s.State +} \ No newline at end of file diff --git a/chain/actors/builtin/init/v0.go b/chain/actors/builtin/init/v0.go index c019705b1..ddd2dab94 100644 --- a/chain/actors/builtin/init/v0.go +++ b/chain/actors/builtin/init/v0.go @@ -25,6 +25,19 @@ func load0(store adt.Store, root cid.Cid) (State, error) { return &out, nil } +func make0(store adt.Store, networkName string) (State, error) { + out := state0{store: store} + + mr, err := adt0.MakeEmptyMap(store).Root() + if err != nil { + return nil, err + } + + out.State = *init0.ConstructState(mr, networkName) + + return &out, nil +} + type state0 struct { init0.State store adt.Store @@ -62,6 +75,11 @@ func (s *state0) SetNetworkName(name string) error { return nil } +func (s *state0) SetNextID(id abi.ActorID) error { + s.State.NextID = id + return nil +} + func (s *state0) Remove(addrs ...address.Address) (err error) { m, err := adt0.AsMap(s.store, s.State.AddressMap) if err != nil { @@ -80,6 +98,15 @@ func (s *state0) Remove(addrs ...address.Address) (err error) { return nil } -func (s *state0) addressMap() (adt.Map, error) { - return adt0.AsMap(s.store, s.AddressMap) +func (s *state0) SetAddressMap(mcid cid.Cid) error { + s.State.AddressMap = mcid + return nil +} + +func (s *state0) AddressMap() (adt.Map, error) { + return adt0.AsMap(s.store, s.State.AddressMap) +} + +func 
(s *state0) GetState() interface{} { + return &s.State } diff --git a/chain/actors/builtin/init/v2.go b/chain/actors/builtin/init/v2.go index 420243be4..72e2d56a5 100644 --- a/chain/actors/builtin/init/v2.go +++ b/chain/actors/builtin/init/v2.go @@ -25,6 +25,19 @@ func load2(store adt.Store, root cid.Cid) (State, error) { return &out, nil } +func make2(store adt.Store, networkName string) (State, error) { + out := state2{store: store} + + mr, err := adt2.MakeEmptyMap(store).Root() + if err != nil { + return nil, err + } + + out.State = *init2.ConstructState(mr, networkName) + + return &out, nil +} + type state2 struct { init2.State store adt.Store @@ -62,6 +75,11 @@ func (s *state2) SetNetworkName(name string) error { return nil } +func (s *state2) SetNextID(id abi.ActorID) error { + s.State.NextID = id + return nil +} + func (s *state2) Remove(addrs ...address.Address) (err error) { m, err := adt2.AsMap(s.store, s.State.AddressMap) if err != nil { @@ -80,6 +98,15 @@ func (s *state2) Remove(addrs ...address.Address) (err error) { return nil } -func (s *state2) addressMap() (adt.Map, error) { - return adt2.AsMap(s.store, s.AddressMap) +func (s *state2) SetAddressMap(mcid cid.Cid) error { + s.State.AddressMap = mcid + return nil +} + +func (s *state2) AddressMap() (adt.Map, error) { + return adt2.AsMap(s.store, s.State.AddressMap) +} + +func (s *state2) GetState() interface{} { + return &s.State } diff --git a/chain/actors/builtin/init/v3.go b/chain/actors/builtin/init/v3.go index eaa54dfd4..4609c94a3 100644 --- a/chain/actors/builtin/init/v3.go +++ b/chain/actors/builtin/init/v3.go @@ -27,6 +27,19 @@ func load3(store adt.Store, root cid.Cid) (State, error) { return &out, nil } +func make3(store adt.Store, networkName string) (State, error) { + out := state3{store: store} + + s, err := init3.ConstructState(store, networkName) + if err != nil { + return nil, err + } + + out.State = *s + + return &out, nil +} + type state3 struct { init3.State store adt.Store @@ -64,6 
+77,11 @@ func (s *state3) SetNetworkName(name string) error { return nil } +func (s *state3) SetNextID(id abi.ActorID) error { + s.State.NextID = id + return nil +} + func (s *state3) Remove(addrs ...address.Address) (err error) { m, err := adt3.AsMap(s.store, s.State.AddressMap, builtin3.DefaultHamtBitwidth) if err != nil { @@ -82,6 +100,15 @@ func (s *state3) Remove(addrs ...address.Address) (err error) { return nil } -func (s *state3) addressMap() (adt.Map, error) { - return adt3.AsMap(s.store, s.AddressMap, builtin3.DefaultHamtBitwidth) +func (s *state3) SetAddressMap(mcid cid.Cid) error { + s.State.AddressMap = mcid + return nil +} + +func (s *state3) AddressMap() (adt.Map, error) { + return adt3.AsMap(s.store, s.State.AddressMap, builtin3.DefaultHamtBitwidth) +} + +func (s *state3) GetState() interface{} { + return &s.State } diff --git a/chain/actors/builtin/init/v4.go b/chain/actors/builtin/init/v4.go index 38749eed5..dc56d1f19 100644 --- a/chain/actors/builtin/init/v4.go +++ b/chain/actors/builtin/init/v4.go @@ -27,6 +27,19 @@ func load4(store adt.Store, root cid.Cid) (State, error) { return &out, nil } +func make4(store adt.Store, networkName string) (State, error) { + out := state4{store: store} + + s, err := init4.ConstructState(store, networkName) + if err != nil { + return nil, err + } + + out.State = *s + + return &out, nil +} + type state4 struct { init4.State store adt.Store @@ -64,6 +77,11 @@ func (s *state4) SetNetworkName(name string) error { return nil } +func (s *state4) SetNextID(id abi.ActorID) error { + s.State.NextID = id + return nil +} + func (s *state4) Remove(addrs ...address.Address) (err error) { m, err := adt4.AsMap(s.store, s.State.AddressMap, builtin4.DefaultHamtBitwidth) if err != nil { @@ -82,6 +100,15 @@ func (s *state4) Remove(addrs ...address.Address) (err error) { return nil } -func (s *state4) addressMap() (adt.Map, error) { - return adt4.AsMap(s.store, s.AddressMap, builtin4.DefaultHamtBitwidth) +func (s *state4) 
SetAddressMap(mcid cid.Cid) error { + s.State.AddressMap = mcid + return nil +} + +func (s *state4) AddressMap() (adt.Map, error) { + return adt4.AsMap(s.store, s.State.AddressMap, builtin4.DefaultHamtBitwidth) +} + +func (s *state4) GetState() interface{} { + return &s.State } diff --git a/chain/actors/builtin/init/v5.go b/chain/actors/builtin/init/v5.go index 01c20fc23..107366de5 100644 --- a/chain/actors/builtin/init/v5.go +++ b/chain/actors/builtin/init/v5.go @@ -27,6 +27,19 @@ func load5(store adt.Store, root cid.Cid) (State, error) { return &out, nil } +func make5(store adt.Store, networkName string) (State, error) { + out := state5{store: store} + + s, err := init5.ConstructState(store, networkName) + if err != nil { + return nil, err + } + + out.State = *s + + return &out, nil +} + type state5 struct { init5.State store adt.Store @@ -64,6 +77,11 @@ func (s *state5) SetNetworkName(name string) error { return nil } +func (s *state5) SetNextID(id abi.ActorID) error { + s.State.NextID = id + return nil +} + func (s *state5) Remove(addrs ...address.Address) (err error) { m, err := adt5.AsMap(s.store, s.State.AddressMap, builtin5.DefaultHamtBitwidth) if err != nil { @@ -82,6 +100,15 @@ func (s *state5) Remove(addrs ...address.Address) (err error) { return nil } -func (s *state5) addressMap() (adt.Map, error) { - return adt5.AsMap(s.store, s.AddressMap, builtin5.DefaultHamtBitwidth) +func (s *state5) SetAddressMap(mcid cid.Cid) error { + s.State.AddressMap = mcid + return nil +} + +func (s *state5) AddressMap() (adt.Map, error) { + return adt5.AsMap(s.store, s.State.AddressMap, builtin5.DefaultHamtBitwidth) +} + +func (s *state5) GetState() interface{} { + return &s.State } diff --git a/chain/actors/builtin/market/actor.go.template b/chain/actors/builtin/market/actor.go.template index eaa06b867..f78c84b8f 100644 --- a/chain/actors/builtin/market/actor.go.template +++ b/chain/actors/builtin/market/actor.go.template @@ -5,6 +5,7 @@ import ( 
"github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" "github.com/filecoin-project/go-state-types/cbor" "github.com/ipfs/go-cid" cbg "github.com/whyrusleeping/cbor-gen" @@ -15,6 +16,7 @@ import ( {{end}} "github.com/filecoin-project/lotus/chain/actors/adt" + "github.com/filecoin-project/lotus/chain/actors" "github.com/filecoin-project/lotus/chain/actors/builtin" "github.com/filecoin-project/lotus/chain/types" ) @@ -41,6 +43,27 @@ func Load(store adt.Store, act *types.Actor) (State, error) { return nil, xerrors.Errorf("unknown actor code %s", act.Code) } +func MakeState(store adt.Store, av actors.Version) (State, error) { + switch av { +{{range .versions}} + case actors.Version{{.}}: + return make{{.}}(store) +{{end}} +} + return nil, xerrors.Errorf("unknown actor version %d", av) +} + +func GetActorCodeID(av actors.Version) (cid.Cid, error) { + switch av { +{{range .versions}} + case actors.Version{{.}}: + return builtin{{.}}.StorageMarketActorCodeID, nil +{{end}} + } + + return cid.Undef, xerrors.Errorf("unknown actor version %d", av) +} + type State interface { cbor.Marshaler BalancesChanged(State) (bool, error) @@ -55,6 +78,7 @@ type State interface { minerAddr address.Address, deals []abi.DealID, currEpoch, sectorExpiry abi.ChainEpoch, ) (weight, verifiedWeight abi.DealWeight, err error) NextID() (abi.DealID, error) + GetState() interface{} } type BalanceTable interface { @@ -92,51 +116,67 @@ type DealState struct { } type DealProposal struct { - PieceCID cid.Cid - PieceSize abi.PaddedPieceSize - VerifiedDeal bool - Client address.Address - Provider address.Address - Label string - StartEpoch abi.ChainEpoch - EndEpoch abi.ChainEpoch + PieceCID cid.Cid + PieceSize abi.PaddedPieceSize + VerifiedDeal bool + Client address.Address + Provider address.Address + Label string + StartEpoch abi.ChainEpoch + EndEpoch abi.ChainEpoch StoragePricePerEpoch abi.TokenAmount - 
ProviderCollateral abi.TokenAmount - ClientCollateral abi.TokenAmount + ProviderCollateral abi.TokenAmount + ClientCollateral abi.TokenAmount } type DealStateChanges struct { - Added []DealIDState + Added []DealIDState Modified []DealStateChange - Removed []DealIDState + Removed []DealIDState } type DealIDState struct { - ID abi.DealID + ID abi.DealID Deal DealState } // DealStateChange is a change in deal state from -> to type DealStateChange struct { - ID abi.DealID + ID abi.DealID From *DealState - To *DealState + To *DealState } type DealProposalChanges struct { - Added []ProposalIDState + Added []ProposalIDState Removed []ProposalIDState } type ProposalIDState struct { - ID abi.DealID + ID abi.DealID Proposal DealProposal } func EmptyDealState() *DealState { return &DealState{ SectorStartEpoch: -1, - SlashEpoch: -1, + SlashEpoch: -1, LastUpdatedEpoch: -1, } -} \ No newline at end of file +} + +// returns the earned fees and pending fees for a given deal +func (deal DealProposal) GetDealFees(height abi.ChainEpoch) (abi.TokenAmount, abi.TokenAmount) { + tf := big.Mul(deal.StoragePricePerEpoch, big.NewInt(int64(deal.EndEpoch-deal.StartEpoch))) + + ef := big.Mul(deal.StoragePricePerEpoch, big.NewInt(int64(height-deal.StartEpoch))) + if ef.LessThan(big.Zero()) { + ef = big.Zero() + } + + if ef.GreaterThan(tf) { + ef = tf + } + + return ef, big.Sub(tf, ef) +} diff --git a/chain/actors/builtin/market/market.go b/chain/actors/builtin/market/market.go index 8f5964ecc..026e35d4e 100644 --- a/chain/actors/builtin/market/market.go +++ b/chain/actors/builtin/market/market.go @@ -5,6 +5,7 @@ import ( "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" "github.com/filecoin-project/go-state-types/cbor" "github.com/ipfs/go-cid" cbg "github.com/whyrusleeping/cbor-gen" @@ -21,6 +22,7 @@ import ( builtin5 "github.com/filecoin-project/specs-actors/v5/actors/builtin" + 
"github.com/filecoin-project/lotus/chain/actors" "github.com/filecoin-project/lotus/chain/actors/adt" "github.com/filecoin-project/lotus/chain/actors/builtin" "github.com/filecoin-project/lotus/chain/types" @@ -76,6 +78,51 @@ func Load(store adt.Store, act *types.Actor) (State, error) { return nil, xerrors.Errorf("unknown actor code %s", act.Code) } +func MakeState(store adt.Store, av actors.Version) (State, error) { + switch av { + + case actors.Version0: + return make0(store) + + case actors.Version2: + return make2(store) + + case actors.Version3: + return make3(store) + + case actors.Version4: + return make4(store) + + case actors.Version5: + return make5(store) + + } + return nil, xerrors.Errorf("unknown actor version %d", av) +} + +func GetActorCodeID(av actors.Version) (cid.Cid, error) { + switch av { + + case actors.Version0: + return builtin0.StorageMarketActorCodeID, nil + + case actors.Version2: + return builtin2.StorageMarketActorCodeID, nil + + case actors.Version3: + return builtin3.StorageMarketActorCodeID, nil + + case actors.Version4: + return builtin4.StorageMarketActorCodeID, nil + + case actors.Version5: + return builtin5.StorageMarketActorCodeID, nil + + } + + return cid.Undef, xerrors.Errorf("unknown actor version %d", av) +} + type State interface { cbor.Marshaler BalancesChanged(State) (bool, error) @@ -90,6 +137,7 @@ type State interface { minerAddr address.Address, deals []abi.DealID, currEpoch, sectorExpiry abi.ChainEpoch, ) (weight, verifiedWeight abi.DealWeight, err error) NextID() (abi.DealID, error) + GetState() interface{} } type BalanceTable interface { @@ -175,3 +223,19 @@ func EmptyDealState() *DealState { LastUpdatedEpoch: -1, } } + +// returns the earned fees and pending fees for a given deal +func (deal DealProposal) GetDealFees(height abi.ChainEpoch) (abi.TokenAmount, abi.TokenAmount) { + tf := big.Mul(deal.StoragePricePerEpoch, big.NewInt(int64(deal.EndEpoch-deal.StartEpoch))) + + ef := big.Mul(deal.StoragePricePerEpoch, 
big.NewInt(int64(height-deal.StartEpoch))) + if ef.LessThan(big.Zero()) { + ef = big.Zero() + } + + if ef.GreaterThan(tf) { + ef = tf + } + + return ef, big.Sub(tf, ef) +} diff --git a/chain/actors/builtin/market/state.go.template b/chain/actors/builtin/market/state.go.template index a55743ce5..70b731148 100644 --- a/chain/actors/builtin/market/state.go.template +++ b/chain/actors/builtin/market/state.go.template @@ -26,6 +26,31 @@ func load{{.v}}(store adt.Store, root cid.Cid) (State, error) { return &out, nil } +func make{{.v}}(store adt.Store) (State, error) { + out := state{{.v}}{store: store} + {{if (le .v 2)}} + ea, err := adt{{.v}}.MakeEmptyArray(store).Root() + if err != nil { + return nil, err + } + + em, err := adt{{.v}}.MakeEmptyMap(store).Root() + if err != nil { + return nil, err + } + + out.State = *market{{.v}}.ConstructState(ea, em, em) + {{else}} + s, err := market{{.v}}.ConstructState(store) + if err != nil { + return nil, err + } + + out.State = *s + {{end}} + return &out, nil +} + type state{{.v}} struct { market{{.v}}.State store adt.Store @@ -207,3 +232,7 @@ func (s *dealProposals{{.v}}) array() adt.Array { func fromV{{.v}}DealProposal(v{{.v}} market{{.v}}.DealProposal) DealProposal { return (DealProposal)(v{{.v}}) } + +func (s *state{{.v}}) GetState() interface{} { + return &s.State +} diff --git a/chain/actors/builtin/market/v0.go b/chain/actors/builtin/market/v0.go index 175c0a2ea..b3093b54b 100644 --- a/chain/actors/builtin/market/v0.go +++ b/chain/actors/builtin/market/v0.go @@ -26,6 +26,24 @@ func load0(store adt.Store, root cid.Cid) (State, error) { return &out, nil } +func make0(store adt.Store) (State, error) { + out := state0{store: store} + + ea, err := adt0.MakeEmptyArray(store).Root() + if err != nil { + return nil, err + } + + em, err := adt0.MakeEmptyMap(store).Root() + if err != nil { + return nil, err + } + + out.State = *market0.ConstructState(ea, em, em) + + return &out, nil +} + type state0 struct { market0.State store 
adt.Store @@ -207,3 +225,7 @@ func (s *dealProposals0) array() adt.Array { func fromV0DealProposal(v0 market0.DealProposal) DealProposal { return (DealProposal)(v0) } + +func (s *state0) GetState() interface{} { + return &s.State +} diff --git a/chain/actors/builtin/market/v2.go b/chain/actors/builtin/market/v2.go index dafae8feb..fdedcce85 100644 --- a/chain/actors/builtin/market/v2.go +++ b/chain/actors/builtin/market/v2.go @@ -26,6 +26,24 @@ func load2(store adt.Store, root cid.Cid) (State, error) { return &out, nil } +func make2(store adt.Store) (State, error) { + out := state2{store: store} + + ea, err := adt2.MakeEmptyArray(store).Root() + if err != nil { + return nil, err + } + + em, err := adt2.MakeEmptyMap(store).Root() + if err != nil { + return nil, err + } + + out.State = *market2.ConstructState(ea, em, em) + + return &out, nil +} + type state2 struct { market2.State store adt.Store @@ -207,3 +225,7 @@ func (s *dealProposals2) array() adt.Array { func fromV2DealProposal(v2 market2.DealProposal) DealProposal { return (DealProposal)(v2) } + +func (s *state2) GetState() interface{} { + return &s.State +} diff --git a/chain/actors/builtin/market/v3.go b/chain/actors/builtin/market/v3.go index dec8d6e25..53d266443 100644 --- a/chain/actors/builtin/market/v3.go +++ b/chain/actors/builtin/market/v3.go @@ -26,6 +26,19 @@ func load3(store adt.Store, root cid.Cid) (State, error) { return &out, nil } +func make3(store adt.Store) (State, error) { + out := state3{store: store} + + s, err := market3.ConstructState(store) + if err != nil { + return nil, err + } + + out.State = *s + + return &out, nil +} + type state3 struct { market3.State store adt.Store @@ -207,3 +220,7 @@ func (s *dealProposals3) array() adt.Array { func fromV3DealProposal(v3 market3.DealProposal) DealProposal { return (DealProposal)(v3) } + +func (s *state3) GetState() interface{} { + return &s.State +} diff --git a/chain/actors/builtin/market/v4.go b/chain/actors/builtin/market/v4.go index 
22514395c..30aa26920 100644 --- a/chain/actors/builtin/market/v4.go +++ b/chain/actors/builtin/market/v4.go @@ -26,6 +26,19 @@ func load4(store adt.Store, root cid.Cid) (State, error) { return &out, nil } +func make4(store adt.Store) (State, error) { + out := state4{store: store} + + s, err := market4.ConstructState(store) + if err != nil { + return nil, err + } + + out.State = *s + + return &out, nil +} + type state4 struct { market4.State store adt.Store @@ -207,3 +220,7 @@ func (s *dealProposals4) array() adt.Array { func fromV4DealProposal(v4 market4.DealProposal) DealProposal { return (DealProposal)(v4) } + +func (s *state4) GetState() interface{} { + return &s.State +} diff --git a/chain/actors/builtin/market/v5.go b/chain/actors/builtin/market/v5.go index 9acd3d57e..12378c76d 100644 --- a/chain/actors/builtin/market/v5.go +++ b/chain/actors/builtin/market/v5.go @@ -26,6 +26,19 @@ func load5(store adt.Store, root cid.Cid) (State, error) { return &out, nil } +func make5(store adt.Store) (State, error) { + out := state5{store: store} + + s, err := market5.ConstructState(store) + if err != nil { + return nil, err + } + + out.State = *s + + return &out, nil +} + type state5 struct { market5.State store adt.Store @@ -207,3 +220,7 @@ func (s *dealProposals5) array() adt.Array { func fromV5DealProposal(v5 market5.DealProposal) DealProposal { return (DealProposal)(v5) } + +func (s *state5) GetState() interface{} { + return &s.State +} diff --git a/chain/actors/builtin/miner/actor.go.template b/chain/actors/builtin/miner/actor.go.template index 4b3d8db5e..8d46f99fd 100644 --- a/chain/actors/builtin/miner/actor.go.template +++ b/chain/actors/builtin/miner/actor.go.template @@ -3,6 +3,7 @@ package miner import ( "github.com/filecoin-project/go-state-types/big" "github.com/filecoin-project/go-state-types/network" + "github.com/filecoin-project/lotus/chain/actors" "github.com/ipfs/go-cid" "github.com/libp2p/go-libp2p-core/peer" cbg "github.com/whyrusleeping/cbor-gen" @@ 
-21,6 +22,7 @@ import ( miner0 "github.com/filecoin-project/specs-actors/actors/builtin/miner" miner2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/miner" miner3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/miner" + miner5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/miner" {{range .versions}} builtin{{.}} "github.com/filecoin-project/specs-actors{{import .}}actors/builtin" {{end}} @@ -60,6 +62,27 @@ func Load(store adt.Store, act *types.Actor) (State, error) { return nil, xerrors.Errorf("unknown actor code %s", act.Code) } +func MakeState(store adt.Store, av actors.Version) (State, error) { + switch av { +{{range .versions}} + case actors.Version{{.}}: + return make{{.}}(store) +{{end}} +} + return nil, xerrors.Errorf("unknown actor version %d", av) +} + +func GetActorCodeID(av actors.Version) (cid.Cid, error) { + switch av { +{{range .versions}} + case actors.Version{{.}}: + return builtin{{.}}.StorageMinerActorCodeID, nil +{{end}} + } + + return cid.Undef, xerrors.Errorf("unknown actor version %d", av) +} + type State interface { cbor.Marshaler @@ -75,9 +98,18 @@ type State interface { FindSector(abi.SectorNumber) (*SectorLocation, error) GetSectorExpiration(abi.SectorNumber) (*SectorExpiration, error) GetPrecommittedSector(abi.SectorNumber) (*SectorPreCommitOnChainInfo, error) + ForEachPrecommittedSector(func(SectorPreCommitOnChainInfo) error) error LoadSectors(sectorNos *bitfield.BitField) ([]*SectorOnChainInfo, error) NumLiveSectors() (uint64, error) IsAllocated(abi.SectorNumber) (bool, error) + // UnallocatedSectorNumbers returns up to count unallocated sector numbers (or less than + // count if there aren't enough). 
+ UnallocatedSectorNumbers(count int) ([]abi.SectorNumber, error) + + // Note that ProvingPeriodStart is deprecated and will be renamed / removed in a future version of actors + GetProvingPeriodStart() (abi.ChainEpoch, error) + // Testing only + EraseAllUnproven() error LoadDeadline(idx uint64) (Deadline, error) ForEachDeadline(cb func(idx uint64, dl Deadline) error) error @@ -95,6 +127,7 @@ type State interface { decodeSectorOnChainInfo(*cbg.Deferred) (SectorOnChainInfo, error) precommits() (adt.Map, error) decodeSectorPreCommitOnChainInfo(*cbg.Deferred) (SectorPreCommitOnChainInfo, error) + GetState() interface{} } type Deadline interface { @@ -148,6 +181,7 @@ type DeclareFaultsRecoveredParams = miner0.DeclareFaultsRecoveredParams type SubmitWindowedPoStParams = miner0.SubmitWindowedPoStParams type ProveCommitSectorParams = miner0.ProveCommitSectorParams type DisputeWindowedPoStParams = miner3.DisputeWindowedPoStParams +type ProveCommitAggregateParams = miner5.ProveCommitAggregateParams func PreferredSealProofTypeFromWindowPoStType(nver network.Version, proof abi.RegisteredPoStProof) (abi.RegisteredSealProof, error) { // We added support for the new proofs in network version 7, and removed support for the old diff --git a/chain/actors/builtin/miner/miner.go b/chain/actors/builtin/miner/miner.go index 195e8f177..995dc78cb 100644 --- a/chain/actors/builtin/miner/miner.go +++ b/chain/actors/builtin/miner/miner.go @@ -3,6 +3,7 @@ package miner import ( "github.com/filecoin-project/go-state-types/big" "github.com/filecoin-project/go-state-types/network" + "github.com/filecoin-project/lotus/chain/actors" "github.com/ipfs/go-cid" "github.com/libp2p/go-libp2p-core/peer" cbg "github.com/whyrusleeping/cbor-gen" @@ -21,6 +22,7 @@ import ( miner0 "github.com/filecoin-project/specs-actors/actors/builtin/miner" miner2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/miner" miner3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/miner" + miner5 
"github.com/filecoin-project/specs-actors/v5/actors/builtin/miner" builtin0 "github.com/filecoin-project/specs-actors/actors/builtin" @@ -95,6 +97,51 @@ func Load(store adt.Store, act *types.Actor) (State, error) { return nil, xerrors.Errorf("unknown actor code %s", act.Code) } +func MakeState(store adt.Store, av actors.Version) (State, error) { + switch av { + + case actors.Version0: + return make0(store) + + case actors.Version2: + return make2(store) + + case actors.Version3: + return make3(store) + + case actors.Version4: + return make4(store) + + case actors.Version5: + return make5(store) + + } + return nil, xerrors.Errorf("unknown actor version %d", av) +} + +func GetActorCodeID(av actors.Version) (cid.Cid, error) { + switch av { + + case actors.Version0: + return builtin0.StorageMinerActorCodeID, nil + + case actors.Version2: + return builtin2.StorageMinerActorCodeID, nil + + case actors.Version3: + return builtin3.StorageMinerActorCodeID, nil + + case actors.Version4: + return builtin4.StorageMinerActorCodeID, nil + + case actors.Version5: + return builtin5.StorageMinerActorCodeID, nil + + } + + return cid.Undef, xerrors.Errorf("unknown actor version %d", av) +} + type State interface { cbor.Marshaler @@ -110,9 +157,18 @@ type State interface { FindSector(abi.SectorNumber) (*SectorLocation, error) GetSectorExpiration(abi.SectorNumber) (*SectorExpiration, error) GetPrecommittedSector(abi.SectorNumber) (*SectorPreCommitOnChainInfo, error) + ForEachPrecommittedSector(func(SectorPreCommitOnChainInfo) error) error LoadSectors(sectorNos *bitfield.BitField) ([]*SectorOnChainInfo, error) NumLiveSectors() (uint64, error) IsAllocated(abi.SectorNumber) (bool, error) + // UnallocatedSectorNumbers returns up to count unallocated sector numbers (or less than + // count if there aren't enough). 
+ UnallocatedSectorNumbers(count int) ([]abi.SectorNumber, error) + + // Note that ProvingPeriodStart is deprecated and will be renamed / removed in a future version of actors + GetProvingPeriodStart() (abi.ChainEpoch, error) + // Testing only + EraseAllUnproven() error LoadDeadline(idx uint64) (Deadline, error) ForEachDeadline(cb func(idx uint64, dl Deadline) error) error @@ -130,6 +186,7 @@ type State interface { decodeSectorOnChainInfo(*cbg.Deferred) (SectorOnChainInfo, error) precommits() (adt.Map, error) decodeSectorPreCommitOnChainInfo(*cbg.Deferred) (SectorPreCommitOnChainInfo, error) + GetState() interface{} } type Deadline interface { @@ -183,6 +240,7 @@ type DeclareFaultsRecoveredParams = miner0.DeclareFaultsRecoveredParams type SubmitWindowedPoStParams = miner0.SubmitWindowedPoStParams type ProveCommitSectorParams = miner0.ProveCommitSectorParams type DisputeWindowedPoStParams = miner3.DisputeWindowedPoStParams +type ProveCommitAggregateParams = miner5.ProveCommitAggregateParams func PreferredSealProofTypeFromWindowPoStType(nver network.Version, proof abi.RegisteredPoStProof) (abi.RegisteredSealProof, error) { // We added support for the new proofs in network version 7, and removed support for the old diff --git a/chain/actors/builtin/miner/state.go.template b/chain/actors/builtin/miner/state.go.template index 0769eea10..eb7ab00bf 100644 --- a/chain/actors/builtin/miner/state.go.template +++ b/chain/actors/builtin/miner/state.go.template @@ -8,6 +8,7 @@ import ( {{end}} "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-bitfield" + rle "github.com/filecoin-project/go-bitfield/rle" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/dline" "github.com/ipfs/go-cid" @@ -35,6 +36,12 @@ func load{{.v}}(store adt.Store, root cid.Cid) (State, error) { return &out, nil } +func make{{.v}}(store adt.Store) (State, error) { + out := state{{.v}}{store: store} + out.State = miner{{.v}}.State{} + return 
&out, nil +} + type state{{.v}} struct { miner{{.v}}.State store adt.Store @@ -203,6 +210,26 @@ func (s *state{{.v}}) GetPrecommittedSector(num abi.SectorNumber) (*SectorPreCom return &ret, nil } +func (s *state{{.v}}) ForEachPrecommittedSector(cb func(SectorPreCommitOnChainInfo) error) error { +{{if (ge .v 3) -}} + precommitted, err := adt{{.v}}.AsMap(s.store, s.State.PreCommittedSectors, builtin{{.v}}.DefaultHamtBitwidth) +{{- else -}} + precommitted, err := adt{{.v}}.AsMap(s.store, s.State.PreCommittedSectors) +{{- end}} + if err != nil { + return err + } + + var info miner{{.v}}.SectorPreCommitOnChainInfo + if err := precommitted.ForEach(&info, func(_ string) error { + return cb(fromV{{.v}}SectorPreCommitOnChainInfo(info)) + }); err != nil { + return err + } + + return nil +} + func (s *state{{.v}}) LoadSectors(snos *bitfield.BitField) ([]*SectorOnChainInfo, error) { sectors, err := miner{{.v}}.LoadSectors(s.store, s.State.Sectors) if err != nil { @@ -236,15 +263,61 @@ func (s *state{{.v}}) LoadSectors(snos *bitfield.BitField) ([]*SectorOnChainInfo return infos, nil } -func (s *state{{.v}}) IsAllocated(num abi.SectorNumber) (bool, error) { +func (s *state{{.v}}) loadAllocatedSectorNumbers() (bitfield.BitField, error) { var allocatedSectors bitfield.BitField - if err := s.store.Get(s.store.Context(), s.State.AllocatedSectors, &allocatedSectors); err != nil { + err := s.store.Get(s.store.Context(), s.State.AllocatedSectors, &allocatedSectors) + return allocatedSectors, err +} + +func (s *state{{.v}}) IsAllocated(num abi.SectorNumber) (bool, error) { + allocatedSectors, err := s.loadAllocatedSectorNumbers() + if err != nil { return false, err } return allocatedSectors.IsSet(uint64(num)) } +func (s *state{{.v}}) GetProvingPeriodStart() (abi.ChainEpoch, error) { + return s.State.ProvingPeriodStart, nil +} + +func (s *state{{.v}}) UnallocatedSectorNumbers(count int) ([]abi.SectorNumber, error) { + allocatedSectors, err := s.loadAllocatedSectorNumbers() + if err != 
nil { + return nil, err + } + + allocatedRuns, err := allocatedSectors.RunIterator() + if err != nil { + return nil, err + } + + unallocatedRuns, err := rle.Subtract( + &rle.RunSliceIterator{Runs: []rle.Run{ {Val: true, Len: abi.MaxSectorNumber} }}, + allocatedRuns, + ) + if err != nil { + return nil, err + } + + iter, err := rle.BitsFromRuns(unallocatedRuns) + if err != nil { + return nil, err + } + + sectors := make([]abi.SectorNumber, 0, count) + for iter.HasNext() && len(sectors) < count { + nextNo, err := iter.Next() + if err != nil { + return nil, err + } + sectors = append(sectors, abi.SectorNumber(nextNo)) + } + + return sectors, nil +} + func (s *state{{.v}}) LoadDeadline(idx uint64) (Deadline, error) { dls, err := s.State.LoadDeadlines(s.store) if err != nil { @@ -366,6 +439,45 @@ func (s *state{{.v}}) decodeSectorPreCommitOnChainInfo(val *cbg.Deferred) (Secto return fromV{{.v}}SectorPreCommitOnChainInfo(sp), nil } +func (s *state{{.v}}) EraseAllUnproven() error { + {{if (ge .v 2)}} + dls, err := s.State.LoadDeadlines(s.store) + if err != nil { + return err + } + + err = dls.ForEach(s.store, func(dindx uint64, dl *miner{{.v}}.Deadline) error { + ps, err := dl.PartitionsArray(s.store) + if err != nil { + return err + } + + var part miner{{.v}}.Partition + err = ps.ForEach(&part, func(pindx int64) error { + _ = part.ActivateUnproven() + err = ps.Set(uint64(pindx), &part) + return nil + }) + + if err != nil { + return err + } + + dl.Partitions, err = ps.Root() + if err != nil { + return err + } + + return dls.UpdateDeadline(s.store, dindx, dl) + }) + + return s.State.SaveDeadlines(s.store, dls) + {{else}} + // field doesn't exist until v2 + {{end}} + return nil +} + func (d *deadline{{.v}}) LoadPartition(idx uint64) (Partition, error) { p, err := d.Deadline.LoadPartition(d.store, idx) if err != nil { @@ -458,3 +570,7 @@ func fromV{{.v}}SectorPreCommitOnChainInfo(v{{.v}} miner{{.v}}.SectorPreCommitOn return (SectorPreCommitOnChainInfo)(v0) {{end}} } + +func 
(s *state{{.v}}) GetState() interface{} { + return &s.State +} diff --git a/chain/actors/builtin/miner/v0.go b/chain/actors/builtin/miner/v0.go index 2dc8ae23e..c5e887481 100644 --- a/chain/actors/builtin/miner/v0.go +++ b/chain/actors/builtin/miner/v0.go @@ -8,6 +8,7 @@ import ( "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-bitfield" + rle "github.com/filecoin-project/go-bitfield/rle" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/dline" "github.com/ipfs/go-cid" @@ -32,6 +33,12 @@ func load0(store adt.Store, root cid.Cid) (State, error) { return &out, nil } +func make0(store adt.Store) (State, error) { + out := state0{store: store} + out.State = miner0.State{} + return &out, nil +} + type state0 struct { miner0.State store adt.Store @@ -200,6 +207,22 @@ func (s *state0) GetPrecommittedSector(num abi.SectorNumber) (*SectorPreCommitOn return &ret, nil } +func (s *state0) ForEachPrecommittedSector(cb func(SectorPreCommitOnChainInfo) error) error { + precommitted, err := adt0.AsMap(s.store, s.State.PreCommittedSectors) + if err != nil { + return err + } + + var info miner0.SectorPreCommitOnChainInfo + if err := precommitted.ForEach(&info, func(_ string) error { + return cb(fromV0SectorPreCommitOnChainInfo(info)) + }); err != nil { + return err + } + + return nil +} + func (s *state0) LoadSectors(snos *bitfield.BitField) ([]*SectorOnChainInfo, error) { sectors, err := miner0.LoadSectors(s.store, s.State.Sectors) if err != nil { @@ -233,15 +256,61 @@ func (s *state0) LoadSectors(snos *bitfield.BitField) ([]*SectorOnChainInfo, err return infos, nil } -func (s *state0) IsAllocated(num abi.SectorNumber) (bool, error) { +func (s *state0) loadAllocatedSectorNumbers() (bitfield.BitField, error) { var allocatedSectors bitfield.BitField - if err := s.store.Get(s.store.Context(), s.State.AllocatedSectors, &allocatedSectors); err != nil { + err := s.store.Get(s.store.Context(), s.State.AllocatedSectors, 
&allocatedSectors) + return allocatedSectors, err +} + +func (s *state0) IsAllocated(num abi.SectorNumber) (bool, error) { + allocatedSectors, err := s.loadAllocatedSectorNumbers() + if err != nil { return false, err } return allocatedSectors.IsSet(uint64(num)) } +func (s *state0) GetProvingPeriodStart() (abi.ChainEpoch, error) { + return s.State.ProvingPeriodStart, nil +} + +func (s *state0) UnallocatedSectorNumbers(count int) ([]abi.SectorNumber, error) { + allocatedSectors, err := s.loadAllocatedSectorNumbers() + if err != nil { + return nil, err + } + + allocatedRuns, err := allocatedSectors.RunIterator() + if err != nil { + return nil, err + } + + unallocatedRuns, err := rle.Subtract( + &rle.RunSliceIterator{Runs: []rle.Run{{Val: true, Len: abi.MaxSectorNumber}}}, + allocatedRuns, + ) + if err != nil { + return nil, err + } + + iter, err := rle.BitsFromRuns(unallocatedRuns) + if err != nil { + return nil, err + } + + sectors := make([]abi.SectorNumber, 0, count) + for iter.HasNext() && len(sectors) < count { + nextNo, err := iter.Next() + if err != nil { + return nil, err + } + sectors = append(sectors, abi.SectorNumber(nextNo)) + } + + return sectors, nil +} + func (s *state0) LoadDeadline(idx uint64) (Deadline, error) { dls, err := s.State.LoadDeadlines(s.store) if err != nil { @@ -363,6 +432,13 @@ func (s *state0) decodeSectorPreCommitOnChainInfo(val *cbg.Deferred) (SectorPreC return fromV0SectorPreCommitOnChainInfo(sp), nil } +func (s *state0) EraseAllUnproven() error { + + // field doesn't exist until v2 + + return nil +} + func (d *deadline0) LoadPartition(idx uint64) (Partition, error) { p, err := d.Deadline.LoadPartition(d.store, idx) if err != nil { @@ -426,3 +502,7 @@ func fromV0SectorPreCommitOnChainInfo(v0 miner0.SectorPreCommitOnChainInfo) Sect return (SectorPreCommitOnChainInfo)(v0) } + +func (s *state0) GetState() interface{} { + return &s.State +} diff --git a/chain/actors/builtin/miner/v2.go b/chain/actors/builtin/miner/v2.go index 
7564dd8b8..45d4a7165 100644 --- a/chain/actors/builtin/miner/v2.go +++ b/chain/actors/builtin/miner/v2.go @@ -6,6 +6,7 @@ import ( "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-bitfield" + rle "github.com/filecoin-project/go-bitfield/rle" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/dline" "github.com/ipfs/go-cid" @@ -30,6 +31,12 @@ func load2(store adt.Store, root cid.Cid) (State, error) { return &out, nil } +func make2(store adt.Store) (State, error) { + out := state2{store: store} + out.State = miner2.State{} + return &out, nil +} + type state2 struct { miner2.State store adt.Store @@ -198,6 +205,22 @@ func (s *state2) GetPrecommittedSector(num abi.SectorNumber) (*SectorPreCommitOn return &ret, nil } +func (s *state2) ForEachPrecommittedSector(cb func(SectorPreCommitOnChainInfo) error) error { + precommitted, err := adt2.AsMap(s.store, s.State.PreCommittedSectors) + if err != nil { + return err + } + + var info miner2.SectorPreCommitOnChainInfo + if err := precommitted.ForEach(&info, func(_ string) error { + return cb(fromV2SectorPreCommitOnChainInfo(info)) + }); err != nil { + return err + } + + return nil +} + func (s *state2) LoadSectors(snos *bitfield.BitField) ([]*SectorOnChainInfo, error) { sectors, err := miner2.LoadSectors(s.store, s.State.Sectors) if err != nil { @@ -231,15 +254,61 @@ func (s *state2) LoadSectors(snos *bitfield.BitField) ([]*SectorOnChainInfo, err return infos, nil } -func (s *state2) IsAllocated(num abi.SectorNumber) (bool, error) { +func (s *state2) loadAllocatedSectorNumbers() (bitfield.BitField, error) { var allocatedSectors bitfield.BitField - if err := s.store.Get(s.store.Context(), s.State.AllocatedSectors, &allocatedSectors); err != nil { + err := s.store.Get(s.store.Context(), s.State.AllocatedSectors, &allocatedSectors) + return allocatedSectors, err +} + +func (s *state2) IsAllocated(num abi.SectorNumber) (bool, error) { + allocatedSectors, err := 
s.loadAllocatedSectorNumbers() + if err != nil { return false, err } return allocatedSectors.IsSet(uint64(num)) } +func (s *state2) GetProvingPeriodStart() (abi.ChainEpoch, error) { + return s.State.ProvingPeriodStart, nil +} + +func (s *state2) UnallocatedSectorNumbers(count int) ([]abi.SectorNumber, error) { + allocatedSectors, err := s.loadAllocatedSectorNumbers() + if err != nil { + return nil, err + } + + allocatedRuns, err := allocatedSectors.RunIterator() + if err != nil { + return nil, err + } + + unallocatedRuns, err := rle.Subtract( + &rle.RunSliceIterator{Runs: []rle.Run{{Val: true, Len: abi.MaxSectorNumber}}}, + allocatedRuns, + ) + if err != nil { + return nil, err + } + + iter, err := rle.BitsFromRuns(unallocatedRuns) + if err != nil { + return nil, err + } + + sectors := make([]abi.SectorNumber, 0, count) + for iter.HasNext() && len(sectors) < count { + nextNo, err := iter.Next() + if err != nil { + return nil, err + } + sectors = append(sectors, abi.SectorNumber(nextNo)) + } + + return sectors, nil +} + func (s *state2) LoadDeadline(idx uint64) (Deadline, error) { dls, err := s.State.LoadDeadlines(s.store) if err != nil { @@ -361,6 +430,43 @@ func (s *state2) decodeSectorPreCommitOnChainInfo(val *cbg.Deferred) (SectorPreC return fromV2SectorPreCommitOnChainInfo(sp), nil } +func (s *state2) EraseAllUnproven() error { + + dls, err := s.State.LoadDeadlines(s.store) + if err != nil { + return err + } + + err = dls.ForEach(s.store, func(dindx uint64, dl *miner2.Deadline) error { + ps, err := dl.PartitionsArray(s.store) + if err != nil { + return err + } + + var part miner2.Partition + err = ps.ForEach(&part, func(pindx int64) error { + _ = part.ActivateUnproven() + err = ps.Set(uint64(pindx), &part) + return nil + }) + + if err != nil { + return err + } + + dl.Partitions, err = ps.Root() + if err != nil { + return err + } + + return dls.UpdateDeadline(s.store, dindx, dl) + }) + + return s.State.SaveDeadlines(s.store, dls) + + return nil +} + func (d 
*deadline2) LoadPartition(idx uint64) (Partition, error) { p, err := d.Deadline.LoadPartition(d.store, idx) if err != nil { @@ -442,3 +548,7 @@ func fromV2SectorPreCommitOnChainInfo(v2 miner2.SectorPreCommitOnChainInfo) Sect } } + +func (s *state2) GetState() interface{} { + return &s.State +} diff --git a/chain/actors/builtin/miner/v3.go b/chain/actors/builtin/miner/v3.go index 72a080f73..166abe1e7 100644 --- a/chain/actors/builtin/miner/v3.go +++ b/chain/actors/builtin/miner/v3.go @@ -6,6 +6,7 @@ import ( "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-bitfield" + rle "github.com/filecoin-project/go-bitfield/rle" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/dline" "github.com/ipfs/go-cid" @@ -32,6 +33,12 @@ func load3(store adt.Store, root cid.Cid) (State, error) { return &out, nil } +func make3(store adt.Store) (State, error) { + out := state3{store: store} + out.State = miner3.State{} + return &out, nil +} + type state3 struct { miner3.State store adt.Store @@ -200,6 +207,22 @@ func (s *state3) GetPrecommittedSector(num abi.SectorNumber) (*SectorPreCommitOn return &ret, nil } +func (s *state3) ForEachPrecommittedSector(cb func(SectorPreCommitOnChainInfo) error) error { + precommitted, err := adt3.AsMap(s.store, s.State.PreCommittedSectors, builtin3.DefaultHamtBitwidth) + if err != nil { + return err + } + + var info miner3.SectorPreCommitOnChainInfo + if err := precommitted.ForEach(&info, func(_ string) error { + return cb(fromV3SectorPreCommitOnChainInfo(info)) + }); err != nil { + return err + } + + return nil +} + func (s *state3) LoadSectors(snos *bitfield.BitField) ([]*SectorOnChainInfo, error) { sectors, err := miner3.LoadSectors(s.store, s.State.Sectors) if err != nil { @@ -233,15 +256,61 @@ func (s *state3) LoadSectors(snos *bitfield.BitField) ([]*SectorOnChainInfo, err return infos, nil } -func (s *state3) IsAllocated(num abi.SectorNumber) (bool, error) { +func (s *state3) 
loadAllocatedSectorNumbers() (bitfield.BitField, error) { var allocatedSectors bitfield.BitField - if err := s.store.Get(s.store.Context(), s.State.AllocatedSectors, &allocatedSectors); err != nil { + err := s.store.Get(s.store.Context(), s.State.AllocatedSectors, &allocatedSectors) + return allocatedSectors, err +} + +func (s *state3) IsAllocated(num abi.SectorNumber) (bool, error) { + allocatedSectors, err := s.loadAllocatedSectorNumbers() + if err != nil { return false, err } return allocatedSectors.IsSet(uint64(num)) } +func (s *state3) GetProvingPeriodStart() (abi.ChainEpoch, error) { + return s.State.ProvingPeriodStart, nil +} + +func (s *state3) UnallocatedSectorNumbers(count int) ([]abi.SectorNumber, error) { + allocatedSectors, err := s.loadAllocatedSectorNumbers() + if err != nil { + return nil, err + } + + allocatedRuns, err := allocatedSectors.RunIterator() + if err != nil { + return nil, err + } + + unallocatedRuns, err := rle.Subtract( + &rle.RunSliceIterator{Runs: []rle.Run{{Val: true, Len: abi.MaxSectorNumber}}}, + allocatedRuns, + ) + if err != nil { + return nil, err + } + + iter, err := rle.BitsFromRuns(unallocatedRuns) + if err != nil { + return nil, err + } + + sectors := make([]abi.SectorNumber, 0, count) + for iter.HasNext() && len(sectors) < count { + nextNo, err := iter.Next() + if err != nil { + return nil, err + } + sectors = append(sectors, abi.SectorNumber(nextNo)) + } + + return sectors, nil +} + func (s *state3) LoadDeadline(idx uint64) (Deadline, error) { dls, err := s.State.LoadDeadlines(s.store) if err != nil { @@ -358,6 +427,43 @@ func (s *state3) decodeSectorPreCommitOnChainInfo(val *cbg.Deferred) (SectorPreC return fromV3SectorPreCommitOnChainInfo(sp), nil } +func (s *state3) EraseAllUnproven() error { + + dls, err := s.State.LoadDeadlines(s.store) + if err != nil { + return err + } + + err = dls.ForEach(s.store, func(dindx uint64, dl *miner3.Deadline) error { + ps, err := dl.PartitionsArray(s.store) + if err != nil { + return 
err + } + + var part miner3.Partition + err = ps.ForEach(&part, func(pindx int64) error { + _ = part.ActivateUnproven() + err = ps.Set(uint64(pindx), &part) + return nil + }) + + if err != nil { + return err + } + + dl.Partitions, err = ps.Root() + if err != nil { + return err + } + + return dls.UpdateDeadline(s.store, dindx, dl) + }) + + return s.State.SaveDeadlines(s.store, dls) + + return nil +} + func (d *deadline3) LoadPartition(idx uint64) (Partition, error) { p, err := d.Deadline.LoadPartition(d.store, idx) if err != nil { @@ -443,3 +549,7 @@ func fromV3SectorPreCommitOnChainInfo(v3 miner3.SectorPreCommitOnChainInfo) Sect } } + +func (s *state3) GetState() interface{} { + return &s.State +} diff --git a/chain/actors/builtin/miner/v4.go b/chain/actors/builtin/miner/v4.go index 698bdf2f5..71a2b9f9d 100644 --- a/chain/actors/builtin/miner/v4.go +++ b/chain/actors/builtin/miner/v4.go @@ -6,6 +6,7 @@ import ( "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-bitfield" + rle "github.com/filecoin-project/go-bitfield/rle" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/dline" "github.com/ipfs/go-cid" @@ -32,6 +33,12 @@ func load4(store adt.Store, root cid.Cid) (State, error) { return &out, nil } +func make4(store adt.Store) (State, error) { + out := state4{store: store} + out.State = miner4.State{} + return &out, nil +} + type state4 struct { miner4.State store adt.Store @@ -200,6 +207,22 @@ func (s *state4) GetPrecommittedSector(num abi.SectorNumber) (*SectorPreCommitOn return &ret, nil } +func (s *state4) ForEachPrecommittedSector(cb func(SectorPreCommitOnChainInfo) error) error { + precommitted, err := adt4.AsMap(s.store, s.State.PreCommittedSectors, builtin4.DefaultHamtBitwidth) + if err != nil { + return err + } + + var info miner4.SectorPreCommitOnChainInfo + if err := precommitted.ForEach(&info, func(_ string) error { + return cb(fromV4SectorPreCommitOnChainInfo(info)) + }); err != nil { + 
return err + } + + return nil +} + func (s *state4) LoadSectors(snos *bitfield.BitField) ([]*SectorOnChainInfo, error) { sectors, err := miner4.LoadSectors(s.store, s.State.Sectors) if err != nil { @@ -233,15 +256,61 @@ func (s *state4) LoadSectors(snos *bitfield.BitField) ([]*SectorOnChainInfo, err return infos, nil } -func (s *state4) IsAllocated(num abi.SectorNumber) (bool, error) { +func (s *state4) loadAllocatedSectorNumbers() (bitfield.BitField, error) { var allocatedSectors bitfield.BitField - if err := s.store.Get(s.store.Context(), s.State.AllocatedSectors, &allocatedSectors); err != nil { + err := s.store.Get(s.store.Context(), s.State.AllocatedSectors, &allocatedSectors) + return allocatedSectors, err +} + +func (s *state4) IsAllocated(num abi.SectorNumber) (bool, error) { + allocatedSectors, err := s.loadAllocatedSectorNumbers() + if err != nil { return false, err } return allocatedSectors.IsSet(uint64(num)) } +func (s *state4) GetProvingPeriodStart() (abi.ChainEpoch, error) { + return s.State.ProvingPeriodStart, nil +} + +func (s *state4) UnallocatedSectorNumbers(count int) ([]abi.SectorNumber, error) { + allocatedSectors, err := s.loadAllocatedSectorNumbers() + if err != nil { + return nil, err + } + + allocatedRuns, err := allocatedSectors.RunIterator() + if err != nil { + return nil, err + } + + unallocatedRuns, err := rle.Subtract( + &rle.RunSliceIterator{Runs: []rle.Run{{Val: true, Len: abi.MaxSectorNumber}}}, + allocatedRuns, + ) + if err != nil { + return nil, err + } + + iter, err := rle.BitsFromRuns(unallocatedRuns) + if err != nil { + return nil, err + } + + sectors := make([]abi.SectorNumber, 0, count) + for iter.HasNext() && len(sectors) < count { + nextNo, err := iter.Next() + if err != nil { + return nil, err + } + sectors = append(sectors, abi.SectorNumber(nextNo)) + } + + return sectors, nil +} + func (s *state4) LoadDeadline(idx uint64) (Deadline, error) { dls, err := s.State.LoadDeadlines(s.store) if err != nil { @@ -358,6 +427,43 @@ 
func (s *state4) decodeSectorPreCommitOnChainInfo(val *cbg.Deferred) (SectorPreC return fromV4SectorPreCommitOnChainInfo(sp), nil } +func (s *state4) EraseAllUnproven() error { + + dls, err := s.State.LoadDeadlines(s.store) + if err != nil { + return err + } + + err = dls.ForEach(s.store, func(dindx uint64, dl *miner4.Deadline) error { + ps, err := dl.PartitionsArray(s.store) + if err != nil { + return err + } + + var part miner4.Partition + err = ps.ForEach(&part, func(pindx int64) error { + _ = part.ActivateUnproven() + err = ps.Set(uint64(pindx), &part) + return nil + }) + + if err != nil { + return err + } + + dl.Partitions, err = ps.Root() + if err != nil { + return err + } + + return dls.UpdateDeadline(s.store, dindx, dl) + }) + + return s.State.SaveDeadlines(s.store, dls) + + return nil +} + func (d *deadline4) LoadPartition(idx uint64) (Partition, error) { p, err := d.Deadline.LoadPartition(d.store, idx) if err != nil { @@ -443,3 +549,7 @@ func fromV4SectorPreCommitOnChainInfo(v4 miner4.SectorPreCommitOnChainInfo) Sect } } + +func (s *state4) GetState() interface{} { + return &s.State +} diff --git a/chain/actors/builtin/miner/v5.go b/chain/actors/builtin/miner/v5.go index eea00b6e9..568834777 100644 --- a/chain/actors/builtin/miner/v5.go +++ b/chain/actors/builtin/miner/v5.go @@ -6,6 +6,7 @@ import ( "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-bitfield" + rle "github.com/filecoin-project/go-bitfield/rle" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/dline" "github.com/ipfs/go-cid" @@ -32,6 +33,12 @@ func load5(store adt.Store, root cid.Cid) (State, error) { return &out, nil } +func make5(store adt.Store) (State, error) { + out := state5{store: store} + out.State = miner5.State{} + return &out, nil +} + type state5 struct { miner5.State store adt.Store @@ -200,6 +207,22 @@ func (s *state5) GetPrecommittedSector(num abi.SectorNumber) (*SectorPreCommitOn return &ret, nil } +func (s 
*state5) ForEachPrecommittedSector(cb func(SectorPreCommitOnChainInfo) error) error { + precommitted, err := adt5.AsMap(s.store, s.State.PreCommittedSectors, builtin5.DefaultHamtBitwidth) + if err != nil { + return err + } + + var info miner5.SectorPreCommitOnChainInfo + if err := precommitted.ForEach(&info, func(_ string) error { + return cb(fromV5SectorPreCommitOnChainInfo(info)) + }); err != nil { + return err + } + + return nil +} + func (s *state5) LoadSectors(snos *bitfield.BitField) ([]*SectorOnChainInfo, error) { sectors, err := miner5.LoadSectors(s.store, s.State.Sectors) if err != nil { @@ -233,15 +256,61 @@ func (s *state5) LoadSectors(snos *bitfield.BitField) ([]*SectorOnChainInfo, err return infos, nil } -func (s *state5) IsAllocated(num abi.SectorNumber) (bool, error) { +func (s *state5) loadAllocatedSectorNumbers() (bitfield.BitField, error) { var allocatedSectors bitfield.BitField - if err := s.store.Get(s.store.Context(), s.State.AllocatedSectors, &allocatedSectors); err != nil { + err := s.store.Get(s.store.Context(), s.State.AllocatedSectors, &allocatedSectors) + return allocatedSectors, err +} + +func (s *state5) IsAllocated(num abi.SectorNumber) (bool, error) { + allocatedSectors, err := s.loadAllocatedSectorNumbers() + if err != nil { return false, err } return allocatedSectors.IsSet(uint64(num)) } +func (s *state5) GetProvingPeriodStart() (abi.ChainEpoch, error) { + return s.State.ProvingPeriodStart, nil +} + +func (s *state5) UnallocatedSectorNumbers(count int) ([]abi.SectorNumber, error) { + allocatedSectors, err := s.loadAllocatedSectorNumbers() + if err != nil { + return nil, err + } + + allocatedRuns, err := allocatedSectors.RunIterator() + if err != nil { + return nil, err + } + + unallocatedRuns, err := rle.Subtract( + &rle.RunSliceIterator{Runs: []rle.Run{{Val: true, Len: abi.MaxSectorNumber}}}, + allocatedRuns, + ) + if err != nil { + return nil, err + } + + iter, err := rle.BitsFromRuns(unallocatedRuns) + if err != nil { + return 
nil, err + } + + sectors := make([]abi.SectorNumber, 0, count) + for iter.HasNext() && len(sectors) < count { + nextNo, err := iter.Next() + if err != nil { + return nil, err + } + sectors = append(sectors, abi.SectorNumber(nextNo)) + } + + return sectors, nil +} + func (s *state5) LoadDeadline(idx uint64) (Deadline, error) { dls, err := s.State.LoadDeadlines(s.store) if err != nil { @@ -358,6 +427,43 @@ func (s *state5) decodeSectorPreCommitOnChainInfo(val *cbg.Deferred) (SectorPreC return fromV5SectorPreCommitOnChainInfo(sp), nil } +func (s *state5) EraseAllUnproven() error { + + dls, err := s.State.LoadDeadlines(s.store) + if err != nil { + return err + } + + err = dls.ForEach(s.store, func(dindx uint64, dl *miner5.Deadline) error { + ps, err := dl.PartitionsArray(s.store) + if err != nil { + return err + } + + var part miner5.Partition + err = ps.ForEach(&part, func(pindx int64) error { + _ = part.ActivateUnproven() + err = ps.Set(uint64(pindx), &part) + return nil + }) + + if err != nil { + return err + } + + dl.Partitions, err = ps.Root() + if err != nil { + return err + } + + return dls.UpdateDeadline(s.store, dindx, dl) + }) + + return s.State.SaveDeadlines(s.store, dls) + + return nil +} + func (d *deadline5) LoadPartition(idx uint64) (Partition, error) { p, err := d.Deadline.LoadPartition(d.store, idx) if err != nil { @@ -443,3 +549,7 @@ func fromV5SectorPreCommitOnChainInfo(v5 miner5.SectorPreCommitOnChainInfo) Sect } } + +func (s *state5) GetState() interface{} { + return &s.State +} diff --git a/chain/actors/builtin/multisig/actor.go.template b/chain/actors/builtin/multisig/actor.go.template index f741eba93..b899815a6 100644 --- a/chain/actors/builtin/multisig/actor.go.template +++ b/chain/actors/builtin/multisig/actor.go.template @@ -41,6 +41,27 @@ func Load(store adt.Store, act *types.Actor) (State, error) { return nil, xerrors.Errorf("unknown actor code %s", act.Code) } +func MakeState(store adt.Store, av actors.Version, signers []address.Address, 
threshold uint64, startEpoch abi.ChainEpoch, unlockDuration abi.ChainEpoch, initialBalance abi.TokenAmount) (State, error) { + switch av { +{{range .versions}} + case actors.Version{{.}}: + return make{{.}}(store, signers, threshold, startEpoch, unlockDuration, initialBalance) +{{end}} +} + return nil, xerrors.Errorf("unknown actor version %d", av) +} + +func GetActorCodeID(av actors.Version) (cid.Cid, error) { + switch av { +{{range .versions}} + case actors.Version{{.}}: + return builtin{{.}}.MultisigActorCodeID, nil +{{end}} + } + + return cid.Undef, xerrors.Errorf("unknown actor version %d", av) +} + type State interface { cbor.Marshaler @@ -56,6 +77,7 @@ type State interface { transactions() (adt.Map, error) decodeTransaction(val *cbg.Deferred) (Transaction, error) + GetState() interface{} } type Transaction = msig0.Transaction @@ -93,6 +115,7 @@ type MessageBuilder interface { type ProposalHashData = msig{{.latestVersion}}.ProposalHashData type ProposeReturn = msig{{.latestVersion}}.ProposeReturn type ProposeParams = msig{{.latestVersion}}.ProposeParams +type ApproveReturn = msig{{.latestVersion}}.ApproveReturn func txnParams(id uint64, data *ProposalHashData) ([]byte, error) { params := msig{{.latestVersion}}.TxnIDParams{ID: msig{{.latestVersion}}.TxnID(id)} diff --git a/chain/actors/builtin/multisig/multisig.go b/chain/actors/builtin/multisig/multisig.go index eafd418e0..c950ced90 100644 --- a/chain/actors/builtin/multisig/multisig.go +++ b/chain/actors/builtin/multisig/multisig.go @@ -76,6 +76,51 @@ func Load(store adt.Store, act *types.Actor) (State, error) { return nil, xerrors.Errorf("unknown actor code %s", act.Code) } +func MakeState(store adt.Store, av actors.Version, signers []address.Address, threshold uint64, startEpoch abi.ChainEpoch, unlockDuration abi.ChainEpoch, initialBalance abi.TokenAmount) (State, error) { + switch av { + + case actors.Version0: + return make0(store, signers, threshold, startEpoch, unlockDuration, initialBalance) + + case 
actors.Version2: + return make2(store, signers, threshold, startEpoch, unlockDuration, initialBalance) + + case actors.Version3: + return make3(store, signers, threshold, startEpoch, unlockDuration, initialBalance) + + case actors.Version4: + return make4(store, signers, threshold, startEpoch, unlockDuration, initialBalance) + + case actors.Version5: + return make5(store, signers, threshold, startEpoch, unlockDuration, initialBalance) + + } + return nil, xerrors.Errorf("unknown actor version %d", av) +} + +func GetActorCodeID(av actors.Version) (cid.Cid, error) { + switch av { + + case actors.Version0: + return builtin0.MultisigActorCodeID, nil + + case actors.Version2: + return builtin2.MultisigActorCodeID, nil + + case actors.Version3: + return builtin3.MultisigActorCodeID, nil + + case actors.Version4: + return builtin4.MultisigActorCodeID, nil + + case actors.Version5: + return builtin5.MultisigActorCodeID, nil + + } + + return cid.Undef, xerrors.Errorf("unknown actor version %d", av) +} + type State interface { cbor.Marshaler @@ -91,6 +136,7 @@ type State interface { transactions() (adt.Map, error) decodeTransaction(val *cbg.Deferred) (Transaction, error) + GetState() interface{} } type Transaction = msig0.Transaction @@ -140,6 +186,7 @@ type MessageBuilder interface { type ProposalHashData = msig5.ProposalHashData type ProposeReturn = msig5.ProposeReturn type ProposeParams = msig5.ProposeParams +type ApproveReturn = msig5.ApproveReturn func txnParams(id uint64, data *ProposalHashData) ([]byte, error) { params := msig5.TxnIDParams{ID: msig5.TxnID(id)} diff --git a/chain/actors/builtin/multisig/state.go.template b/chain/actors/builtin/multisig/state.go.template index 2316aadba..6c0130c09 100644 --- a/chain/actors/builtin/multisig/state.go.template +++ b/chain/actors/builtin/multisig/state.go.template @@ -31,6 +31,32 @@ func load{{.v}}(store adt.Store, root cid.Cid) (State, error) { return &out, nil } +func make{{.v}}(store adt.Store, signers []address.Address, 
threshold uint64, startEpoch abi.ChainEpoch, unlockDuration abi.ChainEpoch, initialBalance abi.TokenAmount) (State, error) { + out := state{{.v}}{store: store} + out.State = msig{{.v}}.State{} + out.State.Signers = signers + out.State.NumApprovalsThreshold = threshold + out.State.StartEpoch = startEpoch + out.State.UnlockDuration = unlockDuration + out.State.InitialBalance = initialBalance + {{if (le .v 2)}} + em, err := adt{{.v}}.MakeEmptyMap(store).Root() + if err != nil { + return nil, err + } + + out.State.PendingTxns = em + {{else}} + em, err := adt{{.v}}.StoreEmptyMap(store, builtin{{.v}}.DefaultHamtBitwidth) + if err != nil { + return nil, err + } + + out.State.PendingTxns = em + {{end}} + return &out, nil +} + type state{{.v}} struct { msig{{.v}}.State store adt.Store @@ -95,3 +121,7 @@ func (s *state{{.v}}) decodeTransaction(val *cbg.Deferred) (Transaction, error) } return tx, nil } + +func (s *state{{.v}}) GetState() interface{} { + return &s.State +} diff --git a/chain/actors/builtin/multisig/v0.go b/chain/actors/builtin/multisig/v0.go index 20c1557b0..973ac9209 100644 --- a/chain/actors/builtin/multisig/v0.go +++ b/chain/actors/builtin/multisig/v0.go @@ -28,6 +28,25 @@ func load0(store adt.Store, root cid.Cid) (State, error) { return &out, nil } +func make0(store adt.Store, signers []address.Address, threshold uint64, startEpoch abi.ChainEpoch, unlockDuration abi.ChainEpoch, initialBalance abi.TokenAmount) (State, error) { + out := state0{store: store} + out.State = msig0.State{} + out.State.Signers = signers + out.State.NumApprovalsThreshold = threshold + out.State.StartEpoch = startEpoch + out.State.UnlockDuration = unlockDuration + out.State.InitialBalance = initialBalance + + em, err := adt0.MakeEmptyMap(store).Root() + if err != nil { + return nil, err + } + + out.State.PendingTxns = em + + return &out, nil +} + type state0 struct { msig0.State store adt.Store @@ -92,3 +111,7 @@ func (s *state0) decodeTransaction(val *cbg.Deferred) (Transaction, 
error) { } return tx, nil } + +func (s *state0) GetState() interface{} { + return &s.State +} diff --git a/chain/actors/builtin/multisig/v2.go b/chain/actors/builtin/multisig/v2.go index ef317f903..5b830e695 100644 --- a/chain/actors/builtin/multisig/v2.go +++ b/chain/actors/builtin/multisig/v2.go @@ -28,6 +28,25 @@ func load2(store adt.Store, root cid.Cid) (State, error) { return &out, nil } +func make2(store adt.Store, signers []address.Address, threshold uint64, startEpoch abi.ChainEpoch, unlockDuration abi.ChainEpoch, initialBalance abi.TokenAmount) (State, error) { + out := state2{store: store} + out.State = msig2.State{} + out.State.Signers = signers + out.State.NumApprovalsThreshold = threshold + out.State.StartEpoch = startEpoch + out.State.UnlockDuration = unlockDuration + out.State.InitialBalance = initialBalance + + em, err := adt2.MakeEmptyMap(store).Root() + if err != nil { + return nil, err + } + + out.State.PendingTxns = em + + return &out, nil +} + type state2 struct { msig2.State store adt.Store @@ -92,3 +111,7 @@ func (s *state2) decodeTransaction(val *cbg.Deferred) (Transaction, error) { } return tx, nil } + +func (s *state2) GetState() interface{} { + return &s.State +} diff --git a/chain/actors/builtin/multisig/v3.go b/chain/actors/builtin/multisig/v3.go index 8834e4553..c4a2791b7 100644 --- a/chain/actors/builtin/multisig/v3.go +++ b/chain/actors/builtin/multisig/v3.go @@ -30,6 +30,25 @@ func load3(store adt.Store, root cid.Cid) (State, error) { return &out, nil } +func make3(store adt.Store, signers []address.Address, threshold uint64, startEpoch abi.ChainEpoch, unlockDuration abi.ChainEpoch, initialBalance abi.TokenAmount) (State, error) { + out := state3{store: store} + out.State = msig3.State{} + out.State.Signers = signers + out.State.NumApprovalsThreshold = threshold + out.State.StartEpoch = startEpoch + out.State.UnlockDuration = unlockDuration + out.State.InitialBalance = initialBalance + + em, err := adt3.StoreEmptyMap(store, 
builtin3.DefaultHamtBitwidth) + if err != nil { + return nil, err + } + + out.State.PendingTxns = em + + return &out, nil +} + type state3 struct { msig3.State store adt.Store @@ -94,3 +113,7 @@ func (s *state3) decodeTransaction(val *cbg.Deferred) (Transaction, error) { } return tx, nil } + +func (s *state3) GetState() interface{} { + return &s.State +} diff --git a/chain/actors/builtin/multisig/v4.go b/chain/actors/builtin/multisig/v4.go index 9f9dc7573..a35a890f8 100644 --- a/chain/actors/builtin/multisig/v4.go +++ b/chain/actors/builtin/multisig/v4.go @@ -30,6 +30,25 @@ func load4(store adt.Store, root cid.Cid) (State, error) { return &out, nil } +func make4(store adt.Store, signers []address.Address, threshold uint64, startEpoch abi.ChainEpoch, unlockDuration abi.ChainEpoch, initialBalance abi.TokenAmount) (State, error) { + out := state4{store: store} + out.State = msig4.State{} + out.State.Signers = signers + out.State.NumApprovalsThreshold = threshold + out.State.StartEpoch = startEpoch + out.State.UnlockDuration = unlockDuration + out.State.InitialBalance = initialBalance + + em, err := adt4.StoreEmptyMap(store, builtin4.DefaultHamtBitwidth) + if err != nil { + return nil, err + } + + out.State.PendingTxns = em + + return &out, nil +} + type state4 struct { msig4.State store adt.Store @@ -94,3 +113,7 @@ func (s *state4) decodeTransaction(val *cbg.Deferred) (Transaction, error) { } return tx, nil } + +func (s *state4) GetState() interface{} { + return &s.State +} diff --git a/chain/actors/builtin/multisig/v5.go b/chain/actors/builtin/multisig/v5.go index e879783ba..4ad9aea94 100644 --- a/chain/actors/builtin/multisig/v5.go +++ b/chain/actors/builtin/multisig/v5.go @@ -30,6 +30,25 @@ func load5(store adt.Store, root cid.Cid) (State, error) { return &out, nil } +func make5(store adt.Store, signers []address.Address, threshold uint64, startEpoch abi.ChainEpoch, unlockDuration abi.ChainEpoch, initialBalance abi.TokenAmount) (State, error) { + out := 
state5{store: store} + out.State = msig5.State{} + out.State.Signers = signers + out.State.NumApprovalsThreshold = threshold + out.State.StartEpoch = startEpoch + out.State.UnlockDuration = unlockDuration + out.State.InitialBalance = initialBalance + + em, err := adt5.StoreEmptyMap(store, builtin5.DefaultHamtBitwidth) + if err != nil { + return nil, err + } + + out.State.PendingTxns = em + + return &out, nil +} + type state5 struct { msig5.State store adt.Store @@ -94,3 +113,7 @@ func (s *state5) decodeTransaction(val *cbg.Deferred) (Transaction, error) { } return tx, nil } + +func (s *state5) GetState() interface{} { + return &s.State +} diff --git a/chain/actors/builtin/paych/actor.go.template b/chain/actors/builtin/paych/actor.go.template index 3f68a5cfa..7699e76b6 100644 --- a/chain/actors/builtin/paych/actor.go.template +++ b/chain/actors/builtin/paych/actor.go.template @@ -42,6 +42,27 @@ func Load(store adt.Store, act *types.Actor) (State, error) { return nil, xerrors.Errorf("unknown actor code %s", act.Code) } +func MakeState(store adt.Store, av actors.Version) (State, error) { + switch av { +{{range .versions}} + case actors.Version{{.}}: + return make{{.}}(store) +{{end}} +} + return nil, xerrors.Errorf("unknown actor version %d", av) +} + +func GetActorCodeID(av actors.Version) (cid.Cid, error) { + switch av { +{{range .versions}} + case actors.Version{{.}}: + return builtin{{.}}.PaymentChannelActorCodeID, nil +{{end}} + } + + return cid.Undef, xerrors.Errorf("unknown actor version %d", av) +} + // State is an abstract version of payment channel state that works across // versions type State interface { @@ -62,6 +83,8 @@ type State interface { // Iterate lane states ForEachLaneState(cb func(idx uint64, dl LaneState) error) error + + GetState() interface{} } // LaneState is an abstract copy of the state of a single lane diff --git a/chain/actors/builtin/paych/mock/mock.go b/chain/actors/builtin/paych/mock/mock.go index 3b82511ff..1ecfa1130 100644 --- 
a/chain/actors/builtin/paych/mock/mock.go +++ b/chain/actors/builtin/paych/mock/mock.go @@ -17,6 +17,10 @@ type mockState struct { lanes map[uint64]paych.LaneState } +func (ms *mockState) GetState() interface{} { + panic("implement me") +} + type mockLaneState struct { redeemed big.Int nonce uint64 diff --git a/chain/actors/builtin/paych/paych.go b/chain/actors/builtin/paych/paych.go index bafd6e94d..d87f70f0c 100644 --- a/chain/actors/builtin/paych/paych.go +++ b/chain/actors/builtin/paych/paych.go @@ -77,6 +77,51 @@ func Load(store adt.Store, act *types.Actor) (State, error) { return nil, xerrors.Errorf("unknown actor code %s", act.Code) } +func MakeState(store adt.Store, av actors.Version) (State, error) { + switch av { + + case actors.Version0: + return make0(store) + + case actors.Version2: + return make2(store) + + case actors.Version3: + return make3(store) + + case actors.Version4: + return make4(store) + + case actors.Version5: + return make5(store) + + } + return nil, xerrors.Errorf("unknown actor version %d", av) +} + +func GetActorCodeID(av actors.Version) (cid.Cid, error) { + switch av { + + case actors.Version0: + return builtin0.PaymentChannelActorCodeID, nil + + case actors.Version2: + return builtin2.PaymentChannelActorCodeID, nil + + case actors.Version3: + return builtin3.PaymentChannelActorCodeID, nil + + case actors.Version4: + return builtin4.PaymentChannelActorCodeID, nil + + case actors.Version5: + return builtin5.PaymentChannelActorCodeID, nil + + } + + return cid.Undef, xerrors.Errorf("unknown actor version %d", av) +} + // State is an abstract version of payment channel state that works across // versions type State interface { @@ -97,6 +142,8 @@ type State interface { // Iterate lane states ForEachLaneState(cb func(idx uint64, dl LaneState) error) error + + GetState() interface{} } // LaneState is an abstract copy of the state of a single lane diff --git a/chain/actors/builtin/paych/state.go.template 
b/chain/actors/builtin/paych/state.go.template index b4b575a3e..3e41f5be5 100644 --- a/chain/actors/builtin/paych/state.go.template +++ b/chain/actors/builtin/paych/state.go.template @@ -24,6 +24,12 @@ func load{{.v}}(store adt.Store, root cid.Cid) (State, error) { return &out, nil } +func make{{.v}}(store adt.Store) (State, error) { + out := state{{.v}}{store: store} + out.State = paych{{.v}}.State{} + return &out, nil +} + type state{{.v}} struct { paych{{.v}}.State store adt.Store @@ -74,6 +80,10 @@ func (s *state{{.v}}) LaneCount() (uint64, error) { return lsamt.Length(), nil } +func (s *state{{.v}}) GetState() interface{} { + return &s.State +} + // Iterate lane states func (s *state{{.v}}) ForEachLaneState(cb func(idx uint64, dl LaneState) error) error { // Get the lane state from the chain diff --git a/chain/actors/builtin/paych/v0.go b/chain/actors/builtin/paych/v0.go index 8e0e3434e..e9bc30e3d 100644 --- a/chain/actors/builtin/paych/v0.go +++ b/chain/actors/builtin/paych/v0.go @@ -24,6 +24,12 @@ func load0(store adt.Store, root cid.Cid) (State, error) { return &out, nil } +func make0(store adt.Store) (State, error) { + out := state0{store: store} + out.State = paych0.State{} + return &out, nil +} + type state0 struct { paych0.State store adt.Store @@ -74,6 +80,10 @@ func (s *state0) LaneCount() (uint64, error) { return lsamt.Length(), nil } +func (s *state0) GetState() interface{} { + return &s.State +} + // Iterate lane states func (s *state0) ForEachLaneState(cb func(idx uint64, dl LaneState) error) error { // Get the lane state from the chain diff --git a/chain/actors/builtin/paych/v2.go b/chain/actors/builtin/paych/v2.go index fbf4b9fde..400305e2f 100644 --- a/chain/actors/builtin/paych/v2.go +++ b/chain/actors/builtin/paych/v2.go @@ -24,6 +24,12 @@ func load2(store adt.Store, root cid.Cid) (State, error) { return &out, nil } +func make2(store adt.Store) (State, error) { + out := state2{store: store} + out.State = paych2.State{} + return &out, nil +} + 
type state2 struct { paych2.State store adt.Store @@ -74,6 +80,10 @@ func (s *state2) LaneCount() (uint64, error) { return lsamt.Length(), nil } +func (s *state2) GetState() interface{} { + return &s.State +} + // Iterate lane states func (s *state2) ForEachLaneState(cb func(idx uint64, dl LaneState) error) error { // Get the lane state from the chain diff --git a/chain/actors/builtin/paych/v3.go b/chain/actors/builtin/paych/v3.go index 14bb4cb61..1d7c2f94b 100644 --- a/chain/actors/builtin/paych/v3.go +++ b/chain/actors/builtin/paych/v3.go @@ -24,6 +24,12 @@ func load3(store adt.Store, root cid.Cid) (State, error) { return &out, nil } +func make3(store adt.Store) (State, error) { + out := state3{store: store} + out.State = paych3.State{} + return &out, nil +} + type state3 struct { paych3.State store adt.Store @@ -74,6 +80,10 @@ func (s *state3) LaneCount() (uint64, error) { return lsamt.Length(), nil } +func (s *state3) GetState() interface{} { + return &s.State +} + // Iterate lane states func (s *state3) ForEachLaneState(cb func(idx uint64, dl LaneState) error) error { // Get the lane state from the chain diff --git a/chain/actors/builtin/paych/v4.go b/chain/actors/builtin/paych/v4.go index cf37eea5c..b7d1e52a5 100644 --- a/chain/actors/builtin/paych/v4.go +++ b/chain/actors/builtin/paych/v4.go @@ -24,6 +24,12 @@ func load4(store adt.Store, root cid.Cid) (State, error) { return &out, nil } +func make4(store adt.Store) (State, error) { + out := state4{store: store} + out.State = paych4.State{} + return &out, nil +} + type state4 struct { paych4.State store adt.Store @@ -74,6 +80,10 @@ func (s *state4) LaneCount() (uint64, error) { return lsamt.Length(), nil } +func (s *state4) GetState() interface{} { + return &s.State +} + // Iterate lane states func (s *state4) ForEachLaneState(cb func(idx uint64, dl LaneState) error) error { // Get the lane state from the chain diff --git a/chain/actors/builtin/paych/v5.go b/chain/actors/builtin/paych/v5.go index 
f878d858c..b331a1500 100644 --- a/chain/actors/builtin/paych/v5.go +++ b/chain/actors/builtin/paych/v5.go @@ -24,6 +24,12 @@ func load5(store adt.Store, root cid.Cid) (State, error) { return &out, nil } +func make5(store adt.Store) (State, error) { + out := state5{store: store} + out.State = paych5.State{} + return &out, nil +} + type state5 struct { paych5.State store adt.Store @@ -74,6 +80,10 @@ func (s *state5) LaneCount() (uint64, error) { return lsamt.Length(), nil } +func (s *state5) GetState() interface{} { + return &s.State +} + // Iterate lane states func (s *state5) ForEachLaneState(cb func(idx uint64, dl LaneState) error) error { // Get the lane state from the chain diff --git a/chain/actors/builtin/power/actor.go.template b/chain/actors/builtin/power/actor.go.template index 82f791e58..fe11fc160 100644 --- a/chain/actors/builtin/power/actor.go.template +++ b/chain/actors/builtin/power/actor.go.template @@ -3,6 +3,7 @@ package power import ( "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/lotus/chain/actors" "github.com/ipfs/go-cid" cbg "github.com/whyrusleeping/cbor-gen" "golang.org/x/xerrors" @@ -40,6 +41,27 @@ func Load(store adt.Store, act *types.Actor) (State, error) { return nil, xerrors.Errorf("unknown actor code %s", act.Code) } +func MakeState(store adt.Store, av actors.Version) (State, error) { + switch av { +{{range .versions}} + case actors.Version{{.}}: + return make{{.}}(store) +{{end}} +} + return nil, xerrors.Errorf("unknown actor version %d", av) +} + +func GetActorCodeID(av actors.Version) (cid.Cid, error) { + switch av { +{{range .versions}} + case actors.Version{{.}}: + return builtin{{.}}.StoragePowerActorCodeID, nil +{{end}} + } + + return cid.Undef, xerrors.Errorf("unknown actor version %d", av) +} + type State interface { cbor.Marshaler @@ -47,6 +69,7 @@ type State interface { TotalPower() (Claim, error) TotalCommitted() (Claim, error) TotalPowerSmoothed() 
(builtin.FilterEstimate, error) + GetState() interface{} // MinerCounts returns the number of miners. Participating is the number // with power above the minimum miner threshold. @@ -57,6 +80,12 @@ type State interface { ForEachClaim(func(miner address.Address, claim Claim) error) error ClaimsChanged(State) (bool, error) + // Testing or genesis setup only + SetTotalQualityAdjPower(abi.StoragePower) error + SetTotalRawBytePower(abi.StoragePower) error + SetThisEpochQualityAdjPower(abi.StoragePower) error + SetThisEpochRawBytePower(abi.StoragePower) error + // Diff helpers. Used by Diff* functions internally. claims() (adt.Map, error) decodeClaim(*cbg.Deferred) (Claim, error) diff --git a/chain/actors/builtin/power/power.go b/chain/actors/builtin/power/power.go index 2cf59ef02..5b4aa1b04 100644 --- a/chain/actors/builtin/power/power.go +++ b/chain/actors/builtin/power/power.go @@ -3,6 +3,7 @@ package power import ( "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/lotus/chain/actors" "github.com/ipfs/go-cid" cbg "github.com/whyrusleeping/cbor-gen" "golang.org/x/xerrors" @@ -75,6 +76,51 @@ func Load(store adt.Store, act *types.Actor) (State, error) { return nil, xerrors.Errorf("unknown actor code %s", act.Code) } +func MakeState(store adt.Store, av actors.Version) (State, error) { + switch av { + + case actors.Version0: + return make0(store) + + case actors.Version2: + return make2(store) + + case actors.Version3: + return make3(store) + + case actors.Version4: + return make4(store) + + case actors.Version5: + return make5(store) + + } + return nil, xerrors.Errorf("unknown actor version %d", av) +} + +func GetActorCodeID(av actors.Version) (cid.Cid, error) { + switch av { + + case actors.Version0: + return builtin0.StoragePowerActorCodeID, nil + + case actors.Version2: + return builtin2.StoragePowerActorCodeID, nil + + case actors.Version3: + return builtin3.StoragePowerActorCodeID, nil + + case 
actors.Version4: + return builtin4.StoragePowerActorCodeID, nil + + case actors.Version5: + return builtin5.StoragePowerActorCodeID, nil + + } + + return cid.Undef, xerrors.Errorf("unknown actor version %d", av) +} + type State interface { cbor.Marshaler @@ -82,6 +128,7 @@ type State interface { TotalPower() (Claim, error) TotalCommitted() (Claim, error) TotalPowerSmoothed() (builtin.FilterEstimate, error) + GetState() interface{} // MinerCounts returns the number of miners. Participating is the number // with power above the minimum miner threshold. @@ -92,6 +139,12 @@ type State interface { ForEachClaim(func(miner address.Address, claim Claim) error) error ClaimsChanged(State) (bool, error) + // Testing or genesis setup only + SetTotalQualityAdjPower(abi.StoragePower) error + SetTotalRawBytePower(abi.StoragePower) error + SetThisEpochQualityAdjPower(abi.StoragePower) error + SetThisEpochRawBytePower(abi.StoragePower) error + // Diff helpers. Used by Diff* functions internally. claims() (adt.Map, error) decodeClaim(*cbg.Deferred) (Claim, error) diff --git a/chain/actors/builtin/power/state.go.template b/chain/actors/builtin/power/state.go.template index 4cb904a1d..fcdc5c350 100644 --- a/chain/actors/builtin/power/state.go.template +++ b/chain/actors/builtin/power/state.go.template @@ -29,6 +29,32 @@ func load{{.v}}(store adt.Store, root cid.Cid) (State, error) { return &out, nil } +func make{{.v}}(store adt.Store) (State, error) { + out := state{{.v}}{store: store} + {{if (le .v 2)}} + em, err := adt{{.v}}.MakeEmptyMap(store).Root() + if err != nil { + return nil, err + } + + emm, err := adt{{.v}}.MakeEmptyMultimap(store).Root() + if err != nil { + return nil, err + } + + out.State = *power{{.v}}.ConstructState(em, emm) + {{else}} + s, err := power{{.v}}.ConstructState(store) + if err != nil { + return nil, err + } + + out.State = *s + {{end}} + + return &out, nil +} + type state{{.v}} struct { power{{.v}}.State store adt.Store @@ -131,6 +157,30 @@ func (s 
*state{{.v}}) ClaimsChanged(other State) (bool, error) { return !s.State.Claims.Equals(other{{.v}}.State.Claims), nil } +func (s *state{{.v}}) SetTotalQualityAdjPower(p abi.StoragePower) error { + s.State.TotalQualityAdjPower = p + return nil +} + +func (s *state{{.v}}) SetTotalRawBytePower(p abi.StoragePower) error { + s.State.TotalRawBytePower = p + return nil +} + +func (s *state{{.v}}) SetThisEpochQualityAdjPower(p abi.StoragePower) error { + s.State.ThisEpochQualityAdjPower = p + return nil +} + +func (s *state{{.v}}) SetThisEpochRawBytePower(p abi.StoragePower) error { + s.State.ThisEpochRawBytePower = p + return nil +} + +func (s *state{{.v}}) GetState() interface{} { + return &s.State +} + func (s *state{{.v}}) claims() (adt.Map, error) { return adt{{.v}}.AsMap(s.store, s.Claims{{if (ge .v 3)}}, builtin{{.v}}.DefaultHamtBitwidth{{end}}) } diff --git a/chain/actors/builtin/power/v0.go b/chain/actors/builtin/power/v0.go index 91fad8c57..465d16c5c 100644 --- a/chain/actors/builtin/power/v0.go +++ b/chain/actors/builtin/power/v0.go @@ -26,6 +26,24 @@ func load0(store adt.Store, root cid.Cid) (State, error) { return &out, nil } +func make0(store adt.Store) (State, error) { + out := state0{store: store} + + em, err := adt0.MakeEmptyMap(store).Root() + if err != nil { + return nil, err + } + + emm, err := adt0.MakeEmptyMultimap(store).Root() + if err != nil { + return nil, err + } + + out.State = *power0.ConstructState(em, emm) + + return &out, nil +} + type state0 struct { power0.State store adt.Store @@ -128,6 +146,30 @@ func (s *state0) ClaimsChanged(other State) (bool, error) { return !s.State.Claims.Equals(other0.State.Claims), nil } +func (s *state0) SetTotalQualityAdjPower(p abi.StoragePower) error { + s.State.TotalQualityAdjPower = p + return nil +} + +func (s *state0) SetTotalRawBytePower(p abi.StoragePower) error { + s.State.TotalRawBytePower = p + return nil +} + +func (s *state0) SetThisEpochQualityAdjPower(p abi.StoragePower) error { + 
s.State.ThisEpochQualityAdjPower = p + return nil +} + +func (s *state0) SetThisEpochRawBytePower(p abi.StoragePower) error { + s.State.ThisEpochRawBytePower = p + return nil +} + +func (s *state0) GetState() interface{} { + return &s.State +} + func (s *state0) claims() (adt.Map, error) { return adt0.AsMap(s.store, s.Claims) } diff --git a/chain/actors/builtin/power/v2.go b/chain/actors/builtin/power/v2.go index 313160a78..606534cef 100644 --- a/chain/actors/builtin/power/v2.go +++ b/chain/actors/builtin/power/v2.go @@ -26,6 +26,24 @@ func load2(store adt.Store, root cid.Cid) (State, error) { return &out, nil } +func make2(store adt.Store) (State, error) { + out := state2{store: store} + + em, err := adt2.MakeEmptyMap(store).Root() + if err != nil { + return nil, err + } + + emm, err := adt2.MakeEmptyMultimap(store).Root() + if err != nil { + return nil, err + } + + out.State = *power2.ConstructState(em, emm) + + return &out, nil +} + type state2 struct { power2.State store adt.Store @@ -128,6 +146,30 @@ func (s *state2) ClaimsChanged(other State) (bool, error) { return !s.State.Claims.Equals(other2.State.Claims), nil } +func (s *state2) SetTotalQualityAdjPower(p abi.StoragePower) error { + s.State.TotalQualityAdjPower = p + return nil +} + +func (s *state2) SetTotalRawBytePower(p abi.StoragePower) error { + s.State.TotalRawBytePower = p + return nil +} + +func (s *state2) SetThisEpochQualityAdjPower(p abi.StoragePower) error { + s.State.ThisEpochQualityAdjPower = p + return nil +} + +func (s *state2) SetThisEpochRawBytePower(p abi.StoragePower) error { + s.State.ThisEpochRawBytePower = p + return nil +} + +func (s *state2) GetState() interface{} { + return &s.State +} + func (s *state2) claims() (adt.Map, error) { return adt2.AsMap(s.store, s.Claims) } diff --git a/chain/actors/builtin/power/v3.go b/chain/actors/builtin/power/v3.go index 2ef1e2808..3dec3c63e 100644 --- a/chain/actors/builtin/power/v3.go +++ b/chain/actors/builtin/power/v3.go @@ -28,6 +28,19 @@ 
func load3(store adt.Store, root cid.Cid) (State, error) { return &out, nil } +func make3(store adt.Store) (State, error) { + out := state3{store: store} + + s, err := power3.ConstructState(store) + if err != nil { + return nil, err + } + + out.State = *s + + return &out, nil +} + type state3 struct { power3.State store adt.Store @@ -130,6 +143,30 @@ func (s *state3) ClaimsChanged(other State) (bool, error) { return !s.State.Claims.Equals(other3.State.Claims), nil } +func (s *state3) SetTotalQualityAdjPower(p abi.StoragePower) error { + s.State.TotalQualityAdjPower = p + return nil +} + +func (s *state3) SetTotalRawBytePower(p abi.StoragePower) error { + s.State.TotalRawBytePower = p + return nil +} + +func (s *state3) SetThisEpochQualityAdjPower(p abi.StoragePower) error { + s.State.ThisEpochQualityAdjPower = p + return nil +} + +func (s *state3) SetThisEpochRawBytePower(p abi.StoragePower) error { + s.State.ThisEpochRawBytePower = p + return nil +} + +func (s *state3) GetState() interface{} { + return &s.State +} + func (s *state3) claims() (adt.Map, error) { return adt3.AsMap(s.store, s.Claims, builtin3.DefaultHamtBitwidth) } diff --git a/chain/actors/builtin/power/v4.go b/chain/actors/builtin/power/v4.go index 686550456..b73eedf5a 100644 --- a/chain/actors/builtin/power/v4.go +++ b/chain/actors/builtin/power/v4.go @@ -28,6 +28,19 @@ func load4(store adt.Store, root cid.Cid) (State, error) { return &out, nil } +func make4(store adt.Store) (State, error) { + out := state4{store: store} + + s, err := power4.ConstructState(store) + if err != nil { + return nil, err + } + + out.State = *s + + return &out, nil +} + type state4 struct { power4.State store adt.Store @@ -130,6 +143,30 @@ func (s *state4) ClaimsChanged(other State) (bool, error) { return !s.State.Claims.Equals(other4.State.Claims), nil } +func (s *state4) SetTotalQualityAdjPower(p abi.StoragePower) error { + s.State.TotalQualityAdjPower = p + return nil +} + +func (s *state4) SetTotalRawBytePower(p 
abi.StoragePower) error { + s.State.TotalRawBytePower = p + return nil +} + +func (s *state4) SetThisEpochQualityAdjPower(p abi.StoragePower) error { + s.State.ThisEpochQualityAdjPower = p + return nil +} + +func (s *state4) SetThisEpochRawBytePower(p abi.StoragePower) error { + s.State.ThisEpochRawBytePower = p + return nil +} + +func (s *state4) GetState() interface{} { + return &s.State +} + func (s *state4) claims() (adt.Map, error) { return adt4.AsMap(s.store, s.Claims, builtin4.DefaultHamtBitwidth) } diff --git a/chain/actors/builtin/power/v5.go b/chain/actors/builtin/power/v5.go index 33a5d3b62..84b23a577 100644 --- a/chain/actors/builtin/power/v5.go +++ b/chain/actors/builtin/power/v5.go @@ -28,6 +28,19 @@ func load5(store adt.Store, root cid.Cid) (State, error) { return &out, nil } +func make5(store adt.Store) (State, error) { + out := state5{store: store} + + s, err := power5.ConstructState(store) + if err != nil { + return nil, err + } + + out.State = *s + + return &out, nil +} + type state5 struct { power5.State store adt.Store @@ -130,6 +143,30 @@ func (s *state5) ClaimsChanged(other State) (bool, error) { return !s.State.Claims.Equals(other5.State.Claims), nil } +func (s *state5) SetTotalQualityAdjPower(p abi.StoragePower) error { + s.State.TotalQualityAdjPower = p + return nil +} + +func (s *state5) SetTotalRawBytePower(p abi.StoragePower) error { + s.State.TotalRawBytePower = p + return nil +} + +func (s *state5) SetThisEpochQualityAdjPower(p abi.StoragePower) error { + s.State.ThisEpochQualityAdjPower = p + return nil +} + +func (s *state5) SetThisEpochRawBytePower(p abi.StoragePower) error { + s.State.ThisEpochRawBytePower = p + return nil +} + +func (s *state5) GetState() interface{} { + return &s.State +} + func (s *state5) claims() (adt.Map, error) { return adt5.AsMap(s.store, s.Claims, builtin5.DefaultHamtBitwidth) } diff --git a/chain/actors/builtin/reward/actor.go.template b/chain/actors/builtin/reward/actor.go.template index 
81437d26f..89cdddaec 100644 --- a/chain/actors/builtin/reward/actor.go.template +++ b/chain/actors/builtin/reward/actor.go.template @@ -4,6 +4,7 @@ import ( "github.com/filecoin-project/go-state-types/abi" reward0 "github.com/filecoin-project/specs-actors/actors/builtin/reward" "github.com/ipfs/go-cid" + "github.com/filecoin-project/lotus/chain/actors" "golang.org/x/xerrors" "github.com/filecoin-project/go-state-types/cbor" @@ -38,6 +39,27 @@ func Load(store adt.Store, act *types.Actor) (State, error) { return nil, xerrors.Errorf("unknown actor code %s", act.Code) } +func MakeState(store adt.Store, av actors.Version, currRealizedPower abi.StoragePower) (State, error) { + switch av { +{{range .versions}} + case actors.Version{{.}}: + return make{{.}}(store, currRealizedPower) +{{end}} +} + return nil, xerrors.Errorf("unknown actor version %d", av) +} + +func GetActorCodeID(av actors.Version) (cid.Cid, error) { + switch av { +{{range .versions}} + case actors.Version{{.}}: + return builtin{{.}}.RewardActorCodeID, nil +{{end}} + } + + return cid.Undef, xerrors.Errorf("unknown actor version %d", av) +} + type State interface { cbor.Marshaler @@ -55,6 +77,7 @@ type State interface { InitialPledgeForPower(abi.StoragePower, abi.TokenAmount, *builtin.FilterEstimate, abi.TokenAmount) (abi.TokenAmount, error) PreCommitDepositForPower(builtin.FilterEstimate, abi.StoragePower) (abi.TokenAmount, error) + GetState() interface{} } type AwardBlockRewardParams = reward0.AwardBlockRewardParams diff --git a/chain/actors/builtin/reward/reward.go b/chain/actors/builtin/reward/reward.go index 5f6131334..ebec85517 100644 --- a/chain/actors/builtin/reward/reward.go +++ b/chain/actors/builtin/reward/reward.go @@ -2,6 +2,7 @@ package reward import ( "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/lotus/chain/actors" reward0 "github.com/filecoin-project/specs-actors/actors/builtin/reward" "github.com/ipfs/go-cid" "golang.org/x/xerrors" @@ -73,6 +74,51 @@ func 
Load(store adt.Store, act *types.Actor) (State, error) { return nil, xerrors.Errorf("unknown actor code %s", act.Code) } +func MakeState(store adt.Store, av actors.Version, currRealizedPower abi.StoragePower) (State, error) { + switch av { + + case actors.Version0: + return make0(store, currRealizedPower) + + case actors.Version2: + return make2(store, currRealizedPower) + + case actors.Version3: + return make3(store, currRealizedPower) + + case actors.Version4: + return make4(store, currRealizedPower) + + case actors.Version5: + return make5(store, currRealizedPower) + + } + return nil, xerrors.Errorf("unknown actor version %d", av) +} + +func GetActorCodeID(av actors.Version) (cid.Cid, error) { + switch av { + + case actors.Version0: + return builtin0.RewardActorCodeID, nil + + case actors.Version2: + return builtin2.RewardActorCodeID, nil + + case actors.Version3: + return builtin3.RewardActorCodeID, nil + + case actors.Version4: + return builtin4.RewardActorCodeID, nil + + case actors.Version5: + return builtin5.RewardActorCodeID, nil + + } + + return cid.Undef, xerrors.Errorf("unknown actor version %d", av) +} + type State interface { cbor.Marshaler @@ -90,6 +136,7 @@ type State interface { InitialPledgeForPower(abi.StoragePower, abi.TokenAmount, *builtin.FilterEstimate, abi.TokenAmount) (abi.TokenAmount, error) PreCommitDepositForPower(builtin.FilterEstimate, abi.StoragePower) (abi.TokenAmount, error) + GetState() interface{} } type AwardBlockRewardParams = reward0.AwardBlockRewardParams diff --git a/chain/actors/builtin/reward/state.go.template b/chain/actors/builtin/reward/state.go.template index 1758d1413..2bc271cbb 100644 --- a/chain/actors/builtin/reward/state.go.template +++ b/chain/actors/builtin/reward/state.go.template @@ -23,6 +23,12 @@ func load{{.v}}(store adt.Store, root cid.Cid) (State, error) { return &out, nil } +func make{{.v}}(store adt.Store, currRealizedPower abi.StoragePower) (State, error) { + out := state{{.v}}{store: store} + out.State 
= *reward{{.v}}.ConstructState(currRealizedPower) + return &out, nil +} + type state{{.v}} struct { reward{{.v}}.State store adt.Store @@ -101,3 +107,7 @@ func (s *state{{.v}}) PreCommitDepositForPower(networkQAPower builtin.FilterEsti }, sectorWeight), nil } + +func (s *state{{.v}}) GetState() interface{} { + return &s.State +} diff --git a/chain/actors/builtin/reward/v0.go b/chain/actors/builtin/reward/v0.go index fe053cc16..cd098c151 100644 --- a/chain/actors/builtin/reward/v0.go +++ b/chain/actors/builtin/reward/v0.go @@ -23,6 +23,12 @@ func load0(store adt.Store, root cid.Cid) (State, error) { return &out, nil } +func make0(store adt.Store, currRealizedPower abi.StoragePower) (State, error) { + out := state0{store: store} + out.State = *reward0.ConstructState(currRealizedPower) + return &out, nil +} + type state0 struct { reward0.State store adt.Store @@ -83,3 +89,7 @@ func (s *state0) PreCommitDepositForPower(networkQAPower builtin.FilterEstimate, }, sectorWeight), nil } + +func (s *state0) GetState() interface{} { + return &s.State +} diff --git a/chain/actors/builtin/reward/v2.go b/chain/actors/builtin/reward/v2.go index 90621e467..08e9a7bc3 100644 --- a/chain/actors/builtin/reward/v2.go +++ b/chain/actors/builtin/reward/v2.go @@ -23,6 +23,12 @@ func load2(store adt.Store, root cid.Cid) (State, error) { return &out, nil } +func make2(store adt.Store, currRealizedPower abi.StoragePower) (State, error) { + out := state2{store: store} + out.State = *reward2.ConstructState(currRealizedPower) + return &out, nil +} + type state2 struct { reward2.State store adt.Store @@ -86,3 +92,7 @@ func (s *state2) PreCommitDepositForPower(networkQAPower builtin.FilterEstimate, }, sectorWeight), nil } + +func (s *state2) GetState() interface{} { + return &s.State +} diff --git a/chain/actors/builtin/reward/v3.go b/chain/actors/builtin/reward/v3.go index 926cc085b..fd9fa56e2 100644 --- a/chain/actors/builtin/reward/v3.go +++ b/chain/actors/builtin/reward/v3.go @@ -23,6 +23,12 
@@ func load3(store adt.Store, root cid.Cid) (State, error) { return &out, nil } +func make3(store adt.Store, currRealizedPower abi.StoragePower) (State, error) { + out := state3{store: store} + out.State = *reward3.ConstructState(currRealizedPower) + return &out, nil +} + type state3 struct { reward3.State store adt.Store @@ -86,3 +92,7 @@ func (s *state3) PreCommitDepositForPower(networkQAPower builtin.FilterEstimate, }, sectorWeight), nil } + +func (s *state3) GetState() interface{} { + return &s.State +} diff --git a/chain/actors/builtin/reward/v4.go b/chain/actors/builtin/reward/v4.go index f034b0018..310ca04e8 100644 --- a/chain/actors/builtin/reward/v4.go +++ b/chain/actors/builtin/reward/v4.go @@ -23,6 +23,12 @@ func load4(store adt.Store, root cid.Cid) (State, error) { return &out, nil } +func make4(store adt.Store, currRealizedPower abi.StoragePower) (State, error) { + out := state4{store: store} + out.State = *reward4.ConstructState(currRealizedPower) + return &out, nil +} + type state4 struct { reward4.State store adt.Store @@ -86,3 +92,7 @@ func (s *state4) PreCommitDepositForPower(networkQAPower builtin.FilterEstimate, }, sectorWeight), nil } + +func (s *state4) GetState() interface{} { + return &s.State +} diff --git a/chain/actors/builtin/reward/v5.go b/chain/actors/builtin/reward/v5.go index 0dd75de73..7200f7d11 100644 --- a/chain/actors/builtin/reward/v5.go +++ b/chain/actors/builtin/reward/v5.go @@ -23,6 +23,12 @@ func load5(store adt.Store, root cid.Cid) (State, error) { return &out, nil } +func make5(store adt.Store, currRealizedPower abi.StoragePower) (State, error) { + out := state5{store: store} + out.State = *reward5.ConstructState(currRealizedPower) + return &out, nil +} + type state5 struct { reward5.State store adt.Store @@ -86,3 +92,7 @@ func (s *state5) PreCommitDepositForPower(networkQAPower builtin.FilterEstimate, }, sectorWeight), nil } + +func (s *state5) GetState() interface{} { + return &s.State +} diff --git 
a/chain/actors/builtin/system/actor.go.template b/chain/actors/builtin/system/actor.go.template new file mode 100644 index 000000000..925319970 --- /dev/null +++ b/chain/actors/builtin/system/actor.go.template @@ -0,0 +1,41 @@ +package system + +import ( + "github.com/filecoin-project/lotus/chain/actors/adt" + "github.com/filecoin-project/lotus/chain/actors" + "golang.org/x/xerrors" + "github.com/ipfs/go-cid" + +{{range .versions}} + builtin{{.}} "github.com/filecoin-project/specs-actors{{import .}}actors/builtin" +{{end}} +) + +var ( + Address = builtin{{.latestVersion}}.SystemActorAddr +) + +func MakeState(store adt.Store, av actors.Version) (State, error) { + switch av { +{{range .versions}} + case actors.Version{{.}}: + return make{{.}}(store) +{{end}} +} + return nil, xerrors.Errorf("unknown actor version %d", av) +} + +func GetActorCodeID(av actors.Version) (cid.Cid, error) { + switch av { +{{range .versions}} + case actors.Version{{.}}: + return builtin{{.}}.SystemActorCodeID, nil +{{end}} + } + + return cid.Undef, xerrors.Errorf("unknown actor version %d", av) +} + +type State interface { + GetState() interface{} +} diff --git a/chain/actors/builtin/system/state.go.template b/chain/actors/builtin/system/state.go.template new file mode 100644 index 000000000..fa644f8c7 --- /dev/null +++ b/chain/actors/builtin/system/state.go.template @@ -0,0 +1,35 @@ +package system + +import ( + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/lotus/chain/actors/adt" + + system{{.v}} "github.com/filecoin-project/specs-actors{{.import}}actors/builtin/system" +) + +var _ State = (*state{{.v}})(nil) + +func load{{.v}}(store adt.Store, root cid.Cid) (State, error) { + out := state{{.v}}{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make{{.v}}(store adt.Store) (State, error) { + out := state{{.v}}{store: store} + out.State = system{{.v}}.State{} + return &out, nil +} + +type 
state{{.v}} struct { + system{{.v}}.State + store adt.Store +} + +func (s *state{{.v}}) GetState() interface{} { + return &s.State +} \ No newline at end of file diff --git a/chain/actors/builtin/system/system.go b/chain/actors/builtin/system/system.go new file mode 100644 index 000000000..289fb4d5d --- /dev/null +++ b/chain/actors/builtin/system/system.go @@ -0,0 +1,71 @@ +package system + +import ( + "github.com/filecoin-project/lotus/chain/actors" + "github.com/filecoin-project/lotus/chain/actors/adt" + "github.com/ipfs/go-cid" + "golang.org/x/xerrors" + + builtin0 "github.com/filecoin-project/specs-actors/actors/builtin" + + builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin" + + builtin3 "github.com/filecoin-project/specs-actors/v3/actors/builtin" + + builtin4 "github.com/filecoin-project/specs-actors/v4/actors/builtin" + + builtin5 "github.com/filecoin-project/specs-actors/v5/actors/builtin" +) + +var ( + Address = builtin5.SystemActorAddr +) + +func MakeState(store adt.Store, av actors.Version) (State, error) { + switch av { + + case actors.Version0: + return make0(store) + + case actors.Version2: + return make2(store) + + case actors.Version3: + return make3(store) + + case actors.Version4: + return make4(store) + + case actors.Version5: + return make5(store) + + } + return nil, xerrors.Errorf("unknown actor version %d", av) +} + +func GetActorCodeID(av actors.Version) (cid.Cid, error) { + switch av { + + case actors.Version0: + return builtin0.SystemActorCodeID, nil + + case actors.Version2: + return builtin2.SystemActorCodeID, nil + + case actors.Version3: + return builtin3.SystemActorCodeID, nil + + case actors.Version4: + return builtin4.SystemActorCodeID, nil + + case actors.Version5: + return builtin5.SystemActorCodeID, nil + + } + + return cid.Undef, xerrors.Errorf("unknown actor version %d", av) +} + +type State interface { + GetState() interface{} +} diff --git a/chain/actors/builtin/system/v0.go 
b/chain/actors/builtin/system/v0.go new file mode 100644 index 000000000..64c6f53d3 --- /dev/null +++ b/chain/actors/builtin/system/v0.go @@ -0,0 +1,35 @@ +package system + +import ( + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/lotus/chain/actors/adt" + + system0 "github.com/filecoin-project/specs-actors/actors/builtin/system" +) + +var _ State = (*state0)(nil) + +func load0(store adt.Store, root cid.Cid) (State, error) { + out := state0{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make0(store adt.Store) (State, error) { + out := state0{store: store} + out.State = system0.State{} + return &out, nil +} + +type state0 struct { + system0.State + store adt.Store +} + +func (s *state0) GetState() interface{} { + return &s.State +} diff --git a/chain/actors/builtin/system/v2.go b/chain/actors/builtin/system/v2.go new file mode 100644 index 000000000..eb540891c --- /dev/null +++ b/chain/actors/builtin/system/v2.go @@ -0,0 +1,35 @@ +package system + +import ( + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/lotus/chain/actors/adt" + + system2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/system" +) + +var _ State = (*state2)(nil) + +func load2(store adt.Store, root cid.Cid) (State, error) { + out := state2{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make2(store adt.Store) (State, error) { + out := state2{store: store} + out.State = system2.State{} + return &out, nil +} + +type state2 struct { + system2.State + store adt.Store +} + +func (s *state2) GetState() interface{} { + return &s.State +} diff --git a/chain/actors/builtin/system/v3.go b/chain/actors/builtin/system/v3.go new file mode 100644 index 000000000..5b04e189e --- /dev/null +++ b/chain/actors/builtin/system/v3.go @@ -0,0 +1,35 @@ +package system + +import ( + "github.com/ipfs/go-cid" + + 
"github.com/filecoin-project/lotus/chain/actors/adt" + + system3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/system" +) + +var _ State = (*state3)(nil) + +func load3(store adt.Store, root cid.Cid) (State, error) { + out := state3{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make3(store adt.Store) (State, error) { + out := state3{store: store} + out.State = system3.State{} + return &out, nil +} + +type state3 struct { + system3.State + store adt.Store +} + +func (s *state3) GetState() interface{} { + return &s.State +} diff --git a/chain/actors/builtin/system/v4.go b/chain/actors/builtin/system/v4.go new file mode 100644 index 000000000..b6c924978 --- /dev/null +++ b/chain/actors/builtin/system/v4.go @@ -0,0 +1,35 @@ +package system + +import ( + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/lotus/chain/actors/adt" + + system4 "github.com/filecoin-project/specs-actors/v4/actors/builtin/system" +) + +var _ State = (*state4)(nil) + +func load4(store adt.Store, root cid.Cid) (State, error) { + out := state4{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make4(store adt.Store) (State, error) { + out := state4{store: store} + out.State = system4.State{} + return &out, nil +} + +type state4 struct { + system4.State + store adt.Store +} + +func (s *state4) GetState() interface{} { + return &s.State +} diff --git a/chain/actors/builtin/system/v5.go b/chain/actors/builtin/system/v5.go new file mode 100644 index 000000000..77d2a8478 --- /dev/null +++ b/chain/actors/builtin/system/v5.go @@ -0,0 +1,35 @@ +package system + +import ( + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/lotus/chain/actors/adt" + + system5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/system" +) + +var _ State = (*state5)(nil) + +func load5(store adt.Store, root cid.Cid) (State, 
error) { + out := state5{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make5(store adt.Store) (State, error) { + out := state5{store: store} + out.State = system5.State{} + return &out, nil +} + +type state5 struct { + system5.State + store adt.Store +} + +func (s *state5) GetState() interface{} { + return &s.State +} diff --git a/chain/actors/builtin/verifreg/actor.go.template b/chain/actors/builtin/verifreg/actor.go.template index 22e809ccf..9ea8e155a 100644 --- a/chain/actors/builtin/verifreg/actor.go.template +++ b/chain/actors/builtin/verifreg/actor.go.template @@ -14,6 +14,7 @@ import ( "github.com/filecoin-project/lotus/chain/actors/adt" "github.com/filecoin-project/lotus/chain/actors/builtin" + "github.com/filecoin-project/lotus/chain/actors" "github.com/filecoin-project/lotus/chain/types" ) @@ -40,6 +41,28 @@ func Load(store adt.Store, act *types.Actor) (State, error) { return nil, xerrors.Errorf("unknown actor code %s", act.Code) } +func MakeState(store adt.Store, av actors.Version, rootKeyAddress address.Address) (State, error) { + switch av { +{{range .versions}} + case actors.Version{{.}}: + return make{{.}}(store, rootKeyAddress) +{{end}} +} + return nil, xerrors.Errorf("unknown actor version %d", av) +} + +func GetActorCodeID(av actors.Version) (cid.Cid, error) { + switch av { +{{range .versions}} + case actors.Version{{.}}: + return builtin{{.}}.VerifiedRegistryActorCodeID, nil +{{end}} + } + + return cid.Undef, xerrors.Errorf("unknown actor version %d", av) +} + + type State interface { cbor.Marshaler @@ -48,4 +71,5 @@ type State interface { VerifierDataCap(address.Address) (bool, abi.StoragePower, error) ForEachVerifier(func(addr address.Address, dcap abi.StoragePower) error) error ForEachClient(func(addr address.Address, dcap abi.StoragePower) error) error + GetState() interface{} } diff --git a/chain/actors/builtin/verifreg/state.go.template 
b/chain/actors/builtin/verifreg/state.go.template index 244d20932..b59cfb628 100644 --- a/chain/actors/builtin/verifreg/state.go.template +++ b/chain/actors/builtin/verifreg/state.go.template @@ -24,6 +24,26 @@ func load{{.v}}(store adt.Store, root cid.Cid) (State, error) { return &out, nil } +func make{{.v}}(store adt.Store, rootKeyAddress address.Address) (State, error) { + out := state{{.v}}{store: store} + {{if (le .v 2)}} + em, err := adt{{.v}}.MakeEmptyMap(store).Root() + if err != nil { + return nil, err + } + + out.State = *verifreg{{.v}}.ConstructState(em, rootKeyAddress) + {{else}} + s, err := verifreg{{.v}}.ConstructState(store, rootKeyAddress) + if err != nil { + return nil, err + } + + out.State = *s + {{end}} + return &out, nil +} + type state{{.v}} struct { verifreg{{.v}}.State store adt.Store @@ -56,3 +76,7 @@ func (s *state{{.v}}) verifiedClients() (adt.Map, error) { func (s *state{{.v}}) verifiers() (adt.Map, error) { return adt{{.v}}.AsMap(s.store, s.Verifiers{{if (ge .v 3)}}, builtin{{.v}}.DefaultHamtBitwidth{{end}}) } + +func (s *state{{.v}}) GetState() interface{} { + return &s.State +} \ No newline at end of file diff --git a/chain/actors/builtin/verifreg/v0.go b/chain/actors/builtin/verifreg/v0.go index 0dc4696f4..e70b0e3c9 100644 --- a/chain/actors/builtin/verifreg/v0.go +++ b/chain/actors/builtin/verifreg/v0.go @@ -23,6 +23,19 @@ func load0(store adt.Store, root cid.Cid) (State, error) { return &out, nil } +func make0(store adt.Store, rootKeyAddress address.Address) (State, error) { + out := state0{store: store} + + em, err := adt0.MakeEmptyMap(store).Root() + if err != nil { + return nil, err + } + + out.State = *verifreg0.ConstructState(em, rootKeyAddress) + + return &out, nil +} + type state0 struct { verifreg0.State store adt.Store @@ -55,3 +68,7 @@ func (s *state0) verifiedClients() (adt.Map, error) { func (s *state0) verifiers() (adt.Map, error) { return adt0.AsMap(s.store, s.Verifiers) } + +func (s *state0) GetState() interface{} { 
+ return &s.State +} diff --git a/chain/actors/builtin/verifreg/v2.go b/chain/actors/builtin/verifreg/v2.go index a5ef84532..0bcbe0212 100644 --- a/chain/actors/builtin/verifreg/v2.go +++ b/chain/actors/builtin/verifreg/v2.go @@ -23,6 +23,19 @@ func load2(store adt.Store, root cid.Cid) (State, error) { return &out, nil } +func make2(store adt.Store, rootKeyAddress address.Address) (State, error) { + out := state2{store: store} + + em, err := adt2.MakeEmptyMap(store).Root() + if err != nil { + return nil, err + } + + out.State = *verifreg2.ConstructState(em, rootKeyAddress) + + return &out, nil +} + type state2 struct { verifreg2.State store adt.Store @@ -55,3 +68,7 @@ func (s *state2) verifiedClients() (adt.Map, error) { func (s *state2) verifiers() (adt.Map, error) { return adt2.AsMap(s.store, s.Verifiers) } + +func (s *state2) GetState() interface{} { + return &s.State +} diff --git a/chain/actors/builtin/verifreg/v3.go b/chain/actors/builtin/verifreg/v3.go index fb0c46d0c..32003ca3a 100644 --- a/chain/actors/builtin/verifreg/v3.go +++ b/chain/actors/builtin/verifreg/v3.go @@ -24,6 +24,19 @@ func load3(store adt.Store, root cid.Cid) (State, error) { return &out, nil } +func make3(store adt.Store, rootKeyAddress address.Address) (State, error) { + out := state3{store: store} + + s, err := verifreg3.ConstructState(store, rootKeyAddress) + if err != nil { + return nil, err + } + + out.State = *s + + return &out, nil +} + type state3 struct { verifreg3.State store adt.Store @@ -56,3 +69,7 @@ func (s *state3) verifiedClients() (adt.Map, error) { func (s *state3) verifiers() (adt.Map, error) { return adt3.AsMap(s.store, s.Verifiers, builtin3.DefaultHamtBitwidth) } + +func (s *state3) GetState() interface{} { + return &s.State +} diff --git a/chain/actors/builtin/verifreg/v4.go b/chain/actors/builtin/verifreg/v4.go index 2419ef758..b752e747b 100644 --- a/chain/actors/builtin/verifreg/v4.go +++ b/chain/actors/builtin/verifreg/v4.go @@ -24,6 +24,19 @@ func load4(store 
adt.Store, root cid.Cid) (State, error) { return &out, nil } +func make4(store adt.Store, rootKeyAddress address.Address) (State, error) { + out := state4{store: store} + + s, err := verifreg4.ConstructState(store, rootKeyAddress) + if err != nil { + return nil, err + } + + out.State = *s + + return &out, nil +} + type state4 struct { verifreg4.State store adt.Store @@ -56,3 +69,7 @@ func (s *state4) verifiedClients() (adt.Map, error) { func (s *state4) verifiers() (adt.Map, error) { return adt4.AsMap(s.store, s.Verifiers, builtin4.DefaultHamtBitwidth) } + +func (s *state4) GetState() interface{} { + return &s.State +} diff --git a/chain/actors/builtin/verifreg/v5.go b/chain/actors/builtin/verifreg/v5.go index 367d38498..6fefd7115 100644 --- a/chain/actors/builtin/verifreg/v5.go +++ b/chain/actors/builtin/verifreg/v5.go @@ -24,6 +24,19 @@ func load5(store adt.Store, root cid.Cid) (State, error) { return &out, nil } +func make5(store adt.Store, rootKeyAddress address.Address) (State, error) { + out := state5{store: store} + + s, err := verifreg5.ConstructState(store, rootKeyAddress) + if err != nil { + return nil, err + } + + out.State = *s + + return &out, nil +} + type state5 struct { verifreg5.State store adt.Store @@ -56,3 +69,7 @@ func (s *state5) verifiedClients() (adt.Map, error) { func (s *state5) verifiers() (adt.Map, error) { return adt5.AsMap(s.store, s.Verifiers, builtin5.DefaultHamtBitwidth) } + +func (s *state5) GetState() interface{} { + return &s.State +} diff --git a/chain/actors/builtin/verifreg/verifreg.go b/chain/actors/builtin/verifreg/verifreg.go index baca66177..88104ad69 100644 --- a/chain/actors/builtin/verifreg/verifreg.go +++ b/chain/actors/builtin/verifreg/verifreg.go @@ -19,6 +19,7 @@ import ( builtin5 "github.com/filecoin-project/specs-actors/v5/actors/builtin" + "github.com/filecoin-project/lotus/chain/actors" "github.com/filecoin-project/lotus/chain/actors/adt" "github.com/filecoin-project/lotus/chain/actors/builtin" 
"github.com/filecoin-project/lotus/chain/types" @@ -75,6 +76,51 @@ func Load(store adt.Store, act *types.Actor) (State, error) { return nil, xerrors.Errorf("unknown actor code %s", act.Code) } +func MakeState(store adt.Store, av actors.Version, rootKeyAddress address.Address) (State, error) { + switch av { + + case actors.Version0: + return make0(store, rootKeyAddress) + + case actors.Version2: + return make2(store, rootKeyAddress) + + case actors.Version3: + return make3(store, rootKeyAddress) + + case actors.Version4: + return make4(store, rootKeyAddress) + + case actors.Version5: + return make5(store, rootKeyAddress) + + } + return nil, xerrors.Errorf("unknown actor version %d", av) +} + +func GetActorCodeID(av actors.Version) (cid.Cid, error) { + switch av { + + case actors.Version0: + return builtin0.VerifiedRegistryActorCodeID, nil + + case actors.Version2: + return builtin2.VerifiedRegistryActorCodeID, nil + + case actors.Version3: + return builtin3.VerifiedRegistryActorCodeID, nil + + case actors.Version4: + return builtin4.VerifiedRegistryActorCodeID, nil + + case actors.Version5: + return builtin5.VerifiedRegistryActorCodeID, nil + + } + + return cid.Undef, xerrors.Errorf("unknown actor version %d", av) +} + type State interface { cbor.Marshaler @@ -83,4 +129,5 @@ type State interface { VerifierDataCap(address.Address) (bool, abi.StoragePower, error) ForEachVerifier(func(addr address.Address, dcap abi.StoragePower) error) error ForEachClient(func(addr address.Address, dcap abi.StoragePower) error) error + GetState() interface{} } diff --git a/chain/actors/policy/policy.go b/chain/actors/policy/policy.go index c159dc98f..c06c85d38 100644 --- a/chain/actors/policy/policy.go +++ b/chain/actors/policy/policy.go @@ -196,6 +196,33 @@ func GetMaxProveCommitDuration(ver actors.Version, t abi.RegisteredSealProof) ab } } +// SetProviderCollateralSupplyTarget sets the percentage of normalized circulating +// supply that must be covered by provider collateral in a 
deal. This should +// only be used for testing. +func SetProviderCollateralSupplyTarget(num, denom big.Int) { + + market2.ProviderCollateralSupplyTarget = builtin2.BigFrac{ + Numerator: num, + Denominator: denom, + } + + market3.ProviderCollateralSupplyTarget = builtin3.BigFrac{ + Numerator: num, + Denominator: denom, + } + + market4.ProviderCollateralSupplyTarget = builtin4.BigFrac{ + Numerator: num, + Denominator: denom, + } + + market5.ProviderCollateralSupplyTarget = builtin5.BigFrac{ + Numerator: num, + Denominator: denom, + } + +} + func DealProviderCollateralBounds( size abi.PaddedPieceSize, verified bool, rawBytePower, qaPower, baselinePower abi.StoragePower, diff --git a/chain/actors/policy/policy.go.template b/chain/actors/policy/policy.go.template index e27aac6e8..3257feffd 100644 --- a/chain/actors/policy/policy.go.template +++ b/chain/actors/policy/policy.go.template @@ -10,14 +10,14 @@ import ( "github.com/filecoin-project/lotus/chain/actors" {{range .versions}} - {{if (ge . 2)}} builtin{{.}} "github.com/filecoin-project/specs-actors{{import .}}actors/builtin" {{end}} + {{if (ge . 2)}} builtin{{.}} "github.com/filecoin-project/specs-actors{{import .}}actors/builtin" {{end}} market{{.}} "github.com/filecoin-project/specs-actors{{import .}}actors/builtin/market" miner{{.}} "github.com/filecoin-project/specs-actors{{import .}}actors/builtin/miner" verifreg{{.}} "github.com/filecoin-project/specs-actors{{import .}}actors/builtin/verifreg" {{if (eq . 0)}} power{{.}} "github.com/filecoin-project/specs-actors{{import .}}actors/builtin/power" {{end}} - {{end}} + {{end}} - paych{{.latestVersion}} "github.com/filecoin-project/specs-actors{{import .latestVersion}}actors/builtin/paych" + paych{{.latestVersion}} "github.com/filecoin-project/specs-actors{{import .latestVersion}}actors/builtin/paych" ) const ( @@ -31,15 +31,15 @@ const ( // This should only be used for testing. 
func SetSupportedProofTypes(types ...abi.RegisteredSealProof) { {{range .versions}} - {{if (eq . 0)}} - miner{{.}}.SupportedProofTypes = make(map[abi.RegisteredSealProof]struct{}, len(types)) - {{else if (le . 4)}} - miner{{.}}.PreCommitSealProofTypesV0 = make(map[abi.RegisteredSealProof]struct{}, len(types)) - miner{{.}}.PreCommitSealProofTypesV7 = make(map[abi.RegisteredSealProof]struct{}, len(types)*2) - miner{{.}}.PreCommitSealProofTypesV8 = make(map[abi.RegisteredSealProof]struct{}, len(types)) - {{else}} - miner{{.}}.PreCommitSealProofTypesV8 = make(map[abi.RegisteredSealProof]struct{}, len(types)) - {{end}} + {{if (eq . 0)}} + miner{{.}}.SupportedProofTypes = make(map[abi.RegisteredSealProof]struct{}, len(types)) + {{else if (le . 4)}} + miner{{.}}.PreCommitSealProofTypesV0 = make(map[abi.RegisteredSealProof]struct{}, len(types)) + miner{{.}}.PreCommitSealProofTypesV7 = make(map[abi.RegisteredSealProof]struct{}, len(types)*2) + miner{{.}}.PreCommitSealProofTypesV8 = make(map[abi.RegisteredSealProof]struct{}, len(types)) + {{else}} + miner{{.}}.PreCommitSealProofTypesV8 = make(map[abi.RegisteredSealProof]struct{}, len(types)) + {{end}} {{end}} AddSupportedProofTypes(types...) @@ -80,9 +80,9 @@ func AddSupportedProofTypes(types ...abi.RegisteredSealProof) { // actors versions. Use for testing. func SetPreCommitChallengeDelay(delay abi.ChainEpoch) { // Set for all miner versions. - {{range .versions}} - miner{{.}}.PreCommitChallengeDelay = delay - {{end}} + {{range .versions}} + miner{{.}}.PreCommitChallengeDelay = delay + {{end}} } // TODO: this function shouldn't really exist. Instead, the API should expose the precommit delay. @@ -94,42 +94,56 @@ func GetPreCommitChallengeDelay() abi.ChainEpoch { // meet for leader election, across all actor versions. This should only be used // for testing. func SetConsensusMinerMinPower(p abi.StoragePower) { - {{range .versions}} - {{if (eq . 0)}} - power{{.}}.ConsensusMinerMinPower = p - {{else if (eq . 
2)}} - for _, policy := range builtin{{.}}.SealProofPolicies { - policy.ConsensusMinerMinPower = p - } - {{else}} - for _, policy := range builtin{{.}}.PoStProofPolicies { - policy.ConsensusMinerMinPower = p - } - {{end}} - {{end}} + {{range .versions}} + {{if (eq . 0)}} + power{{.}}.ConsensusMinerMinPower = p + {{else if (eq . 2)}} + for _, policy := range builtin{{.}}.SealProofPolicies { + policy.ConsensusMinerMinPower = p + } + {{else}} + for _, policy := range builtin{{.}}.PoStProofPolicies { + policy.ConsensusMinerMinPower = p + } + {{end}} + {{end}} } // SetMinVerifiedDealSize sets the minimum size of a verified deal. This should // only be used for testing. func SetMinVerifiedDealSize(size abi.StoragePower) { - {{range .versions}} - verifreg{{.}}.MinVerifiedDealSize = size - {{end}} + {{range .versions}} + verifreg{{.}}.MinVerifiedDealSize = size + {{end}} } func GetMaxProveCommitDuration(ver actors.Version, t abi.RegisteredSealProof) abi.ChainEpoch { switch ver { {{range .versions}} - case actors.Version{{.}}: - {{if (eq . 0)}} - return miner{{.}}.MaxSealDuration[t] - {{else}} - return miner{{.}}.MaxProveCommitDuration[t] - {{end}} - {{end}} - default: - panic("unsupported actors version") - } + case actors.Version{{.}}: + {{if (eq . 0)}} + return miner{{.}}.MaxSealDuration[t] + {{else}} + return miner{{.}}.MaxProveCommitDuration[t] + {{end}} + {{end}} + default: + panic("unsupported actors version") + } +} + +// SetProviderCollateralSupplyTarget sets the percentage of normalized circulating +// supply that must be covered by provider collateral in a deal. This should +// only be used for testing. +func SetProviderCollateralSupplyTarget(num, denom big.Int) { +{{range .versions}} + {{if (ge . 
2)}} + market{{.}}.ProviderCollateralSupplyTarget = builtin{{.}}.BigFrac{ + Numerator: num, + Denominator: denom, + } + {{end}} +{{end}} } func DealProviderCollateralBounds( @@ -138,14 +152,14 @@ func DealProviderCollateralBounds( circulatingFil abi.TokenAmount, nwVer network.Version, ) (min, max abi.TokenAmount) { switch actors.VersionForNetwork(nwVer) { - {{range .versions}} - case actors.Version{{.}}: - {{if (eq . 0)}} - return market{{.}}.DealProviderCollateralBounds(size, verified, rawBytePower, qaPower, baselinePower, circulatingFil, nwVer) - {{else}} - return market{{.}}.DealProviderCollateralBounds(size, verified, rawBytePower, qaPower, baselinePower, circulatingFil) - {{end}} - {{end}} + {{range .versions}} + case actors.Version{{.}}: + {{if (eq . 0)}} + return market{{.}}.DealProviderCollateralBounds(size, verified, rawBytePower, qaPower, baselinePower, circulatingFil, nwVer) + {{else}} + return market{{.}}.DealProviderCollateralBounds(size, verified, rawBytePower, qaPower, baselinePower, circulatingFil) + {{end}} + {{end}} default: panic("unsupported actors version") } @@ -158,15 +172,15 @@ func DealDurationBounds(pieceSize abi.PaddedPieceSize) (min, max abi.ChainEpoch) // Sets the challenge window and scales the proving period to match (such that // there are always 48 challenge windows in a proving period). func SetWPoStChallengeWindow(period abi.ChainEpoch) { - {{range .versions}} - miner{{.}}.WPoStChallengeWindow = period - miner{{.}}.WPoStProvingPeriod = period * abi.ChainEpoch(miner{{.}}.WPoStPeriodDeadlines) - {{if (ge . 3)}} - // by default, this is 2x finality which is 30 periods. - // scale it if we're scaling the challenge period. - miner{{.}}.WPoStDisputeWindow = period * 30 - {{end}} - {{end}} + {{range .versions}} + miner{{.}}.WPoStChallengeWindow = period + miner{{.}}.WPoStProvingPeriod = period * abi.ChainEpoch(miner{{.}}.WPoStPeriodDeadlines) + {{if (ge . 3)}} + // by default, this is 2x finality which is 30 periods. 
+ // scale it if we're scaling the challenge period. + miner{{.}}.WPoStDisputeWindow = period * 30 + {{end}} + {{end}} } func GetWinningPoStSectorSetLookback(nwVer network.Version) abi.ChainEpoch { @@ -174,7 +188,7 @@ func GetWinningPoStSectorSetLookback(nwVer network.Version) abi.ChainEpoch { return 10 } - // NOTE: if this ever changes, adjust it in a (*Miner).mineOne() logline as well + // NOTE: if this ever changes, adjust it in a (*Miner).mineOne() logline as well return ChainFinality } @@ -211,7 +225,7 @@ func GetDefaultSectorSize() abi.SectorSize { } func GetDefaultAggregationProof() abi.RegisteredAggregationProof { - return abi.RegisteredAggregationProof_SnarkPackV1 + return abi.RegisteredAggregationProof_SnarkPackV1 } func GetSectorMaxLifetime(proof abi.RegisteredSealProof, nwVer network.Version) abi.ChainEpoch { @@ -224,10 +238,10 @@ func GetSectorMaxLifetime(proof abi.RegisteredSealProof, nwVer network.Version) func GetAddressedSectorsMax(nwVer network.Version) int { switch actors.VersionForNetwork(nwVer) { - {{range .versions}} - case actors.Version{{.}}: - return miner{{.}}.AddressedSectorsMax - {{end}} + {{range .versions}} + case actors.Version{{.}}: + return miner{{.}}.AddressedSectorsMax + {{end}} default: panic("unsupported network version") } @@ -235,15 +249,15 @@ func GetAddressedSectorsMax(nwVer network.Version) int { func GetDeclarationsMax(nwVer network.Version) int { switch actors.VersionForNetwork(nwVer) { - {{range .versions}} - case actors.Version{{.}}: - {{if (eq . 0)}} - // TODO: Should we instead panic here since the concept doesn't exist yet? - return miner{{.}}.AddressedPartitionsMax - {{else}} - return miner{{.}}.DeclarationsMax - {{end}} - {{end}} + {{range .versions}} + case actors.Version{{.}}: + {{if (eq . 0)}} + // TODO: Should we instead panic here since the concept doesn't exist yet? 
+ return miner{{.}}.AddressedPartitionsMax + {{else}} + return miner{{.}}.DeclarationsMax + {{end}} + {{end}} default: panic("unsupported network version") } diff --git a/chain/actors/version.go b/chain/actors/version.go index e6ca2e9bd..9710e62fa 100644 --- a/chain/actors/version.go +++ b/chain/actors/version.go @@ -8,6 +8,10 @@ import ( type Version int +var LatestVersion = 5 + +var Versions = []int{0, 2, 3, 4, LatestVersion} + const ( Version0 Version = 0 Version2 Version = 2 diff --git a/chain/beacon/drand/drand.go b/chain/beacon/drand/drand.go index 847091858..e7f673d7f 100644 --- a/chain/beacon/drand/drand.go +++ b/chain/beacon/drand/drand.go @@ -168,8 +168,8 @@ func (db *DrandBeacon) getCachedValue(round uint64) *types.BeaconEntry { if !ok { return nil } - e, _ := v.(*types.BeaconEntry) - return e + e, _ := v.(types.BeaconEntry) + return &e } func (db *DrandBeacon) VerifyEntry(curr types.BeaconEntry, prev types.BeaconEntry) error { @@ -178,6 +178,9 @@ func (db *DrandBeacon) VerifyEntry(curr types.BeaconEntry, prev types.BeaconEntr return nil } if be := db.getCachedValue(curr.Round); be != nil { + if !bytes.Equal(curr.Data, be.Data) { + return xerrors.New("invalid beacon value, does not match cached good value") + } // return no error if the value is in the cache already return nil } diff --git a/chain/checkpoint.go b/chain/checkpoint.go index 8f99d73e4..a3660a45c 100644 --- a/chain/checkpoint.go +++ b/chain/checkpoint.go @@ -1,81 +1,57 @@ package chain import ( - "encoding/json" + "context" "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/node/modules/dtypes" - "github.com/ipfs/go-datastore" "golang.org/x/xerrors" ) -var CheckpointKey = datastore.NewKey("/chain/checks") - -func loadCheckpoint(ds dtypes.MetadataDS) (types.TipSetKey, error) { - haveChks, err := ds.Has(CheckpointKey) - if err != nil { - return types.EmptyTSK, err - } - - if !haveChks { - return types.EmptyTSK, nil - } - - tskBytes, err := 
ds.Get(CheckpointKey) - if err != nil { - return types.EmptyTSK, err - } - - var tsk types.TipSetKey - err = json.Unmarshal(tskBytes, &tsk) - if err != nil { - return types.EmptyTSK, err - } - - return tsk, err -} - -func (syncer *Syncer) SetCheckpoint(tsk types.TipSetKey) error { +func (syncer *Syncer) SyncCheckpoint(ctx context.Context, tsk types.TipSetKey) error { if tsk == types.EmptyTSK { return xerrors.Errorf("called with empty tsk") } - syncer.checkptLk.Lock() - defer syncer.checkptLk.Unlock() - ts, err := syncer.ChainStore().LoadTipSet(tsk) if err != nil { - return xerrors.Errorf("cannot find tipset: %w", err) + tss, err := syncer.Exchange.GetBlocks(ctx, tsk, 1) + if err != nil { + return xerrors.Errorf("failed to fetch tipset: %w", err) + } else if len(tss) != 1 { + return xerrors.Errorf("expected 1 tipset, got %d", len(tss)) + } + ts = tss[0] } - hts := syncer.ChainStore().GetHeaviestTipSet() - anc, err := syncer.ChainStore().IsAncestorOf(ts, hts) - if err != nil { - return xerrors.Errorf("cannot determine whether checkpoint tipset is in main-chain: %w", err) + if err := syncer.switchChain(ctx, ts); err != nil { + return xerrors.Errorf("failed to switch chain when syncing checkpoint: %w", err) } - if !hts.Equals(ts) && !anc { - return xerrors.Errorf("cannot mark tipset as checkpoint, since it isn't in the main-chain: %w", err) + if err := syncer.ChainStore().SetCheckpoint(ts); err != nil { + return xerrors.Errorf("failed to set the chain checkpoint: %w", err) } - tskBytes, err := json.Marshal(tsk) - if err != nil { - return err - } - - err = syncer.ds.Put(CheckpointKey, tskBytes) - if err != nil { - return err - } - - syncer.checkpt = tsk - return nil } -func (syncer *Syncer) GetCheckpoint() types.TipSetKey { - syncer.checkptLk.Lock() - defer syncer.checkptLk.Unlock() - return syncer.checkpt +func (syncer *Syncer) switchChain(ctx context.Context, ts *types.TipSet) error { + hts := syncer.ChainStore().GetHeaviestTipSet() + if hts.Equals(ts) { + return nil 
+ } + + if anc, err := syncer.store.IsAncestorOf(ts, hts); err == nil && anc { + return nil + } + + // Otherwise, sync the chain and set the head. + if err := syncer.collectChain(ctx, ts, hts, true); err != nil { + return xerrors.Errorf("failed to collect chain for checkpoint: %w", err) + } + + if err := syncer.ChainStore().SetHead(ts); err != nil { + return xerrors.Errorf("failed to set the chain head: %w", err) + } + return nil } diff --git a/chain/gen/gen.go b/chain/gen/gen.go index 98bb3ea91..424ee6edc 100644 --- a/chain/gen/gen.go +++ b/chain/gen/gen.go @@ -9,6 +9,8 @@ import ( "sync/atomic" "time" + "github.com/filecoin-project/go-state-types/network" + "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/big" @@ -198,6 +200,7 @@ func NewGeneratorWithSectorsAndUpgradeSchedule(numSectors int, us stmgr.UpgradeS sys := vm.Syscalls(&genFakeVerifier{}) tpl := genesis.Template{ + NetworkVersion: network.Version0, Accounts: []genesis.Actor{ { Type: genesis.TAccount, diff --git a/chain/gen/genesis/f00_system.go b/chain/gen/genesis/f00_system.go index 015dfac4a..4fde27107 100644 --- a/chain/gen/genesis/f00_system.go +++ b/chain/gen/genesis/f00_system.go @@ -3,28 +3,39 @@ package genesis import ( "context" - "github.com/filecoin-project/specs-actors/actors/builtin/system" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/lotus/chain/actors" + "github.com/filecoin-project/lotus/chain/actors/adt" + "github.com/filecoin-project/lotus/chain/actors/builtin/system" - "github.com/filecoin-project/specs-actors/actors/builtin" cbor "github.com/ipfs/go-ipld-cbor" bstore "github.com/filecoin-project/lotus/blockstore" "github.com/filecoin-project/lotus/chain/types" ) -func SetupSystemActor(bs bstore.Blockstore) (*types.Actor, error) { - var st system.State +func SetupSystemActor(ctx context.Context, bs bstore.Blockstore, av actors.Version) (*types.Actor, error) { cst := 
cbor.NewCborStore(bs) + st, err := system.MakeState(adt.WrapStore(ctx, cst), av) + if err != nil { + return nil, err + } - statecid, err := cst.Put(context.TODO(), &st) + statecid, err := cst.Put(ctx, st.GetState()) + if err != nil { + return nil, err + } + + actcid, err := system.GetActorCodeID(av) if err != nil { return nil, err } act := &types.Actor{ - Code: builtin.SystemActorCodeID, - Head: statecid, + Code: actcid, + Head: statecid, + Balance: big.Zero(), } return act, nil diff --git a/chain/gen/genesis/f01_init.go b/chain/gen/genesis/f01_init.go index 718eb4480..61ec91703 100644 --- a/chain/gen/genesis/f01_init.go +++ b/chain/gen/genesis/f01_init.go @@ -5,13 +5,16 @@ import ( "encoding/json" "fmt" + init_ "github.com/filecoin-project/lotus/chain/actors/builtin/init" + + "github.com/filecoin-project/lotus/chain/actors" + "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" - "github.com/filecoin-project/specs-actors/actors/builtin" "github.com/filecoin-project/specs-actors/actors/util/adt" - init_ "github.com/filecoin-project/specs-actors/actors/builtin/init" cbor "github.com/ipfs/go-ipld-cbor" cbg "github.com/whyrusleeping/cbor-gen" "golang.org/x/xerrors" @@ -21,17 +24,25 @@ import ( "github.com/filecoin-project/lotus/genesis" ) -func SetupInitActor(bs bstore.Blockstore, netname string, initialActors []genesis.Actor, rootVerifier genesis.Actor, remainder genesis.Actor) (int64, *types.Actor, map[address.Address]address.Address, error) { +func SetupInitActor(ctx context.Context, bs bstore.Blockstore, netname string, initialActors []genesis.Actor, rootVerifier genesis.Actor, remainder genesis.Actor, av actors.Version) (int64, *types.Actor, map[address.Address]address.Address, error) { if len(initialActors) > MaxAccounts { return 0, nil, nil, xerrors.New("too many initial actors") } - var ias init_.State - ias.NextID = MinerStart - ias.NetworkName = netname + cst := 
cbor.NewCborStore(bs) + ist, err := init_.MakeState(adt.WrapStore(ctx, cst), av, netname) + if err != nil { + return 0, nil, nil, err + } - store := adt.WrapStore(context.TODO(), cbor.NewCborStore(bs)) - amap := adt.MakeEmptyMap(store) + if err = ist.SetNextID(MinerStart); err != nil { + return 0, nil, nil, err + } + + amap, err := ist.AddressMap() + if err != nil { + return 0, nil, nil, err + } keyToId := map[address.Address]address.Address{} counter := int64(AccountStart) @@ -155,16 +166,25 @@ func SetupInitActor(bs bstore.Blockstore, netname string, initialActors []genesi if err != nil { return 0, nil, nil, err } - ias.AddressMap = amapaddr - statecid, err := store.Put(store.Context(), &ias) + if err = ist.SetAddressMap(amapaddr); err != nil { + return 0, nil, nil, err + } + + statecid, err := cst.Put(ctx, ist.GetState()) + if err != nil { + return 0, nil, nil, err + } + + actcid, err := init_.GetActorCodeID(av) if err != nil { return 0, nil, nil, err } act := &types.Actor{ - Code: builtin.InitActorCodeID, - Head: statecid, + Code: actcid, + Head: statecid, + Balance: big.Zero(), } return counter, act, keyToId, nil diff --git a/chain/gen/genesis/f02_reward.go b/chain/gen/genesis/f02_reward.go index e218da6fe..c8f479722 100644 --- a/chain/gen/genesis/f02_reward.go +++ b/chain/gen/genesis/f02_reward.go @@ -3,10 +3,12 @@ package genesis import ( "context" + "github.com/filecoin-project/lotus/chain/actors" + "github.com/filecoin-project/lotus/chain/actors/adt" + "github.com/filecoin-project/lotus/chain/actors/builtin/reward" + "github.com/filecoin-project/go-state-types/big" - "github.com/filecoin-project/specs-actors/actors/builtin" - reward0 "github.com/filecoin-project/specs-actors/actors/builtin/reward" cbor "github.com/ipfs/go-ipld-cbor" bstore "github.com/filecoin-project/lotus/blockstore" @@ -14,19 +16,28 @@ import ( "github.com/filecoin-project/lotus/chain/types" ) -func SetupRewardActor(bs bstore.Blockstore, qaPower big.Int) (*types.Actor, error) { +func 
SetupRewardActor(ctx context.Context, bs bstore.Blockstore, qaPower big.Int, av actors.Version) (*types.Actor, error) { cst := cbor.NewCborStore(bs) - - st := reward0.ConstructState(qaPower) - - hcid, err := cst.Put(context.TODO(), st) + rst, err := reward.MakeState(adt.WrapStore(ctx, cst), av, qaPower) if err != nil { return nil, err } - return &types.Actor{ - Code: builtin.RewardActorCodeID, + statecid, err := cst.Put(ctx, rst.GetState()) + if err != nil { + return nil, err + } + + actcid, err := reward.GetActorCodeID(av) + if err != nil { + return nil, err + } + + act := &types.Actor{ + Code: actcid, Balance: types.BigInt{Int: build.InitialRewardBalance}, - Head: hcid, - }, nil + Head: statecid, + } + + return act, nil } diff --git a/chain/gen/genesis/f03_cron.go b/chain/gen/genesis/f03_cron.go index dd43a59a4..c9dd0d341 100644 --- a/chain/gen/genesis/f03_cron.go +++ b/chain/gen/genesis/f03_cron.go @@ -3,27 +3,39 @@ package genesis import ( "context" - "github.com/filecoin-project/specs-actors/actors/builtin" - "github.com/filecoin-project/specs-actors/actors/builtin/cron" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/lotus/chain/actors" + "github.com/filecoin-project/lotus/chain/actors/adt" + "github.com/filecoin-project/lotus/chain/actors/builtin/cron" + cbor "github.com/ipfs/go-ipld-cbor" bstore "github.com/filecoin-project/lotus/blockstore" "github.com/filecoin-project/lotus/chain/types" ) -func SetupCronActor(bs bstore.Blockstore) (*types.Actor, error) { +func SetupCronActor(ctx context.Context, bs bstore.Blockstore, av actors.Version) (*types.Actor, error) { cst := cbor.NewCborStore(bs) - cas := cron.ConstructState(cron.BuiltInEntries()) - - stcid, err := cst.Put(context.TODO(), cas) + st, err := cron.MakeState(adt.WrapStore(ctx, cbor.NewCborStore(bs)), av) if err != nil { return nil, err } - return &types.Actor{ - Code: builtin.CronActorCodeID, - Head: stcid, - Nonce: 0, - Balance: types.NewInt(0), - }, nil + statecid, 
err := cst.Put(ctx, st.GetState()) + if err != nil { + return nil, err + } + + actcid, err := cron.GetActorCodeID(av) + if err != nil { + return nil, err + } + + act := &types.Actor{ + Code: actcid, + Head: statecid, + Balance: big.Zero(), + } + + return act, nil } diff --git a/chain/gen/genesis/f04_power.go b/chain/gen/genesis/f04_power.go index ed349c18b..b5e08cebe 100644 --- a/chain/gen/genesis/f04_power.go +++ b/chain/gen/genesis/f04_power.go @@ -3,44 +3,41 @@ package genesis import ( "context" - "github.com/filecoin-project/specs-actors/actors/builtin" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/lotus/chain/actors/builtin/power" + + "github.com/filecoin-project/lotus/chain/actors" "github.com/filecoin-project/specs-actors/actors/util/adt" - power0 "github.com/filecoin-project/specs-actors/actors/builtin/power" cbor "github.com/ipfs/go-ipld-cbor" bstore "github.com/filecoin-project/lotus/blockstore" "github.com/filecoin-project/lotus/chain/types" ) -func SetupStoragePowerActor(bs bstore.Blockstore) (*types.Actor, error) { - store := adt.WrapStore(context.TODO(), cbor.NewCborStore(bs)) - emptyMap, err := adt.MakeEmptyMap(store).Root() +func SetupStoragePowerActor(ctx context.Context, bs bstore.Blockstore, av actors.Version) (*types.Actor, error) { + + cst := cbor.NewCborStore(bs) + pst, err := power.MakeState(adt.WrapStore(ctx, cbor.NewCborStore(bs)), av) if err != nil { return nil, err } - multiMap, err := adt.AsMultimap(store, emptyMap) + statecid, err := cst.Put(ctx, pst.GetState()) if err != nil { return nil, err } - emptyMultiMap, err := multiMap.Root() + actcid, err := power.GetActorCodeID(av) if err != nil { return nil, err } - sms := power0.ConstructState(emptyMap, emptyMultiMap) - - stcid, err := store.Put(store.Context(), sms) - if err != nil { - return nil, err + act := &types.Actor{ + Code: actcid, + Head: statecid, + Balance: big.Zero(), } - return &types.Actor{ - Code: builtin.StoragePowerActorCodeID, - Head: 
stcid, - Nonce: 0, - Balance: types.NewInt(0), - }, nil + return act, nil } diff --git a/chain/gen/genesis/f05_market.go b/chain/gen/genesis/f05_market.go index f7ac26f43..ac32294c9 100644 --- a/chain/gen/genesis/f05_market.go +++ b/chain/gen/genesis/f05_market.go @@ -3,38 +3,38 @@ package genesis import ( "context" - "github.com/filecoin-project/specs-actors/actors/builtin" - "github.com/filecoin-project/specs-actors/actors/builtin/market" - "github.com/filecoin-project/specs-actors/actors/util/adt" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/lotus/chain/actors" + "github.com/filecoin-project/lotus/chain/actors/adt" + "github.com/filecoin-project/lotus/chain/actors/builtin/market" + cbor "github.com/ipfs/go-ipld-cbor" bstore "github.com/filecoin-project/lotus/blockstore" "github.com/filecoin-project/lotus/chain/types" ) -func SetupStorageMarketActor(bs bstore.Blockstore) (*types.Actor, error) { - store := adt.WrapStore(context.TODO(), cbor.NewCborStore(bs)) - - a, err := adt.MakeEmptyArray(store).Root() - if err != nil { - return nil, err - } - h, err := adt.MakeEmptyMap(store).Root() +func SetupStorageMarketActor(ctx context.Context, bs bstore.Blockstore, av actors.Version) (*types.Actor, error) { + cst := cbor.NewCborStore(bs) + mst, err := market.MakeState(adt.WrapStore(ctx, cbor.NewCborStore(bs)), av) if err != nil { return nil, err } - sms := market.ConstructState(a, h, h) + statecid, err := cst.Put(ctx, mst.GetState()) + if err != nil { + return nil, err + } - stcid, err := store.Put(store.Context(), sms) + actcid, err := market.GetActorCodeID(av) if err != nil { return nil, err } act := &types.Actor{ - Code: builtin.StorageMarketActorCodeID, - Head: stcid, - Balance: types.NewInt(0), + Code: actcid, + Head: statecid, + Balance: big.Zero(), } return act, nil diff --git a/chain/gen/genesis/f06_vreg.go b/chain/gen/genesis/f06_vreg.go index 1ba8abede..e61c951f5 100644 --- a/chain/gen/genesis/f06_vreg.go +++ 
b/chain/gen/genesis/f06_vreg.go @@ -3,11 +3,14 @@ package genesis import ( "context" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/lotus/chain/actors/builtin/verifreg" + + "github.com/filecoin-project/lotus/chain/actors" + "github.com/filecoin-project/go-address" cbor "github.com/ipfs/go-ipld-cbor" - "github.com/filecoin-project/specs-actors/actors/builtin" - verifreg0 "github.com/filecoin-project/specs-actors/actors/builtin/verifreg" "github.com/filecoin-project/specs-actors/actors/util/adt" bstore "github.com/filecoin-project/lotus/blockstore" @@ -26,25 +29,27 @@ func init() { RootVerifierID = idk } -func SetupVerifiedRegistryActor(bs bstore.Blockstore) (*types.Actor, error) { - store := adt.WrapStore(context.TODO(), cbor.NewCborStore(bs)) - - h, err := adt.MakeEmptyMap(store).Root() +func SetupVerifiedRegistryActor(ctx context.Context, bs bstore.Blockstore, av actors.Version) (*types.Actor, error) { + cst := cbor.NewCborStore(bs) + vst, err := verifreg.MakeState(adt.WrapStore(ctx, cbor.NewCborStore(bs)), av, RootVerifierID) if err != nil { return nil, err } - sms := verifreg0.ConstructState(h, RootVerifierID) + statecid, err := cst.Put(ctx, vst.GetState()) + if err != nil { + return nil, err + } - stcid, err := store.Put(store.Context(), sms) + actcid, err := verifreg.GetActorCodeID(av) if err != nil { return nil, err } act := &types.Actor{ - Code: builtin.VerifiedRegistryActorCodeID, - Head: stcid, - Balance: types.NewInt(0), + Code: actcid, + Head: statecid, + Balance: big.Zero(), } return act, nil diff --git a/chain/gen/genesis/genesis.go b/chain/gen/genesis/genesis.go index 4b86db550..6dec3fea6 100644 --- a/chain/gen/genesis/genesis.go +++ b/chain/gen/genesis/genesis.go @@ -6,6 +6,32 @@ import ( "encoding/json" "fmt" + builtin0 "github.com/filecoin-project/specs-actors/actors/builtin" + verifreg0 "github.com/filecoin-project/specs-actors/actors/builtin/verifreg" + adt0 
"github.com/filecoin-project/specs-actors/actors/util/adt" + + "github.com/filecoin-project/go-state-types/network" + + "github.com/filecoin-project/lotus/chain/actors/builtin/multisig" + + "github.com/filecoin-project/lotus/chain/actors/adt" + "github.com/filecoin-project/lotus/chain/actors/builtin/account" + + "github.com/filecoin-project/lotus/chain/actors" + + "github.com/filecoin-project/lotus/chain/actors/builtin/verifreg" + + "github.com/filecoin-project/lotus/chain/actors/builtin/market" + + "github.com/filecoin-project/lotus/chain/actors/builtin/power" + + "github.com/filecoin-project/lotus/chain/actors/builtin/cron" + + init_ "github.com/filecoin-project/lotus/chain/actors/builtin/init" + "github.com/filecoin-project/lotus/chain/actors/builtin/reward" + + "github.com/filecoin-project/lotus/chain/actors/builtin/system" + "github.com/filecoin-project/lotus/chain/actors/builtin" "github.com/filecoin-project/lotus/journal" @@ -21,11 +47,6 @@ import ( "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/big" "github.com/filecoin-project/go-state-types/crypto" - builtin0 "github.com/filecoin-project/specs-actors/actors/builtin" - account0 "github.com/filecoin-project/specs-actors/actors/builtin/account" - multisig0 "github.com/filecoin-project/specs-actors/actors/builtin/multisig" - verifreg0 "github.com/filecoin-project/specs-actors/actors/builtin/verifreg" - adt0 "github.com/filecoin-project/specs-actors/actors/util/adt" bstore "github.com/filecoin-project/lotus/blockstore" "github.com/filecoin-project/lotus/build" @@ -118,94 +139,92 @@ func MakeInitialStateTree(ctx context.Context, bs bstore.Blockstore, template ge return nil, nil, xerrors.Errorf("putting empty object: %w", err) } - state, err := state.NewStateTree(cst, types.StateTreeVersion0) + sv, err := state.VersionForNetwork(template.NetworkVersion) + if err != nil { + return nil, nil, xerrors.Errorf("getting state tree version: %w", err) + } + + state, err := 
state.NewStateTree(cst, sv) if err != nil { return nil, nil, xerrors.Errorf("making new state tree: %w", err) } + av := actors.VersionForNetwork(template.NetworkVersion) + // Create system actor - sysact, err := SetupSystemActor(bs) + sysact, err := SetupSystemActor(ctx, bs, av) if err != nil { - return nil, nil, xerrors.Errorf("setup init actor: %w", err) + return nil, nil, xerrors.Errorf("setup system actor: %w", err) } - if err := state.SetActor(builtin0.SystemActorAddr, sysact); err != nil { - return nil, nil, xerrors.Errorf("set init actor: %w", err) + if err := state.SetActor(system.Address, sysact); err != nil { + return nil, nil, xerrors.Errorf("set system actor: %w", err) } // Create init actor - idStart, initact, keyIDs, err := SetupInitActor(bs, template.NetworkName, template.Accounts, template.VerifregRootKey, template.RemainderAccount) + idStart, initact, keyIDs, err := SetupInitActor(ctx, bs, template.NetworkName, template.Accounts, template.VerifregRootKey, template.RemainderAccount, av) if err != nil { return nil, nil, xerrors.Errorf("setup init actor: %w", err) } - if err := state.SetActor(builtin0.InitActorAddr, initact); err != nil { + if err := state.SetActor(init_.Address, initact); err != nil { return nil, nil, xerrors.Errorf("set init actor: %w", err) } // Setup reward - // RewardActor's state is overrwritten by SetupStorageMiners - rewact, err := SetupRewardActor(bs, big.Zero()) + // RewardActor's state is overwritten by SetupStorageMiners, but needs to exist for miner creation messages + rewact, err := SetupRewardActor(ctx, bs, big.Zero(), av) if err != nil { - return nil, nil, xerrors.Errorf("setup init actor: %w", err) + return nil, nil, xerrors.Errorf("setup reward actor: %w", err) } - err = state.SetActor(builtin0.RewardActorAddr, rewact) + err = state.SetActor(reward.Address, rewact) if err != nil { - return nil, nil, xerrors.Errorf("set network account actor: %w", err) + return nil, nil, xerrors.Errorf("set reward actor: %w", err) } 
// Setup cron - cronact, err := SetupCronActor(bs) + cronact, err := SetupCronActor(ctx, bs, av) if err != nil { return nil, nil, xerrors.Errorf("setup cron actor: %w", err) } - if err := state.SetActor(builtin0.CronActorAddr, cronact); err != nil { + if err := state.SetActor(cron.Address, cronact); err != nil { return nil, nil, xerrors.Errorf("set cron actor: %w", err) } // Create empty power actor - spact, err := SetupStoragePowerActor(bs) + spact, err := SetupStoragePowerActor(ctx, bs, av) if err != nil { - return nil, nil, xerrors.Errorf("setup storage market actor: %w", err) + return nil, nil, xerrors.Errorf("setup storage power actor: %w", err) } - if err := state.SetActor(builtin0.StoragePowerActorAddr, spact); err != nil { - return nil, nil, xerrors.Errorf("set storage market actor: %w", err) + if err := state.SetActor(power.Address, spact); err != nil { + return nil, nil, xerrors.Errorf("set storage power actor: %w", err) } // Create empty market actor - marketact, err := SetupStorageMarketActor(bs) + marketact, err := SetupStorageMarketActor(ctx, bs, av) if err != nil { return nil, nil, xerrors.Errorf("setup storage market actor: %w", err) } - if err := state.SetActor(builtin0.StorageMarketActorAddr, marketact); err != nil { - return nil, nil, xerrors.Errorf("set market actor: %w", err) + if err := state.SetActor(market.Address, marketact); err != nil { + return nil, nil, xerrors.Errorf("set storage market actor: %w", err) } // Create verified registry - verifact, err := SetupVerifiedRegistryActor(bs) + verifact, err := SetupVerifiedRegistryActor(ctx, bs, av) if err != nil { - return nil, nil, xerrors.Errorf("setup storage market actor: %w", err) + return nil, nil, xerrors.Errorf("setup verified registry market actor: %w", err) } - if err := state.SetActor(builtin0.VerifiedRegistryActorAddr, verifact); err != nil { - return nil, nil, xerrors.Errorf("set market actor: %w", err) + if err := state.SetActor(verifreg.Address, verifact); err != nil { + return 
nil, nil, xerrors.Errorf("set verified registry actor: %w", err) } - burntRoot, err := cst.Put(ctx, &account0.State{ - Address: builtin0.BurntFundsActorAddr, - }) + bact, err := makeAccountActor(ctx, cst, av, builtin.BurntFundsActorAddr, big.Zero()) if err != nil { - return nil, nil, xerrors.Errorf("failed to setup burnt funds actor state: %w", err) + return nil, nil, xerrors.Errorf("setup burnt funds actor state: %w", err) } - - // Setup burnt-funds - err = state.SetActor(builtin0.BurntFundsActorAddr, &types.Actor{ - Code: builtin0.AccountActorCodeID, - Balance: types.NewInt(0), - Head: burntRoot, - }) - if err != nil { - return nil, nil, xerrors.Errorf("set burnt funds account actor: %w", err) + if err := state.SetActor(builtin.BurntFundsActorAddr, bact); err != nil { + return nil, nil, xerrors.Errorf("set burnt funds actor: %w", err) } // Create accounts @@ -213,7 +232,7 @@ func MakeInitialStateTree(ctx context.Context, bs bstore.Blockstore, template ge switch info.Type { case genesis.TAccount: - if err := createAccountActor(ctx, cst, state, info, keyIDs); err != nil { + if err := createAccountActor(ctx, cst, state, info, keyIDs, av); err != nil { return nil, nil, xerrors.Errorf("failed to create account actor: %w", err) } @@ -225,7 +244,7 @@ func MakeInitialStateTree(ctx context.Context, bs bstore.Blockstore, template ge } idStart++ - if err := createMultisigAccount(ctx, bs, cst, state, ida, info, keyIDs); err != nil { + if err := createMultisigAccount(ctx, cst, state, ida, info, keyIDs, av); err != nil { return nil, nil, err } default: @@ -240,26 +259,21 @@ func MakeInitialStateTree(ctx context.Context, bs bstore.Blockstore, template ge if err := json.Unmarshal(template.VerifregRootKey.Meta, &ainfo); err != nil { return nil, nil, xerrors.Errorf("unmarshaling account meta: %w", err) } - st, err := cst.Put(ctx, &account0.State{Address: ainfo.Owner}) - if err != nil { - return nil, nil, err - } _, ok := keyIDs[ainfo.Owner] if ok { return nil, nil, 
fmt.Errorf("rootkey account has already been declared, cannot be assigned 80: %s", ainfo.Owner) } - err = state.SetActor(builtin.RootVerifierAddress, &types.Actor{ - Code: builtin0.AccountActorCodeID, - Balance: template.VerifregRootKey.Balance, - Head: st, - }) + vact, err := makeAccountActor(ctx, cst, av, ainfo.Owner, template.VerifregRootKey.Balance) if err != nil { - return nil, nil, xerrors.Errorf("setting verifreg rootkey account: %w", err) + return nil, nil, xerrors.Errorf("setup verifreg rootkey account state: %w", err) + } + if err = state.SetActor(builtin.RootVerifierAddress, vact); err != nil { + return nil, nil, xerrors.Errorf("set verifreg rootkey account actor: %w", err) } case genesis.TMultisig: - if err = createMultisigAccount(ctx, bs, cst, state, builtin.RootVerifierAddress, template.VerifregRootKey, keyIDs); err != nil { + if err = createMultisigAccount(ctx, cst, state, builtin.RootVerifierAddress, template.VerifregRootKey, keyIDs, av); err != nil { return nil, nil, xerrors.Errorf("failed to set up verified registry signer: %w", err) } default: @@ -288,27 +302,21 @@ func MakeInitialStateTree(ctx context.Context, bs bstore.Blockstore, template ge return nil, nil, err } - verifierState, err := cst.Put(ctx, &account0.State{Address: verifierAd}) + verifierAct, err := makeAccountActor(ctx, cst, av, verifierAd, big.Zero()) if err != nil { - return nil, nil, err + return nil, nil, xerrors.Errorf("setup first verifier state: %w", err) } - err = state.SetActor(verifierId, &types.Actor{ - Code: builtin0.AccountActorCodeID, - Balance: types.NewInt(0), - Head: verifierState, - }) - if err != nil { - return nil, nil, xerrors.Errorf("setting account from actmap: %w", err) + if err = state.SetActor(verifierId, verifierAct); err != nil { + return nil, nil, xerrors.Errorf("set first verifier actor: %w", err) } totalFilAllocated := big.Zero() - // flush as ForEach works on the HAMT - if _, err := state.Flush(ctx); err != nil { - return nil, nil, err - } err = 
state.ForEach(func(addr address.Address, act *types.Actor) error { + if act.Balance.Nil() { + panic(fmt.Sprintf("actor %s (%s) has nil balance", addr, builtin.ActorNameByCode(act.Code))) + } totalFilAllocated = big.Add(totalFilAllocated, act.Balance) return nil }) @@ -337,13 +345,13 @@ func MakeInitialStateTree(ctx context.Context, bs bstore.Blockstore, template ge } keyIDs[ainfo.Owner] = builtin.ReserveAddress - err = createAccountActor(ctx, cst, state, template.RemainderAccount, keyIDs) + err = createAccountActor(ctx, cst, state, template.RemainderAccount, keyIDs, av) if err != nil { return nil, nil, xerrors.Errorf("creating remainder acct: %w", err) } case genesis.TMultisig: - if err = createMultisigAccount(ctx, bs, cst, state, builtin.ReserveAddress, template.RemainderAccount, keyIDs); err != nil { + if err = createMultisigAccount(ctx, cst, state, builtin.ReserveAddress, template.RemainderAccount, keyIDs, av); err != nil { return nil, nil, xerrors.Errorf("failed to set up remainder: %w", err) } default: @@ -353,12 +361,38 @@ func MakeInitialStateTree(ctx context.Context, bs bstore.Blockstore, template ge return state, keyIDs, nil } -func createAccountActor(ctx context.Context, cst cbor.IpldStore, state *state.StateTree, info genesis.Actor, keyIDs map[address.Address]address.Address) error { +func makeAccountActor(ctx context.Context, cst cbor.IpldStore, av actors.Version, addr address.Address, bal types.BigInt) (*types.Actor, error) { + ast, err := account.MakeState(adt.WrapStore(ctx, cst), av, addr) + if err != nil { + return nil, err + } + + statecid, err := cst.Put(ctx, ast.GetState()) + if err != nil { + return nil, err + } + + actcid, err := account.GetActorCodeID(av) + if err != nil { + return nil, err + } + + act := &types.Actor{ + Code: actcid, + Head: statecid, + Balance: bal, + } + + return act, nil +} + +func createAccountActor(ctx context.Context, cst cbor.IpldStore, state *state.StateTree, info genesis.Actor, keyIDs 
map[address.Address]address.Address, av actors.Version) error { var ainfo genesis.AccountMeta if err := json.Unmarshal(info.Meta, &ainfo); err != nil { return xerrors.Errorf("unmarshaling account meta: %w", err) } - st, err := cst.Put(ctx, &account0.State{Address: ainfo.Owner}) + + aa, err := makeAccountActor(ctx, cst, av, ainfo.Owner, info.Balance) if err != nil { return err } @@ -368,18 +402,14 @@ func createAccountActor(ctx context.Context, cst cbor.IpldStore, state *state.St return fmt.Errorf("no registered ID for account actor: %s", ainfo.Owner) } - err = state.SetActor(ida, &types.Actor{ - Code: builtin0.AccountActorCodeID, - Balance: info.Balance, - Head: st, - }) + err = state.SetActor(ida, aa) if err != nil { return xerrors.Errorf("setting account from actmap: %w", err) } return nil } -func createMultisigAccount(ctx context.Context, bs bstore.Blockstore, cst cbor.IpldStore, state *state.StateTree, ida address.Address, info genesis.Actor, keyIDs map[address.Address]address.Address) error { +func createMultisigAccount(ctx context.Context, cst cbor.IpldStore, state *state.StateTree, ida address.Address, info genesis.Actor, keyIDs map[address.Address]address.Address, av actors.Version) error { if info.Type != genesis.TMultisig { return fmt.Errorf("can only call createMultisigAccount with multisig Actor info") } @@ -387,10 +417,6 @@ func createMultisigAccount(ctx context.Context, bs bstore.Blockstore, cst cbor.I if err := json.Unmarshal(info.Meta, &ainfo); err != nil { return xerrors.Errorf("unmarshaling account meta: %w", err) } - pending, err := adt0.MakeEmptyMap(adt0.WrapStore(ctx, cst)).Root() - if err != nil { - return xerrors.Errorf("failed to create empty map: %v", err) - } var signers []address.Address @@ -407,44 +433,45 @@ func createMultisigAccount(ctx context.Context, bs bstore.Blockstore, cst cbor.I continue } - st, err := cst.Put(ctx, &account0.State{Address: e}) + aa, err := makeAccountActor(ctx, cst, av, e, big.Zero()) if err != nil { return err 
} - err = state.SetActor(idAddress, &types.Actor{ - Code: builtin0.AccountActorCodeID, - Balance: types.NewInt(0), - Head: st, - }) - if err != nil { + + if err = state.SetActor(idAddress, aa); err != nil { return xerrors.Errorf("setting account from actmap: %w", err) } signers = append(signers, idAddress) } - st, err := cst.Put(ctx, &multisig0.State{ - Signers: signers, - NumApprovalsThreshold: uint64(ainfo.Threshold), - StartEpoch: abi.ChainEpoch(ainfo.VestingStart), - UnlockDuration: abi.ChainEpoch(ainfo.VestingDuration), - PendingTxns: pending, - InitialBalance: info.Balance, - }) + mst, err := multisig.MakeState(adt.WrapStore(ctx, cst), av, signers, uint64(ainfo.Threshold), abi.ChainEpoch(ainfo.VestingStart), abi.ChainEpoch(ainfo.VestingDuration), info.Balance) if err != nil { return err } + + statecid, err := cst.Put(ctx, mst.GetState()) + if err != nil { + return err + } + + actcid, err := multisig.GetActorCodeID(av) + if err != nil { + return err + } + err = state.SetActor(ida, &types.Actor{ - Code: builtin0.MultisigActorCodeID, + Code: actcid, Balance: info.Balance, - Head: st, + Head: statecid, }) if err != nil { return xerrors.Errorf("setting account from actmap: %w", err) } + return nil } -func VerifyPreSealedData(ctx context.Context, cs *store.ChainStore, stateroot cid.Cid, template genesis.Template, keyIDs map[address.Address]address.Address) (cid.Cid, error) { +func VerifyPreSealedData(ctx context.Context, cs *store.ChainStore, stateroot cid.Cid, template genesis.Template, keyIDs map[address.Address]address.Address, nv network.Version) (cid.Cid, error) { verifNeeds := make(map[address.Address]abi.PaddedPieceSize) var sum abi.PaddedPieceSize @@ -455,8 +482,10 @@ func VerifyPreSealedData(ctx context.Context, cs *store.ChainStore, stateroot ci Bstore: cs.StateBlockstore(), Syscalls: mkFakedSigSyscalls(cs.VMSys()), CircSupplyCalc: nil, - NtwkVersion: genesisNetworkVersion, - BaseFee: types.NewInt(0), + NtwkVersion: func(_ context.Context, _ 
abi.ChainEpoch) network.Version { + return nv + }, + BaseFee: types.NewInt(0), } vm, err := vm.NewVM(ctx, &vmopt) if err != nil { @@ -485,7 +514,8 @@ func VerifyPreSealedData(ctx context.Context, cs *store.ChainStore, stateroot ci return cid.Undef, err } - _, err = doExecValue(ctx, vm, builtin0.VerifiedRegistryActorAddr, verifregRoot, types.NewInt(0), builtin0.MethodsVerifiedRegistry.AddVerifier, mustEnc(&verifreg0.AddVerifierParams{ + // Note: This is brittle, if the methodNum / param changes, it could break things + _, err = doExecValue(ctx, vm, verifreg.Address, verifregRoot, types.NewInt(0), builtin0.MethodsVerifiedRegistry.AddVerifier, mustEnc(&verifreg0.AddVerifierParams{ Address: verifier, Allowance: abi.NewStoragePower(int64(sum)), // eh, close enough @@ -496,7 +526,8 @@ func VerifyPreSealedData(ctx context.Context, cs *store.ChainStore, stateroot ci } for c, amt := range verifNeeds { - _, err := doExecValue(ctx, vm, builtin0.VerifiedRegistryActorAddr, verifier, types.NewInt(0), builtin0.MethodsVerifiedRegistry.AddVerifiedClient, mustEnc(&verifreg0.AddVerifiedClientParams{ + // Note: This is brittle, if the methodNum / param changes, it could break things + _, err := doExecValue(ctx, vm, verifreg.Address, verifier, types.NewInt(0), builtin0.MethodsVerifiedRegistry.AddVerifiedClient, mustEnc(&verifreg0.AddVerifiedClientParams{ Address: c, Allowance: abi.NewStoragePower(int64(amt)), })) @@ -531,17 +562,17 @@ func MakeGenesisBlock(ctx context.Context, j journal.Journal, bs bstore.Blocksto cs := store.NewChainStore(bs, bs, datastore.NewMapDatastore(), sys, j) // Verify PreSealed Data - stateroot, err = VerifyPreSealedData(ctx, cs, stateroot, template, keyIDs) + stateroot, err = VerifyPreSealedData(ctx, cs, stateroot, template, keyIDs, template.NetworkVersion) if err != nil { return nil, xerrors.Errorf("failed to verify presealed data: %w", err) } - stateroot, err = SetupStorageMiners(ctx, cs, stateroot, template.Miners) + stateroot, err = 
SetupStorageMiners(ctx, cs, stateroot, template.Miners, template.NetworkVersion) if err != nil { return nil, xerrors.Errorf("setup miners failed: %w", err) } - store := adt0.WrapStore(ctx, cbor.NewCborStore(bs)) + store := adt.WrapStore(ctx, cbor.NewCborStore(bs)) emptyroot, err := adt0.MakeEmptyArray(store).Root() if err != nil { return nil, xerrors.Errorf("amt build failed: %w", err) @@ -590,7 +621,7 @@ func MakeGenesisBlock(ctx context.Context, j journal.Journal, bs bstore.Blocksto } b := &types.BlockHeader{ - Miner: builtin0.SystemActorAddr, + Miner: system.Address, Ticket: genesisticket, Parents: []cid.Cid{filecoinGenesisCid}, Height: 0, diff --git a/chain/gen/genesis/miners.go b/chain/gen/genesis/miners.go index 86fd16812..e6f17d677 100644 --- a/chain/gen/genesis/miners.go +++ b/chain/gen/genesis/miners.go @@ -6,6 +6,22 @@ import ( "fmt" "math/rand" + power4 "github.com/filecoin-project/specs-actors/v4/actors/builtin/power" + + reward4 "github.com/filecoin-project/specs-actors/v4/actors/builtin/reward" + + market4 "github.com/filecoin-project/specs-actors/v4/actors/builtin/market" + + "github.com/filecoin-project/lotus/chain/actors" + + "github.com/filecoin-project/lotus/chain/actors/builtin" + + "github.com/filecoin-project/lotus/chain/actors/policy" + + "github.com/filecoin-project/lotus/chain/actors/adt" + + "github.com/filecoin-project/go-state-types/network" + market0 "github.com/filecoin-project/specs-actors/actors/builtin/market" "github.com/filecoin-project/lotus/chain/actors/builtin/power" @@ -61,7 +77,12 @@ func mkFakedSigSyscalls(base vm.SyscallBuilder) vm.SyscallBuilder { } } -func SetupStorageMiners(ctx context.Context, cs *store.ChainStore, sroot cid.Cid, miners []genesis.Miner) (cid.Cid, error) { +// Note: Much of this is brittle, if the methodNum / param / return changes, it will break things +func SetupStorageMiners(ctx context.Context, cs *store.ChainStore, sroot cid.Cid, miners []genesis.Miner, nv network.Version) (cid.Cid, error) { + + cst 
:= cbor.NewCborStore(cs.StateBlockstore()) + av := actors.VersionForNetwork(nv) + csc := func(context.Context, abi.ChainEpoch, *state.StateTree) (abi.TokenAmount, error) { return big.Zero(), nil } @@ -73,8 +94,10 @@ func SetupStorageMiners(ctx context.Context, cs *store.ChainStore, sroot cid.Cid Bstore: cs.StateBlockstore(), Syscalls: mkFakedSigSyscalls(cs.VMSys()), CircSupplyCalc: csc, - NtwkVersion: genesisNetworkVersion, - BaseFee: types.NewInt(0), + NtwkVersion: func(_ context.Context, _ abi.ChainEpoch) network.Version { + return nv + }, + BaseFee: types.NewInt(0), } vm, err := vm.NewVM(ctx, vmopt) @@ -94,12 +117,13 @@ func SetupStorageMiners(ctx context.Context, cs *store.ChainStore, sroot cid.Cid dealIDs []abi.DealID }, len(miners)) + maxPeriods := policy.GetMaxSectorExpirationExtension() / miner.WPoStProvingPeriod for i, m := range miners { // Create miner through power actor i := i m := m - spt, err := miner.SealProofTypeFromSectorSize(m.SectorSize, GenesisNetworkVersion) + spt, err := miner.SealProofTypeFromSectorSize(m.SectorSize, nv) if err != nil { return cid.Undef, err } @@ -113,7 +137,7 @@ func SetupStorageMiners(ctx context.Context, cs *store.ChainStore, sroot cid.Cid } params := mustEnc(constructorParams) - rval, err := doExecValue(ctx, vm, power.Address, m.Owner, m.PowerBalance, builtin0.MethodsPower.CreateMiner, params) + rval, err := doExecValue(ctx, vm, power.Address, m.Owner, m.PowerBalance, power.Methods.CreateMiner, params) if err != nil { return cid.Undef, xerrors.Errorf("failed to create genesis miner: %w", err) } @@ -129,23 +153,34 @@ func SetupStorageMiners(ctx context.Context, cs *store.ChainStore, sroot cid.Cid } minerInfos[i].maddr = ma.IDAddress - // TODO: ActorUpgrade - err = vm.MutateState(ctx, minerInfos[i].maddr, func(cst cbor.IpldStore, st *miner0.State) error { - maxPeriods := miner0.MaxSectorExpirationExtension / miner0.WPoStProvingPeriod - minerInfos[i].presealExp = (maxPeriods-1)*miner0.WPoStProvingPeriod + 
st.ProvingPeriodStart - 1 - - return nil - }) + _, err = vm.Flush(ctx) if err != nil { - return cid.Undef, xerrors.Errorf("mutating state: %w", err) + return cid.Undef, xerrors.Errorf("flushing vm: %w", err) } + + mact, err := vm.StateTree().GetActor(minerInfos[i].maddr) + if err != nil { + return cid.Undef, xerrors.Errorf("getting newly created miner actor: %w", err) + } + + mst, err := miner.Load(adt.WrapStore(ctx, cst), mact) + if err != nil { + return cid.Undef, xerrors.Errorf("getting newly created miner state: %w", err) + } + + pps, err := mst.GetProvingPeriodStart() + if err != nil { + return cid.Undef, xerrors.Errorf("getting newly created miner proving period start: %w", err) + } + + minerInfos[i].presealExp = (maxPeriods-1)*miner0.WPoStProvingPeriod + pps - 1 } // Add market funds if m.MarketBalance.GreaterThan(big.Zero()) { params := mustEnc(&minerInfos[i].maddr) - _, err := doExecValue(ctx, vm, market.Address, m.Worker, m.MarketBalance, builtin0.MethodsMarket.AddBalance, params) + _, err := doExecValue(ctx, vm, market.Address, m.Worker, m.MarketBalance, market.Methods.AddBalance, params) if err != nil { return cid.Undef, xerrors.Errorf("failed to create genesis miner (add balance): %w", err) } @@ -203,35 +238,66 @@ func SetupStorageMiners(ctx context.Context, cs *store.ChainStore, sroot cid.Cid for pi := range m.Sectors { rawPow = types.BigAdd(rawPow, types.NewInt(uint64(m.SectorSize))) - dweight, err := dealWeight(ctx, vm, minerInfos[i].maddr, []abi.DealID{minerInfos[i].dealIDs[pi]}, 0, minerInfos[i].presealExp) + dweight, vdweight, err := dealWeight(ctx, vm, minerInfos[i].maddr, []abi.DealID{minerInfos[i].dealIDs[pi]}, 0, minerInfos[i].presealExp, av) if err != nil { return cid.Undef, xerrors.Errorf("getting deal weight: %w", err) } - sectorWeight := miner0.QAPowerForWeight(m.SectorSize, minerInfos[i].presealExp, dweight.DealWeight, dweight.VerifiedDealWeight) + sectorWeight := builtin.QAPowerForWeight(m.SectorSize, minerInfos[i].presealExp, dweight, 
vdweight) qaPow = types.BigAdd(qaPow, sectorWeight) } } - err = vm.MutateState(ctx, power.Address, func(cst cbor.IpldStore, st *power0.State) error { - st.TotalQualityAdjPower = qaPow - st.TotalRawBytePower = rawPow - - st.ThisEpochQualityAdjPower = qaPow - st.ThisEpochRawBytePower = rawPow - return nil - }) + _, err = vm.Flush(ctx) if err != nil { - return cid.Undef, xerrors.Errorf("mutating state: %w", err) + return cid.Undef, xerrors.Errorf("flushing vm: %w", err) } - err = vm.MutateState(ctx, reward.Address, func(sct cbor.IpldStore, st *reward0.State) error { - *st = *reward0.ConstructState(qaPow) - return nil - }) + pact, err := vm.StateTree().GetActor(power.Address) if err != nil { - return cid.Undef, xerrors.Errorf("mutating state: %w", err) + return cid.Undef, xerrors.Errorf("getting power actor: %w", err) + } + + pst, err := power.Load(adt.WrapStore(ctx, cst), pact) + if err != nil { + return cid.Undef, xerrors.Errorf("getting power state: %w", err) + } + + if err = pst.SetTotalQualityAdjPower(qaPow); err != nil { + return cid.Undef, xerrors.Errorf("setting TotalQualityAdjPower in power state: %w", err) + } + + if err = pst.SetTotalRawBytePower(rawPow); err != nil { + return cid.Undef, xerrors.Errorf("setting TotalRawBytePower in power state: %w", err) + } + + if err = pst.SetThisEpochQualityAdjPower(qaPow); err != nil { + return cid.Undef, xerrors.Errorf("setting ThisEpochQualityAdjPower in power state: %w", err) + } + + if err = pst.SetThisEpochRawBytePower(rawPow); err != nil { + return cid.Undef, xerrors.Errorf("setting ThisEpochRawBytePower in power state: %w", err) + } + + pcid, err := cst.Put(ctx, pst.GetState()) + if err != nil { + return cid.Undef, xerrors.Errorf("putting power state: %w", err) + } + + pact.Head = pcid + + if err = vm.StateTree().SetActor(power.Address, pact); err != nil { + return cid.Undef, xerrors.Errorf("setting power state: %w", err) + } + + rewact, err := SetupRewardActor(ctx, cs.StateBlockstore(), big.Zero(), 
actors.VersionForNetwork(nv)) + if err != nil { + return cid.Undef, xerrors.Errorf("setup reward actor: %w", err) + } + + if err = vm.StateTree().SetActor(reward.Address, rewact); err != nil { + return cid.Undef, xerrors.Errorf("set reward actor: %w", err) } } @@ -248,24 +314,55 @@ func SetupStorageMiners(ctx context.Context, cs *store.ChainStore, sroot cid.Cid Expiration: minerInfos[i].presealExp, // TODO: Allow setting externally! } - dweight, err := dealWeight(ctx, vm, minerInfos[i].maddr, params.DealIDs, 0, minerInfos[i].presealExp) + dweight, vdweight, err := dealWeight(ctx, vm, minerInfos[i].maddr, params.DealIDs, 0, minerInfos[i].presealExp, av) if err != nil { return cid.Undef, xerrors.Errorf("getting deal weight: %w", err) } - sectorWeight := miner0.QAPowerForWeight(m.SectorSize, minerInfos[i].presealExp, dweight.DealWeight, dweight.VerifiedDealWeight) + sectorWeight := builtin.QAPowerForWeight(m.SectorSize, minerInfos[i].presealExp, dweight, vdweight) // we've added fake power for this sector above, remove it now - err = vm.MutateState(ctx, power.Address, func(cst cbor.IpldStore, st *power0.State) error { - st.TotalQualityAdjPower = types.BigSub(st.TotalQualityAdjPower, sectorWeight) //nolint:scopelint - st.TotalRawBytePower = types.BigSub(st.TotalRawBytePower, types.NewInt(uint64(m.SectorSize))) - return nil - }) + + _, err = vm.Flush(ctx) if err != nil { - return cid.Undef, xerrors.Errorf("removing fake power: %w", err) + return cid.Undef, xerrors.Errorf("flushing vm: %w", err) } - epochReward, err := currentEpochBlockReward(ctx, vm, minerInfos[i].maddr) + pact, err := vm.StateTree().GetActor(power.Address) + if err != nil { + return cid.Undef, xerrors.Errorf("getting power actor: %w", err) + } + + pst, err := power.Load(adt.WrapStore(ctx, cst), pact) + if err != nil { + return cid.Undef, xerrors.Errorf("getting power state: %w", err) + } + + pc, err := pst.TotalPower() + if err != nil { + return cid.Undef, xerrors.Errorf("getting total power: %w", err) 
+ } + + if err = pst.SetTotalRawBytePower(types.BigSub(pc.RawBytePower, types.NewInt(uint64(m.SectorSize)))); err != nil { + return cid.Undef, xerrors.Errorf("setting TotalRawBytePower in power state: %w", err) + } + + if err = pst.SetTotalQualityAdjPower(types.BigSub(pc.QualityAdjPower, sectorWeight)); err != nil { + return cid.Undef, xerrors.Errorf("setting TotalQualityAdjPower in power state: %w", err) + } + + pcid, err := cst.Put(ctx, pst.GetState()) + if err != nil { + return cid.Undef, xerrors.Errorf("putting power state: %w", err) + } + + pact.Head = pcid + + if err = vm.StateTree().SetActor(power.Address, pact); err != nil { + return cid.Undef, xerrors.Errorf("setting power state: %w", err) + } + + baselinePower, rewardSmoothed, err := currentEpochBlockReward(ctx, vm, minerInfos[i].maddr, av) if err != nil { return cid.Undef, xerrors.Errorf("getting current epoch reward: %w", err) } @@ -275,13 +372,13 @@ func SetupStorageMiners(ctx context.Context, cs *store.ChainStore, sroot cid.Cid return cid.Undef, xerrors.Errorf("getting current total power: %w", err) } - pcd := miner0.PreCommitDepositForPower(epochReward.ThisEpochRewardSmoothed, tpow.QualityAdjPowerSmoothed, sectorWeight) + pcd := miner0.PreCommitDepositForPower(&rewardSmoothed, tpow.QualityAdjPowerSmoothed, sectorWeight) pledge := miner0.InitialPledgeForPower( sectorWeight, - epochReward.ThisEpochBaselinePower, + baselinePower, tpow.PledgeCollateral, - epochReward.ThisEpochRewardSmoothed, + &rewardSmoothed, tpow.QualityAdjPowerSmoothed, circSupply(ctx, vm, minerInfos[i].maddr), ) @@ -289,7 +386,7 @@ func SetupStorageMiners(ctx context.Context, cs *store.ChainStore, sroot cid.Cid pledge = big.Add(pcd, pledge) fmt.Println(types.FIL(pledge)) - _, err = doExecValue(ctx, vm, minerInfos[i].maddr, m.Worker, pledge, builtin0.MethodsMiner.PreCommitSector, mustEnc(params)) + _, err = doExecValue(ctx, vm, minerInfos[i].maddr, m.Worker, pledge, miner.Methods.PreCommitSector, mustEnc(params)) if err != nil { 
return cid.Undef, xerrors.Errorf("failed to confirm presealed sectors: %w", err) } @@ -299,28 +396,84 @@ func SetupStorageMiners(ctx context.Context, cs *store.ChainStore, sroot cid.Cid Sectors: []abi.SectorNumber{preseal.SectorID}, } - _, err = doExecValue(ctx, vm, minerInfos[i].maddr, power.Address, big.Zero(), builtin0.MethodsMiner.ConfirmSectorProofsValid, mustEnc(confirmParams)) + _, err = doExecValue(ctx, vm, minerInfos[i].maddr, power.Address, big.Zero(), miner.Methods.ConfirmSectorProofsValid, mustEnc(confirmParams)) if err != nil { return cid.Undef, xerrors.Errorf("failed to confirm presealed sectors: %w", err) } + + if av > actors.Version2 { + // post v2, we need to explicitly Claim this power since ConfirmSectorProofsValid doesn't do it anymore + claimParams := &power4.UpdateClaimedPowerParams{ + RawByteDelta: types.NewInt(uint64(m.SectorSize)), + QualityAdjustedDelta: sectorWeight, + } + + _, err = doExecValue(ctx, vm, power.Address, minerInfos[i].maddr, big.Zero(), power.Methods.UpdateClaimedPower, mustEnc(claimParams)) + if err != nil { + return cid.Undef, xerrors.Errorf("failed to confirm presealed sectors: %w", err) + } + + _, err = vm.Flush(ctx) + if err != nil { + return cid.Undef, xerrors.Errorf("flushing vm: %w", err) + } + + mact, err := vm.StateTree().GetActor(minerInfos[i].maddr) + if err != nil { + return cid.Undef, xerrors.Errorf("getting miner actor: %w", err) + } + + mst, err := miner.Load(adt.WrapStore(ctx, cst), mact) + if err != nil { + return cid.Undef, xerrors.Errorf("getting miner state: %w", err) + } + + if err = mst.EraseAllUnproven(); err != nil { + return cid.Undef, xerrors.Errorf("failed to erase unproven sectors: %w", err) + } + + mcid, err := cst.Put(ctx, mst.GetState()) + if err != nil { + return cid.Undef, xerrors.Errorf("putting miner state: %w", err) + } + + mact.Head = mcid + + if err = vm.StateTree().SetActor(minerInfos[i].maddr, mact); err != nil { + return cid.Undef, xerrors.Errorf("setting miner state: %w", err) + } 
+ } } } } // Sanity-check total network power - err = vm.MutateState(ctx, power.Address, func(cst cbor.IpldStore, st *power0.State) error { - if !st.TotalRawBytePower.Equals(rawPow) { - return xerrors.Errorf("st.TotalRawBytePower doesn't match previously calculated rawPow") - } - - if !st.TotalQualityAdjPower.Equals(qaPow) { - return xerrors.Errorf("st.TotalQualityAdjPower doesn't match previously calculated qaPow") - } - - return nil - }) + _, err = vm.Flush(ctx) if err != nil { - return cid.Undef, xerrors.Errorf("mutating state: %w", err) + return cid.Undef, xerrors.Errorf("flushing vm: %w", err) + } + + pact, err := vm.StateTree().GetActor(power.Address) + if err != nil { + return cid.Undef, xerrors.Errorf("getting power actor: %w", err) + } + + pst, err := power.Load(adt.WrapStore(ctx, cst), pact) + if err != nil { + return cid.Undef, xerrors.Errorf("getting power state: %w", err) + } + + pc, err := pst.TotalPower() + if err != nil { + return cid.Undef, xerrors.Errorf("getting total power: %w", err) + } + + if !pc.RawBytePower.Equals(rawPow) { + return cid.Undef, xerrors.Errorf("TotalRawBytePower (%s) doesn't match previously calculated rawPow (%s)", pc.RawBytePower, rawPow) + } + + if !pc.QualityAdjPower.Equals(qaPow) { + return cid.Undef, xerrors.Errorf("QualityAdjPower (%s) doesn't match previously calculated qaPow (%s)", pc.QualityAdjPower, qaPow) } // TODO: Should we re-ConstructState for the reward actor using rawPow as currRealizedPower here? 
@@ -372,43 +525,79 @@ func currentTotalPower(ctx context.Context, vm *vm.VM, maddr address.Address) (* return &pwr, nil } -func dealWeight(ctx context.Context, vm *vm.VM, maddr address.Address, dealIDs []abi.DealID, sectorStart, sectorExpiry abi.ChainEpoch) (market0.VerifyDealsForActivationReturn, error) { - params := &market.VerifyDealsForActivationParams{ - DealIDs: dealIDs, - SectorStart: sectorStart, - SectorExpiry: sectorExpiry, - } +func dealWeight(ctx context.Context, vm *vm.VM, maddr address.Address, dealIDs []abi.DealID, sectorStart, sectorExpiry abi.ChainEpoch, av actors.Version) (abi.DealWeight, abi.DealWeight, error) { + // TODO: This hack should move to market actor wrapper + if av <= actors.Version2 { + params := &market0.VerifyDealsForActivationParams{ + DealIDs: dealIDs, + SectorStart: sectorStart, + SectorExpiry: sectorExpiry, + } - var dealWeights market0.VerifyDealsForActivationReturn + var dealWeights market0.VerifyDealsForActivationReturn + ret, err := doExecValue(ctx, vm, + market.Address, + maddr, + abi.NewTokenAmount(0), + builtin0.MethodsMarket.VerifyDealsForActivation, + mustEnc(params), + ) + if err != nil { + return big.Zero(), big.Zero(), err + } + if err := dealWeights.UnmarshalCBOR(bytes.NewReader(ret)); err != nil { + return big.Zero(), big.Zero(), err + } + + return dealWeights.DealWeight, dealWeights.VerifiedDealWeight, nil + } + params := &market4.VerifyDealsForActivationParams{Sectors: []market4.SectorDeals{{ + SectorExpiry: sectorExpiry, + DealIDs: dealIDs, + }}} + + var dealWeights market4.VerifyDealsForActivationReturn ret, err := doExecValue(ctx, vm, market.Address, maddr, abi.NewTokenAmount(0), - builtin0.MethodsMarket.VerifyDealsForActivation, + market.Methods.VerifyDealsForActivation, mustEnc(params), ) if err != nil { - return market0.VerifyDealsForActivationReturn{}, err + return big.Zero(), big.Zero(), err } if err := dealWeights.UnmarshalCBOR(bytes.NewReader(ret)); err != nil { - return 
market0.VerifyDealsForActivationReturn{}, err + return big.Zero(), big.Zero(), err } - return dealWeights, nil + return dealWeights.Sectors[0].DealWeight, dealWeights.Sectors[0].VerifiedDealWeight, nil } -func currentEpochBlockReward(ctx context.Context, vm *vm.VM, maddr address.Address) (*reward0.ThisEpochRewardReturn, error) { - rwret, err := doExecValue(ctx, vm, reward.Address, maddr, big.Zero(), builtin0.MethodsReward.ThisEpochReward, nil) +func currentEpochBlockReward(ctx context.Context, vm *vm.VM, maddr address.Address, av actors.Version) (abi.StoragePower, builtin.FilterEstimate, error) { + rwret, err := doExecValue(ctx, vm, reward.Address, maddr, big.Zero(), reward.Methods.ThisEpochReward, nil) if err != nil { - return nil, err + return big.Zero(), builtin.FilterEstimate{}, err } - var epochReward reward0.ThisEpochRewardReturn + // TODO: This hack should move to reward actor wrapper + if av <= actors.Version2 { + var epochReward reward0.ThisEpochRewardReturn + + if err := epochReward.UnmarshalCBOR(bytes.NewReader(rwret)); err != nil { + return big.Zero(), builtin.FilterEstimate{}, err + } + + return epochReward.ThisEpochBaselinePower, *epochReward.ThisEpochRewardSmoothed, nil + } + + var epochReward reward4.ThisEpochRewardReturn + if err := epochReward.UnmarshalCBOR(bytes.NewReader(rwret)); err != nil { - return nil, err + return big.Zero(), builtin.FilterEstimate{}, err } - return &epochReward, nil + return epochReward.ThisEpochBaselinePower, builtin.FilterEstimate(epochReward.ThisEpochRewardSmoothed), nil } func circSupply(ctx context.Context, vmi *vm.VM, maddr address.Address) abi.TokenAmount { diff --git a/chain/gen/genesis/util.go b/chain/gen/genesis/util.go index 1ebc58121..67a4e9579 100644 --- a/chain/gen/genesis/util.go +++ b/chain/gen/genesis/util.go @@ -3,9 +3,6 @@ package genesis import ( "context" - "github.com/filecoin-project/go-state-types/network" - "github.com/filecoin-project/lotus/build" - "github.com/filecoin-project/go-address" 
"github.com/filecoin-project/go-state-types/abi" cbg "github.com/whyrusleeping/cbor-gen" @@ -49,29 +46,3 @@ func doExecValue(ctx context.Context, vm *vm.VM, to, from address.Address, value return ret.Return, nil } - -// TODO: Get from build -// TODO: make a list/schedule of these. -var GenesisNetworkVersion = func() network.Version { - // returns the version _before_ the first upgrade. - if build.UpgradeBreezeHeight >= 0 { - return network.Version0 - } - if build.UpgradeSmokeHeight >= 0 { - return network.Version1 - } - if build.UpgradeIgnitionHeight >= 0 { - return network.Version2 - } - if build.UpgradeAssemblyHeight >= 0 { - return network.Version3 - } - if build.UpgradeLiftoffHeight >= 0 { - return network.Version3 - } - return build.ActorUpgradeNetworkVersion - 1 // genesis requires actors v0. -}() - -func genesisNetworkVersion(context.Context, abi.ChainEpoch) network.Version { // TODO: Get from build/ - return GenesisNetworkVersion // TODO: Get from build/ -} // TODO: Get from build/ diff --git a/chain/messagepool/check.go b/chain/messagepool/check.go new file mode 100644 index 000000000..11203e7df --- /dev/null +++ b/chain/messagepool/check.go @@ -0,0 +1,431 @@ +package messagepool + +import ( + "context" + "fmt" + stdbig "math/big" + "sort" + + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/chain/vm" +) + +var baseFeeUpperBoundFactor = types.NewInt(10) + +// CheckMessages performs a set of logic checks for a list of messages, prior to submitting it to the mpool +func (mp *MessagePool) CheckMessages(ctx context.Context, protos []*api.MessagePrototype) ([][]api.MessageCheckStatus, error) { + flex := make([]bool, len(protos)) + msgs := make([]*types.Message, len(protos)) + for i, p := range protos { + flex[i] = 
!p.ValidNonce + msgs[i] = &p.Message + } + return mp.checkMessages(ctx, msgs, false, flex) +} + +// CheckPendingMessages performs a set of logical sets for all messages pending from a given actor +func (mp *MessagePool) CheckPendingMessages(ctx context.Context, from address.Address) ([][]api.MessageCheckStatus, error) { + var msgs []*types.Message + mp.lk.Lock() + mset, ok := mp.pending[from] + if ok { + for _, sm := range mset.msgs { + msgs = append(msgs, &sm.Message) + } + } + mp.lk.Unlock() + + if len(msgs) == 0 { + return nil, nil + } + + sort.Slice(msgs, func(i, j int) bool { + return msgs[i].Nonce < msgs[j].Nonce + }) + + return mp.checkMessages(ctx, msgs, true, nil) +} + +// CheckReplaceMessages performs a set of logical checks for related messages while performing a +// replacement. +func (mp *MessagePool) CheckReplaceMessages(ctx context.Context, replace []*types.Message) ([][]api.MessageCheckStatus, error) { + msgMap := make(map[address.Address]map[uint64]*types.Message) + count := 0 + + mp.lk.Lock() + for _, m := range replace { + mmap, ok := msgMap[m.From] + if !ok { + mmap = make(map[uint64]*types.Message) + msgMap[m.From] = mmap + mset, ok := mp.pending[m.From] + if ok { + count += len(mset.msgs) + for _, sm := range mset.msgs { + mmap[sm.Message.Nonce] = &sm.Message + } + } else { + count++ + } + } + mmap[m.Nonce] = m + } + mp.lk.Unlock() + + msgs := make([]*types.Message, 0, count) + start := 0 + for _, mmap := range msgMap { + end := start + len(mmap) + + for _, m := range mmap { + msgs = append(msgs, m) + } + + sort.Slice(msgs[start:end], func(i, j int) bool { + return msgs[start+i].Nonce < msgs[start+j].Nonce + }) + + start = end + } + + return mp.checkMessages(ctx, msgs, true, nil) +} + +// flexibleNonces should be either nil or of len(msgs), it signifies that message at given index +// has non-determied nonce at this point +func (mp *MessagePool) checkMessages(ctx context.Context, msgs []*types.Message, interned bool, flexibleNonces []bool) 
(result [][]api.MessageCheckStatus, err error) { + if mp.api.IsLite() { + return nil, nil + } + mp.curTsLk.Lock() + curTs := mp.curTs + mp.curTsLk.Unlock() + + epoch := curTs.Height() + + var baseFee big.Int + if len(curTs.Blocks()) > 0 { + baseFee = curTs.Blocks()[0].ParentBaseFee + } else { + baseFee, err = mp.api.ChainComputeBaseFee(context.Background(), curTs) + if err != nil { + return nil, xerrors.Errorf("error computing basefee: %w", err) + } + } + + baseFeeLowerBound := getBaseFeeLowerBound(baseFee, baseFeeLowerBoundFactor) + baseFeeUpperBound := types.BigMul(baseFee, baseFeeUpperBoundFactor) + + type actorState struct { + nextNonce uint64 + requiredFunds *stdbig.Int + } + + state := make(map[address.Address]*actorState) + balances := make(map[address.Address]big.Int) + + result = make([][]api.MessageCheckStatus, len(msgs)) + + for i, m := range msgs { + // pre-check: actor nonce + check := api.MessageCheckStatus{ + Cid: m.Cid(), + CheckStatus: api.CheckStatus{ + Code: api.CheckStatusMessageGetStateNonce, + }, + } + + st, ok := state[m.From] + if !ok { + mp.lk.Lock() + mset, ok := mp.pending[m.From] + if ok && !interned { + st = &actorState{nextNonce: mset.nextNonce, requiredFunds: mset.requiredFunds} + for _, m := range mset.msgs { + st.requiredFunds = new(stdbig.Int).Add(st.requiredFunds, m.Message.Value.Int) + } + state[m.From] = st + mp.lk.Unlock() + + check.OK = true + check.Hint = map[string]interface{}{ + "nonce": st.nextNonce, + } + } else { + mp.lk.Unlock() + + stateNonce, err := mp.getStateNonce(ctx, m.From, curTs) + if err != nil { + check.OK = false + check.Err = fmt.Sprintf("error retrieving state nonce: %s", err.Error()) + } else { + check.OK = true + check.Hint = map[string]interface{}{ + "nonce": stateNonce, + } + } + + st = &actorState{nextNonce: stateNonce, requiredFunds: new(stdbig.Int)} + state[m.From] = st + } + } else { + check.OK = true + } + + result[i] = append(result[i], check) + if !check.OK { + continue + } + + // pre-check: 
actor balance + check = api.MessageCheckStatus{ + Cid: m.Cid(), + CheckStatus: api.CheckStatus{ + Code: api.CheckStatusMessageGetStateBalance, + }, + } + + balance, ok := balances[m.From] + if !ok { + balance, err = mp.getStateBalance(ctx, m.From, curTs) + if err != nil { + check.OK = false + check.Err = fmt.Sprintf("error retrieving state balance: %s", err) + } else { + check.OK = true + check.Hint = map[string]interface{}{ + "balance": balance, + } + } + + balances[m.From] = balance + } else { + check.OK = true + check.Hint = map[string]interface{}{ + "balance": balance, + } + } + + result[i] = append(result[i], check) + if !check.OK { + continue + } + + // 1. Serialization + check = api.MessageCheckStatus{ + Cid: m.Cid(), + CheckStatus: api.CheckStatus{ + Code: api.CheckStatusMessageSerialize, + }, + } + + bytes, err := m.Serialize() + if err != nil { + check.OK = false + check.Err = err.Error() + } else { + check.OK = true + } + + result[i] = append(result[i], check) + + // 2. Message size + check = api.MessageCheckStatus{ + Cid: m.Cid(), + CheckStatus: api.CheckStatus{ + Code: api.CheckStatusMessageSize, + }, + } + + if len(bytes) > MaxMessageSize-128 { // 128 bytes to account for signature size + check.OK = false + check.Err = "message too big" + } else { + check.OK = true + } + + result[i] = append(result[i], check) + + // 3. Syntactic validation + check = api.MessageCheckStatus{ + Cid: m.Cid(), + CheckStatus: api.CheckStatus{ + Code: api.CheckStatusMessageValidity, + }, + } + + if err := m.ValidForBlockInclusion(0, build.NewestNetworkVersion); err != nil { + check.OK = false + check.Err = fmt.Sprintf("syntactically invalid message: %s", err.Error()) + } else { + check.OK = true + } + + result[i] = append(result[i], check) + if !check.OK { + // skip remaining checks if it is a syntatically invalid message + continue + } + + // gas checks + + // 4. 
Min Gas + minGas := vm.PricelistByEpoch(epoch).OnChainMessage(m.ChainLength()) + + check = api.MessageCheckStatus{ + Cid: m.Cid(), + CheckStatus: api.CheckStatus{ + Code: api.CheckStatusMessageMinGas, + Hint: map[string]interface{}{ + "minGas": minGas, + }, + }, + } + + if m.GasLimit < minGas.Total() { + check.OK = false + check.Err = "GasLimit less than epoch minimum gas" + } else { + check.OK = true + } + + result[i] = append(result[i], check) + + // 5. Min Base Fee + check = api.MessageCheckStatus{ + Cid: m.Cid(), + CheckStatus: api.CheckStatus{ + Code: api.CheckStatusMessageMinBaseFee, + }, + } + + if m.GasFeeCap.LessThan(minimumBaseFee) { + check.OK = false + check.Err = "GasFeeCap less than minimum base fee" + } else { + check.OK = true + } + + result[i] = append(result[i], check) + if !check.OK { + goto checkState + } + + // 6. Base Fee + check = api.MessageCheckStatus{ + Cid: m.Cid(), + CheckStatus: api.CheckStatus{ + Code: api.CheckStatusMessageBaseFee, + Hint: map[string]interface{}{ + "baseFee": baseFee, + }, + }, + } + + if m.GasFeeCap.LessThan(baseFee) { + check.OK = false + check.Err = "GasFeeCap less than current base fee" + } else { + check.OK = true + } + + result[i] = append(result[i], check) + + // 7. Base Fee lower bound + check = api.MessageCheckStatus{ + Cid: m.Cid(), + CheckStatus: api.CheckStatus{ + Code: api.CheckStatusMessageBaseFeeLowerBound, + Hint: map[string]interface{}{ + "baseFeeLowerBound": baseFeeLowerBound, + "baseFee": baseFee, + }, + }, + } + + if m.GasFeeCap.LessThan(baseFeeLowerBound) { + check.OK = false + check.Err = "GasFeeCap less than base fee lower bound for inclusion in next 20 epochs" + } else { + check.OK = true + } + + result[i] = append(result[i], check) + + // 8. 
Base Fee upper bound + check = api.MessageCheckStatus{ + Cid: m.Cid(), + CheckStatus: api.CheckStatus{ + Code: api.CheckStatusMessageBaseFeeUpperBound, + Hint: map[string]interface{}{ + "baseFeeUpperBound": baseFeeUpperBound, + "baseFee": baseFee, + }, + }, + } + + if m.GasFeeCap.LessThan(baseFeeUpperBound) { + check.OK = true // on purpose, the checks is more of a warning + check.Err = "GasFeeCap less than base fee upper bound for inclusion in next 20 epochs" + } else { + check.OK = true + } + + result[i] = append(result[i], check) + + // stateful checks + checkState: + // 9. Message Nonce + check = api.MessageCheckStatus{ + Cid: m.Cid(), + CheckStatus: api.CheckStatus{ + Code: api.CheckStatusMessageNonce, + Hint: map[string]interface{}{ + "nextNonce": st.nextNonce, + }, + }, + } + + if (flexibleNonces == nil || !flexibleNonces[i]) && st.nextNonce != m.Nonce { + check.OK = false + check.Err = fmt.Sprintf("message nonce doesn't match next nonce (%d)", st.nextNonce) + } else { + check.OK = true + st.nextNonce++ + } + + result[i] = append(result[i], check) + + // check required funds -vs- balance + st.requiredFunds = new(stdbig.Int).Add(st.requiredFunds, m.RequiredFunds().Int) + st.requiredFunds.Add(st.requiredFunds, m.Value.Int) + + // 10. 
Balance + check = api.MessageCheckStatus{ + Cid: m.Cid(), + CheckStatus: api.CheckStatus{ + Code: api.CheckStatusMessageBalance, + Hint: map[string]interface{}{ + "requiredFunds": big.Int{Int: stdbig.NewInt(0).Set(st.requiredFunds)}, + }, + }, + } + + if balance.Int.Cmp(st.requiredFunds) < 0 { + check.OK = false + check.Err = "insufficient balance" + } else { + check.OK = true + } + + result[i] = append(result[i], check) + } + + return result, nil +} diff --git a/chain/messagepool/messagepool.go b/chain/messagepool/messagepool.go index 836629d90..f6c8e3ac9 100644 --- a/chain/messagepool/messagepool.go +++ b/chain/messagepool/messagepool.go @@ -426,6 +426,27 @@ func New(api Provider, ds dtypes.MetadataDS, netName dtypes.NetworkName, j journ return mp, nil } +func (mp *MessagePool) ForEachPendingMessage(f func(cid.Cid) error) error { + mp.lk.Lock() + defer mp.lk.Unlock() + + for _, mset := range mp.pending { + for _, m := range mset.msgs { + err := f(m.Cid()) + if err != nil { + return err + } + + err = f(m.Message.Cid()) + if err != nil { + return err + } + } + } + + return nil +} + func (mp *MessagePool) resolveToKey(ctx context.Context, addr address.Address) (address.Address, error) { // check the cache a, f := mp.keyCache[addr] @@ -948,6 +969,13 @@ func (mp *MessagePool) GetNonce(ctx context.Context, addr address.Address, _ typ return mp.getNonceLocked(ctx, addr, mp.curTs) } +// GetActor should not be used. 
It is only here to satisfy interface mess caused by lite node handling +func (mp *MessagePool) GetActor(_ context.Context, addr address.Address, _ types.TipSetKey) (*types.Actor, error) { + mp.curTsLk.Lock() + defer mp.curTsLk.Unlock() + return mp.api.GetActorAfter(addr, mp.curTs) +} + func (mp *MessagePool) getNonceLocked(ctx context.Context, addr address.Address, curTs *types.TipSet) (uint64, error) { stateNonce, err := mp.getStateNonce(ctx, addr, curTs) // sanity check if err != nil { @@ -973,11 +1001,11 @@ func (mp *MessagePool) getNonceLocked(ctx context.Context, addr address.Address, return stateNonce, nil } -func (mp *MessagePool) getStateNonce(ctx context.Context, addr address.Address, curTs *types.TipSet) (uint64, error) { +func (mp *MessagePool) getStateNonce(ctx context.Context, addr address.Address, ts *types.TipSet) (uint64, error) { done := metrics.Timer(ctx, metrics.MpoolGetNonceDuration) defer done() - act, err := mp.api.GetActorAfter(addr, curTs) + act, err := mp.api.GetActorAfter(addr, ts) if err != nil { return 0, err } diff --git a/chain/messagepool/messagepool_test.go b/chain/messagepool/messagepool_test.go index a7d7c0739..2ea8fdec0 100644 --- a/chain/messagepool/messagepool_test.go +++ b/chain/messagepool/messagepool_test.go @@ -106,6 +106,10 @@ func (tma *testMpoolAPI) PutMessage(m types.ChainMsg) (cid.Cid, error) { return cid.Undef, nil } +func (tma *testMpoolAPI) IsLite() bool { + return false +} + func (tma *testMpoolAPI) PubSubPublish(string, []byte) error { tma.published++ return nil @@ -201,7 +205,7 @@ func (tma *testMpoolAPI) ChainComputeBaseFee(ctx context.Context, ts *types.TipS func assertNonce(t *testing.T, mp *MessagePool, addr address.Address, val uint64) { t.Helper() - n, err := mp.GetNonce(context.Background(), addr, types.EmptyTSK) + n, err := mp.GetNonce(context.TODO(), addr, types.EmptyTSK) if err != nil { t.Fatal(err) } diff --git a/chain/messagepool/provider.go b/chain/messagepool/provider.go index b2cc26e58..0f904c52c 
100644 --- a/chain/messagepool/provider.go +++ b/chain/messagepool/provider.go @@ -9,6 +9,7 @@ import ( "golang.org/x/xerrors" "github.com/filecoin-project/go-address" + "github.com/filecoin-project/lotus/chain/messagesigner" "github.com/filecoin-project/lotus/chain/stmgr" "github.com/filecoin-project/lotus/chain/store" "github.com/filecoin-project/lotus/chain/types" @@ -30,11 +31,14 @@ type Provider interface { MessagesForTipset(*types.TipSet) ([]types.ChainMsg, error) LoadTipSet(tsk types.TipSetKey) (*types.TipSet, error) ChainComputeBaseFee(ctx context.Context, ts *types.TipSet) (types.BigInt, error) + IsLite() bool } type mpoolProvider struct { sm *stmgr.StateManager ps *pubsub.PubSub + + lite messagesigner.MpoolNonceAPI } var _ Provider = (*mpoolProvider)(nil) @@ -43,6 +47,14 @@ func NewProvider(sm *stmgr.StateManager, ps *pubsub.PubSub) Provider { return &mpoolProvider{sm: sm, ps: ps} } +func NewProviderLite(sm *stmgr.StateManager, ps *pubsub.PubSub, noncer messagesigner.MpoolNonceAPI) Provider { + return &mpoolProvider{sm: sm, ps: ps, lite: noncer} +} + +func (mpp *mpoolProvider) IsLite() bool { + return mpp.lite != nil +} + func (mpp *mpoolProvider) SubscribeHeadChanges(cb func(rev, app []*types.TipSet) error) *types.TipSet { mpp.sm.ChainStore().SubscribeHeadChanges( store.WrapHeadChangeCoalescer( @@ -63,6 +75,19 @@ func (mpp *mpoolProvider) PubSubPublish(k string, v []byte) error { } func (mpp *mpoolProvider) GetActorAfter(addr address.Address, ts *types.TipSet) (*types.Actor, error) { + if mpp.IsLite() { + n, err := mpp.lite.GetNonce(context.TODO(), addr, ts.Key()) + if err != nil { + return nil, xerrors.Errorf("getting nonce over lite: %w", err) + } + a, err := mpp.lite.GetActor(context.TODO(), addr, ts.Key()) + if err != nil { + return nil, xerrors.Errorf("getting actor over lite: %w", err) + } + a.Nonce = n + return a, nil + } + stcid, _, err := mpp.sm.TipSetState(context.TODO(), ts) if err != nil { return nil, xerrors.Errorf("computing tipset state 
for GetActor: %w", err) diff --git a/chain/messagepool/selection.go b/chain/messagepool/selection.go index 3684a7b80..611ab8e5f 100644 --- a/chain/messagepool/selection.go +++ b/chain/messagepool/selection.go @@ -11,9 +11,7 @@ import ( "github.com/filecoin-project/go-address" tbig "github.com/filecoin-project/go-state-types/big" - "github.com/filecoin-project/specs-actors/v3/actors/builtin" - "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/chain/messagepool/gasguess" "github.com/filecoin-project/lotus/chain/types" @@ -705,17 +703,6 @@ func (*MessagePool) getGasPerf(gasReward *big.Int, gasLimit int64) float64 { return r } -func isMessageMute(m *types.Message, ts *types.TipSet) bool { - if api.RunningNodeType != api.NodeFull || ts.Height() > build.UpgradeTurboHeight { - return false - } - - if m.To == builtin.StoragePowerActorAddr { - return m.Method == builtin.MethodsPower.CreateMiner - } - return false -} - func (mp *MessagePool) createMessageChains(actor address.Address, mset map[uint64]*types.SignedMessage, baseFee types.BigInt, ts *types.TipSet) []*msgChain { // collect all messages msgs := make([]*types.SignedMessage, 0, len(mset)) @@ -777,10 +764,6 @@ func (mp *MessagePool) createMessageChains(actor address.Address, mset map[uint6 break } - if isMessageMute(&m.Message, ts) { - break - } - balance = new(big.Int).Sub(balance, required) value := m.Message.Value.Int diff --git a/chain/messagesigner/messagesigner.go b/chain/messagesigner/messagesigner.go index c91f75632..063d1aa7d 100644 --- a/chain/messagesigner/messagesigner.go +++ b/chain/messagesigner/messagesigner.go @@ -24,6 +24,7 @@ var log = logging.Logger("messagesigner") type MpoolNonceAPI interface { GetNonce(context.Context, address.Address, types.TipSetKey) (uint64, error) + GetActor(context.Context, address.Address, types.TipSetKey) (*types.Actor, error) } // MessageSigner keeps track of nonces per address, and increments the nonce 
diff --git a/chain/messagesigner/messagesigner_test.go b/chain/messagesigner/messagesigner_test.go index 0d31e8255..20d9af38b 100644 --- a/chain/messagesigner/messagesigner_test.go +++ b/chain/messagesigner/messagesigner_test.go @@ -43,6 +43,9 @@ func (mp *mockMpool) GetNonce(_ context.Context, addr address.Address, _ types.T return mp.nonces[addr], nil } +func (mp *mockMpool) GetActor(_ context.Context, addr address.Address, _ types.TipSetKey) (*types.Actor, error) { + panic("don't use it") +} func TestMessageSignerSignMessage(t *testing.T) { ctx := context.Background() diff --git a/chain/state/statetree.go b/chain/state/statetree.go index d783fff69..8705aeff8 100644 --- a/chain/state/statetree.go +++ b/chain/state/statetree.go @@ -14,7 +14,6 @@ import ( "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/network" - "github.com/filecoin-project/lotus/chain/actors" init_ "github.com/filecoin-project/lotus/chain/actors/builtin/init" cbg "github.com/whyrusleeping/cbor-gen" @@ -143,11 +142,21 @@ func (ss *stateSnaps) deleteActor(addr address.Address) { // VersionForNetwork returns the state tree version for the given network // version. 
-func VersionForNetwork(ver network.Version) types.StateTreeVersion { - if actors.VersionForNetwork(ver) == actors.Version0 { - return types.StateTreeVersion0 +func VersionForNetwork(ver network.Version) (types.StateTreeVersion, error) { + switch ver { + case network.Version0, network.Version1, network.Version2, network.Version3: + return types.StateTreeVersion0, nil + case network.Version4, network.Version5, network.Version6, network.Version7, network.Version8, network.Version9: + return types.StateTreeVersion1, nil + case network.Version10, network.Version11: + return types.StateTreeVersion2, nil + case network.Version12: + return types.StateTreeVersion3, nil + case network.Version13: + return types.StateTreeVersion4, nil + default: + panic(fmt.Sprintf("unsupported network version %d", ver)) } - return types.StateTreeVersion1 } func NewStateTree(cst cbor.IpldStore, ver types.StateTreeVersion) (*StateTree, error) { @@ -155,7 +164,7 @@ func NewStateTree(cst cbor.IpldStore, ver types.StateTreeVersion) (*StateTree, e switch ver { case types.StateTreeVersion0: // info is undefined - case types.StateTreeVersion1, types.StateTreeVersion2: + case types.StateTreeVersion1, types.StateTreeVersion2, types.StateTreeVersion3, types.StateTreeVersion4: var err error info, err = cst.Put(context.TODO(), new(types.StateInfo0)) if err != nil { @@ -495,6 +504,26 @@ func (st *StateTree) MutateActor(addr address.Address, f func(*types.Actor) erro } func (st *StateTree) ForEach(f func(address.Address, *types.Actor) error) error { + // Walk through layers, if any. + seen := make(map[address.Address]struct{}) + for i := len(st.snaps.layers) - 1; i >= 0; i-- { + for addr, op := range st.snaps.layers[i].actors { + if _, ok := seen[addr]; ok { + continue + } + seen[addr] = struct{}{} + if op.Delete { + continue + } + act := op.Act // copy + if err := f(addr, &act); err != nil { + return err + } + } + + } + + // Now walk through the saved actors. 
var act types.Actor return st.root.ForEach(&act, func(k string) error { act := act // copy @@ -503,6 +532,12 @@ func (st *StateTree) ForEach(f func(address.Address, *types.Actor) error) error return xerrors.Errorf("invalid address (%x) found in state tree key: %w", []byte(k), err) } + // no need to record anything here, there are no duplicates in the actors HAMT + // iself. + if _, ok := seen[addr]; ok { + return nil + } + return f(addr, &act) }) } @@ -512,7 +547,7 @@ func (st *StateTree) Version() types.StateTreeVersion { return st.version } -func Diff(oldTree, newTree *StateTree) (map[string]types.Actor, error) { +func Diff(ctx context.Context, oldTree, newTree *StateTree) (map[string]types.Actor, error) { out := map[string]types.Actor{} var ( @@ -520,33 +555,38 @@ func Diff(oldTree, newTree *StateTree) (map[string]types.Actor, error) { buf = bytes.NewReader(nil) ) if err := newTree.root.ForEach(&ncval, func(k string) error { - var act types.Actor + select { + case <-ctx.Done(): + return ctx.Err() + default: + var act types.Actor - addr, err := address.NewFromBytes([]byte(k)) - if err != nil { - return xerrors.Errorf("address in state tree was not valid: %w", err) + addr, err := address.NewFromBytes([]byte(k)) + if err != nil { + return xerrors.Errorf("address in state tree was not valid: %w", err) + } + + found, err := oldTree.root.Get(abi.AddrKey(addr), &ocval) + if err != nil { + return err + } + + if found && bytes.Equal(ocval.Raw, ncval.Raw) { + return nil // not changed + } + + buf.Reset(ncval.Raw) + err = act.UnmarshalCBOR(buf) + buf.Reset(nil) + + if err != nil { + return err + } + + out[addr.String()] = act + + return nil } - - found, err := oldTree.root.Get(abi.AddrKey(addr), &ocval) - if err != nil { - return err - } - - if found && bytes.Equal(ocval.Raw, ncval.Raw) { - return nil // not changed - } - - buf.Reset(ncval.Raw) - err = act.UnmarshalCBOR(buf) - buf.Reset(nil) - - if err != nil { - return err - } - - out[addr.String()] = act - - return nil 
}); err != nil { return nil, err } diff --git a/chain/state/statetree_test.go b/chain/state/statetree_test.go index 91674337b..9177af312 100644 --- a/chain/state/statetree_test.go +++ b/chain/state/statetree_test.go @@ -5,11 +5,12 @@ import ( "fmt" "testing" + "github.com/filecoin-project/go-state-types/network" + "github.com/ipfs/go-cid" cbor "github.com/ipfs/go-ipld-cbor" address "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-state-types/network" builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin" "github.com/filecoin-project/lotus/build" @@ -45,7 +46,12 @@ func BenchmarkStateTreeSet(b *testing.B) { func BenchmarkStateTreeSetFlush(b *testing.B) { cst := cbor.NewMemCborStore() - st, err := NewStateTree(cst, VersionForNetwork(build.NewestNetworkVersion)) + sv, err := VersionForNetwork(build.NewestNetworkVersion) + if err != nil { + b.Fatal(err) + } + + st, err := NewStateTree(cst, sv) if err != nil { b.Fatal(err) } @@ -75,7 +81,12 @@ func BenchmarkStateTreeSetFlush(b *testing.B) { func TestResolveCache(t *testing.T) { cst := cbor.NewMemCborStore() - st, err := NewStateTree(cst, VersionForNetwork(build.NewestNetworkVersion)) + sv, err := VersionForNetwork(build.NewestNetworkVersion) + if err != nil { + t.Fatal(err) + } + + st, err := NewStateTree(cst, sv) if err != nil { t.Fatal(err) } @@ -172,7 +183,12 @@ func TestResolveCache(t *testing.T) { func BenchmarkStateTree10kGetActor(b *testing.B) { cst := cbor.NewMemCborStore() - st, err := NewStateTree(cst, VersionForNetwork(build.NewestNetworkVersion)) + sv, err := VersionForNetwork(build.NewestNetworkVersion) + if err != nil { + b.Fatal(err) + } + + st, err := NewStateTree(cst, sv) if err != nil { b.Fatal(err) } @@ -214,7 +230,12 @@ func BenchmarkStateTree10kGetActor(b *testing.B) { func TestSetCache(t *testing.T) { cst := cbor.NewMemCborStore() - st, err := NewStateTree(cst, VersionForNetwork(build.NewestNetworkVersion)) + sv, err := 
VersionForNetwork(build.NewestNetworkVersion) + if err != nil { + t.Fatal(err) + } + + st, err := NewStateTree(cst, sv) if err != nil { t.Fatal(err) } @@ -251,7 +272,13 @@ func TestSetCache(t *testing.T) { func TestSnapshots(t *testing.T) { ctx := context.Background() cst := cbor.NewMemCborStore() - st, err := NewStateTree(cst, VersionForNetwork(build.NewestNetworkVersion)) + + sv, err := VersionForNetwork(build.NewestNetworkVersion) + if err != nil { + t.Fatal(err) + } + + st, err := NewStateTree(cst, sv) if err != nil { t.Fatal(err) } @@ -334,8 +361,15 @@ func assertNotHas(t *testing.T, st *StateTree, addr address.Address) { func TestStateTreeConsistency(t *testing.T) { cst := cbor.NewMemCborStore() + // TODO: ActorUpgrade: this test tests pre actors v2 - st, err := NewStateTree(cst, VersionForNetwork(network.Version3)) + + sv, err := VersionForNetwork(network.Version3) + if err != nil { + t.Fatal(err) + } + + st, err := NewStateTree(cst, sv) if err != nil { t.Fatal(err) } diff --git a/chain/stmgr/call.go b/chain/stmgr/call.go index cfbf60a95..dc6da0f9c 100644 --- a/chain/stmgr/call.go +++ b/chain/stmgr/call.go @@ -39,25 +39,29 @@ func (sm *StateManager) Call(ctx context.Context, msg *types.Message, ts *types. } bstate := ts.ParentState() - bheight := ts.Height() + pts, err := sm.cs.LoadTipSet(ts.Parents()) + if err != nil { + return nil, xerrors.Errorf("failed to load parent tipset: %w", err) + } + pheight := pts.Height() // If we have to run an expensive migration, and we're not at genesis, // return an error because the migration will take too long. // // We allow this at height 0 for at-genesis migrations (for testing). - if bheight-1 > 0 && sm.hasExpensiveFork(ctx, bheight-1) { + if pheight > 0 && sm.hasExpensiveFork(ctx, pheight) { return nil, ErrExpensiveFork } // Run the (not expensive) migration. 
- bstate, err := sm.handleStateForks(ctx, bstate, bheight-1, nil, ts) + bstate, err = sm.handleStateForks(ctx, bstate, pheight, nil, ts) if err != nil { return nil, fmt.Errorf("failed to handle fork: %w", err) } vmopt := &vm.VMOpts{ StateBase: bstate, - Epoch: bheight, + Epoch: pheight + 1, Rand: store.NewChainRand(sm.cs, ts.Cids()), Bstore: sm.cs.StateBlockstore(), Syscalls: sm.cs.VMSys(), @@ -155,6 +159,11 @@ func (sm *StateManager) CallWithGas(ctx context.Context, msg *types.Message, pri return nil, xerrors.Errorf("computing tipset state: %w", err) } + state, err = sm.handleStateForks(ctx, state, ts.Height(), nil, ts) + if err != nil { + return nil, fmt.Errorf("failed to handle fork: %w", err) + } + r := store.NewChainRand(sm.cs, ts.Cids()) if span.IsRecordingEvents() { @@ -167,7 +176,7 @@ func (sm *StateManager) CallWithGas(ctx context.Context, msg *types.Message, pri vmopt := &vm.VMOpts{ StateBase: state, - Epoch: ts.Height(), + Epoch: ts.Height() + 1, Rand: r, Bstore: sm.cs.StateBlockstore(), Syscalls: sm.cs.VMSys(), @@ -239,24 +248,18 @@ func (sm *StateManager) CallWithGas(ctx context.Context, msg *types.Message, pri var errHaltExecution = fmt.Errorf("halt") func (sm *StateManager) Replay(ctx context.Context, ts *types.TipSet, mcid cid.Cid) (*types.Message, *vm.ApplyRet, error) { - var outm *types.Message - var outr *vm.ApplyRet + var finder messageFinder + // message to find + finder.mcid = mcid - _, _, err := sm.computeTipSetState(ctx, ts, func(c cid.Cid, m *types.Message, ret *vm.ApplyRet) error { - if c == mcid { - outm = m - outr = ret - return errHaltExecution - } - return nil - }) + _, _, err := sm.computeTipSetState(ctx, ts, &finder) if err != nil && !xerrors.Is(err, errHaltExecution) { return nil, nil, xerrors.Errorf("unexpected error during execution: %w", err) } - if outr == nil { + if finder.outr == nil { return nil, nil, xerrors.Errorf("given message not found in tipset") } - return outm, outr, nil + return finder.outm, finder.outr, nil } diff 
--git a/chain/stmgr/forks.go b/chain/stmgr/forks.go index ee5a26dea..bb87da44c 100644 --- a/chain/stmgr/forks.go +++ b/chain/stmgr/forks.go @@ -61,7 +61,7 @@ type MigrationCache interface { type MigrationFunc func( ctx context.Context, sm *StateManager, cache MigrationCache, - cb ExecCallback, oldState cid.Cid, + cb ExecMonitor, oldState cid.Cid, height abi.ChainEpoch, ts *types.TipSet, ) (newState cid.Cid, err error) @@ -292,7 +292,7 @@ func (us UpgradeSchedule) Validate() error { return nil } -func (sm *StateManager) handleStateForks(ctx context.Context, root cid.Cid, height abi.ChainEpoch, cb ExecCallback, ts *types.TipSet) (cid.Cid, error) { +func (sm *StateManager) handleStateForks(ctx context.Context, root cid.Cid, height abi.ChainEpoch, cb ExecMonitor, ts *types.TipSet) (cid.Cid, error) { retCid := root var err error u := sm.stateMigrations[height] @@ -472,7 +472,7 @@ func doTransfer(tree types.StateTree, from, to address.Address, amt abi.TokenAmo return nil } -func UpgradeFaucetBurnRecovery(ctx context.Context, sm *StateManager, _ MigrationCache, cb ExecCallback, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) { +func UpgradeFaucetBurnRecovery(ctx context.Context, sm *StateManager, _ MigrationCache, em ExecMonitor, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) { // Some initial parameters FundsForMiners := types.FromFil(1_000_000) LookbackEpoch := abi.ChainEpoch(32000) @@ -722,12 +722,12 @@ func UpgradeFaucetBurnRecovery(ctx context.Context, sm *StateManager, _ Migratio return cid.Undef, xerrors.Errorf("resultant state tree account balance was not correct: %s", total) } - if cb != nil { + if em != nil { // record the transfer in execution traces fakeMsg := makeFakeMsg(builtin.SystemActorAddr, builtin.SystemActorAddr, big.Zero(), uint64(epoch)) - if err := cb(fakeMsg.Cid(), fakeMsg, &vm.ApplyRet{ + if err := em.MessageApplied(ctx, ts, fakeMsg.Cid(), fakeMsg, &vm.ApplyRet{ MessageReceipt: *makeFakeRct(), 
ActorErr: nil, ExecutionTrace: types.ExecutionTrace{ @@ -740,7 +740,7 @@ func UpgradeFaucetBurnRecovery(ctx context.Context, sm *StateManager, _ Migratio }, Duration: 0, GasCosts: nil, - }); err != nil { + }, false); err != nil { return cid.Undef, xerrors.Errorf("recording transfers: %w", err) } } @@ -748,7 +748,7 @@ func UpgradeFaucetBurnRecovery(ctx context.Context, sm *StateManager, _ Migratio return tree.Flush(ctx) } -func UpgradeIgnition(ctx context.Context, sm *StateManager, _ MigrationCache, cb ExecCallback, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) { +func UpgradeIgnition(ctx context.Context, sm *StateManager, _ MigrationCache, cb ExecMonitor, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) { store := sm.cs.ActorStore(ctx) if build.UpgradeLiftoffHeight <= epoch { @@ -785,12 +785,12 @@ func UpgradeIgnition(ctx context.Context, sm *StateManager, _ MigrationCache, cb return cid.Undef, xerrors.Errorf("resetting genesis msig start epochs: %w", err) } - err = splitGenesisMultisig0(ctx, cb, split1, store, tree, 50, epoch) + err = splitGenesisMultisig0(ctx, cb, split1, store, tree, 50, epoch, ts) if err != nil { return cid.Undef, xerrors.Errorf("splitting first msig: %w", err) } - err = splitGenesisMultisig0(ctx, cb, split2, store, tree, 50, epoch) + err = splitGenesisMultisig0(ctx, cb, split2, store, tree, 50, epoch, ts) if err != nil { return cid.Undef, xerrors.Errorf("splitting second msig: %w", err) } @@ -803,7 +803,7 @@ func UpgradeIgnition(ctx context.Context, sm *StateManager, _ MigrationCache, cb return tree.Flush(ctx) } -func UpgradeRefuel(ctx context.Context, sm *StateManager, _ MigrationCache, cb ExecCallback, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) { +func UpgradeRefuel(ctx context.Context, sm *StateManager, _ MigrationCache, cb ExecMonitor, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) { store := sm.cs.ActorStore(ctx) tree, err := 
sm.StateTree(root) @@ -829,7 +829,7 @@ func UpgradeRefuel(ctx context.Context, sm *StateManager, _ MigrationCache, cb E return tree.Flush(ctx) } -func UpgradeActorsV2(ctx context.Context, sm *StateManager, _ MigrationCache, cb ExecCallback, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) { +func UpgradeActorsV2(ctx context.Context, sm *StateManager, _ MigrationCache, cb ExecMonitor, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) { buf := blockstore.NewTieredBstore(sm.cs.StateBlockstore(), blockstore.NewMemorySync()) store := store.ActorStore(ctx, buf) @@ -875,7 +875,7 @@ func UpgradeActorsV2(ctx context.Context, sm *StateManager, _ MigrationCache, cb return newRoot, nil } -func UpgradeLiftoff(ctx context.Context, sm *StateManager, _ MigrationCache, cb ExecCallback, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) { +func UpgradeLiftoff(ctx context.Context, sm *StateManager, _ MigrationCache, cb ExecMonitor, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) { tree, err := sm.StateTree(root) if err != nil { return cid.Undef, xerrors.Errorf("getting state tree: %w", err) @@ -889,7 +889,7 @@ func UpgradeLiftoff(ctx context.Context, sm *StateManager, _ MigrationCache, cb return tree.Flush(ctx) } -func UpgradeCalico(ctx context.Context, sm *StateManager, _ MigrationCache, cb ExecCallback, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) { +func UpgradeCalico(ctx context.Context, sm *StateManager, _ MigrationCache, cb ExecMonitor, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) { if build.BuildType != build.BuildMainnet { return root, nil } @@ -935,7 +935,7 @@ func UpgradeCalico(ctx context.Context, sm *StateManager, _ MigrationCache, cb E return newRoot, nil } -func terminateActor(ctx context.Context, tree *state.StateTree, addr address.Address, cb ExecCallback, epoch abi.ChainEpoch) error { +func terminateActor(ctx context.Context, tree 
*state.StateTree, addr address.Address, em ExecMonitor, epoch abi.ChainEpoch, ts *types.TipSet) error { a, err := tree.GetActor(addr) if xerrors.Is(err, types.ErrActorNotFound) { return types.ErrActorNotFound @@ -950,18 +950,18 @@ func terminateActor(ctx context.Context, tree *state.StateTree, addr address.Add return xerrors.Errorf("transferring terminated actor's balance: %w", err) } - if cb != nil { + if em != nil { // record the transfer in execution traces fakeMsg := makeFakeMsg(builtin.SystemActorAddr, addr, big.Zero(), uint64(epoch)) - if err := cb(fakeMsg.Cid(), fakeMsg, &vm.ApplyRet{ + if err := em.MessageApplied(ctx, ts, fakeMsg.Cid(), fakeMsg, &vm.ApplyRet{ MessageReceipt: *makeFakeRct(), ActorErr: nil, ExecutionTrace: trace, Duration: 0, GasCosts: nil, - }); err != nil { + }, false); err != nil { return xerrors.Errorf("recording transfers: %w", err) } } @@ -995,7 +995,7 @@ func terminateActor(ctx context.Context, tree *state.StateTree, addr address.Add return tree.SetActor(init_.Address, ia) } -func UpgradeActorsV3(ctx context.Context, sm *StateManager, cache MigrationCache, cb ExecCallback, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) { +func UpgradeActorsV3(ctx context.Context, sm *StateManager, cache MigrationCache, cb ExecMonitor, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) { // Use all the CPUs except 3. 
workerCount := runtime.NumCPU() - 3 if workerCount <= 0 { @@ -1019,7 +1019,7 @@ func UpgradeActorsV3(ctx context.Context, sm *StateManager, cache MigrationCache } if build.BuildType == build.BuildMainnet { - err := terminateActor(ctx, tree, build.ZeroAddress, cb, epoch) + err := terminateActor(ctx, tree, build.ZeroAddress, cb, epoch, ts) if err != nil && !xerrors.Is(err, types.ErrActorNotFound) { return cid.Undef, xerrors.Errorf("deleting zero bls actor: %w", err) } @@ -1097,7 +1097,7 @@ func upgradeActorsV3Common( return newRoot, nil } -func UpgradeActorsV4(ctx context.Context, sm *StateManager, cache MigrationCache, cb ExecCallback, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) { +func UpgradeActorsV4(ctx context.Context, sm *StateManager, cache MigrationCache, cb ExecMonitor, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) { // Use all the CPUs except 3. workerCount := runtime.NumCPU() - 3 if workerCount <= 0 { @@ -1183,7 +1183,7 @@ func upgradeActorsV4Common( return newRoot, nil } -func UpgradeActorsV5(ctx context.Context, sm *StateManager, cache MigrationCache, cb ExecCallback, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) { +func UpgradeActorsV5(ctx context.Context, sm *StateManager, cache MigrationCache, cb ExecMonitor, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) { // Use all the CPUs except 3. 
workerCount := runtime.NumCPU() - 3 if workerCount <= 0 { @@ -1296,7 +1296,7 @@ func setNetworkName(ctx context.Context, store adt.Store, tree *state.StateTree, return nil } -func splitGenesisMultisig0(ctx context.Context, cb ExecCallback, addr address.Address, store adt0.Store, tree *state.StateTree, portions uint64, epoch abi.ChainEpoch) error { +func splitGenesisMultisig0(ctx context.Context, em ExecMonitor, addr address.Address, store adt0.Store, tree *state.StateTree, portions uint64, epoch abi.ChainEpoch, ts *types.TipSet) error { if portions < 1 { return xerrors.Errorf("cannot split into 0 portions") } @@ -1393,12 +1393,12 @@ func splitGenesisMultisig0(ctx context.Context, cb ExecCallback, addr address.Ad i++ } - if cb != nil { + if em != nil { // record the transfer in execution traces fakeMsg := makeFakeMsg(builtin.SystemActorAddr, addr, big.Zero(), uint64(epoch)) - if err := cb(fakeMsg.Cid(), fakeMsg, &vm.ApplyRet{ + if err := em.MessageApplied(ctx, ts, fakeMsg.Cid(), fakeMsg, &vm.ApplyRet{ MessageReceipt: *makeFakeRct(), ActorErr: nil, ExecutionTrace: types.ExecutionTrace{ @@ -1411,7 +1411,7 @@ func splitGenesisMultisig0(ctx context.Context, cb ExecCallback, addr address.Ad }, Duration: 0, GasCosts: nil, - }); err != nil { + }, false); err != nil { return xerrors.Errorf("recording transfers: %w", err) } } diff --git a/chain/stmgr/forks_test.go b/chain/stmgr/forks_test.go index fe96ad610..6d22a294d 100644 --- a/chain/stmgr/forks_test.go +++ b/chain/stmgr/forks_test.go @@ -123,7 +123,7 @@ func TestForkHeightTriggers(t *testing.T) { cg.ChainStore(), UpgradeSchedule{{ Network: 1, Height: testForkHeight, - Migration: func(ctx context.Context, sm *StateManager, cache MigrationCache, cb ExecCallback, + Migration: func(ctx context.Context, sm *StateManager, cache MigrationCache, cb ExecMonitor, root cid.Cid, height abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) { cst := ipldcbor.NewCborStore(sm.ChainStore().StateBlockstore()) @@ -253,7 +253,7 @@ func 
TestForkRefuseCall(t *testing.T) { Network: 1, Expensive: true, Height: testForkHeight, - Migration: func(ctx context.Context, sm *StateManager, cache MigrationCache, cb ExecCallback, + Migration: func(ctx context.Context, sm *StateManager, cache MigrationCache, cb ExecMonitor, root cid.Cid, height abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) { return root, nil }}}) @@ -297,22 +297,26 @@ func TestForkRefuseCall(t *testing.T) { t.Fatal(err) } + pts, err := cg.ChainStore().LoadTipSet(ts.TipSet.TipSet().Parents()) + require.NoError(t, err) + parentHeight := pts.Height() + currentHeight := ts.TipSet.TipSet().Height() + + // CallWithGas calls _at_ the current tipset. ret, err := sm.CallWithGas(ctx, m, nil, ts.TipSet.TipSet()) - switch ts.TipSet.TipSet().Height() { - case testForkHeight, testForkHeight + 1: + if parentHeight <= testForkHeight && currentHeight >= testForkHeight { // If I had a fork, or I _will_ have a fork, it should fail. require.Equal(t, ErrExpensiveFork, err) - default: + } else { require.NoError(t, err) require.True(t, ret.MsgRct.ExitCode.IsSuccess()) } - // Call just runs on the parent state for a tipset, so we only - // expect an error at the fork height. + + // Call always applies the message to the "next block" after the tipset's parent state. 
ret, err = sm.Call(ctx, m, ts.TipSet.TipSet()) - switch ts.TipSet.TipSet().Height() { - case testForkHeight + 1: + if parentHeight == testForkHeight { require.Equal(t, ErrExpensiveFork, err) - default: + } else { require.NoError(t, err) require.True(t, ret.MsgRct.ExitCode.IsSuccess()) } @@ -363,7 +367,7 @@ func TestForkPreMigration(t *testing.T) { cg.ChainStore(), UpgradeSchedule{{ Network: 1, Height: testForkHeight, - Migration: func(ctx context.Context, sm *StateManager, cache MigrationCache, cb ExecCallback, + Migration: func(ctx context.Context, sm *StateManager, cache MigrationCache, cb ExecMonitor, root cid.Cid, height abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) { // Make sure the test that should be canceled, is canceled. diff --git a/chain/stmgr/stmgr.go b/chain/stmgr/stmgr.go index 2160c849e..4f1351d2c 100644 --- a/chain/stmgr/stmgr.go +++ b/chain/stmgr/stmgr.go @@ -103,6 +103,8 @@ type StateManager struct { genesisPledge abi.TokenAmount genesisMarketFunds abi.TokenAmount + + tsExecMonitor ExecMonitor } // Caches a single state tree @@ -171,6 +173,15 @@ func NewStateManagerWithUpgradeSchedule(cs *store.ChainStore, us UpgradeSchedule }, nil } +func NewStateManagerWithUpgradeScheduleAndMonitor(cs *store.ChainStore, us UpgradeSchedule, em ExecMonitor) (*StateManager, error) { + sm, err := NewStateManagerWithUpgradeSchedule(cs, us) + if err != nil { + return nil, err + } + sm.tsExecMonitor = em + return sm, nil +} + func cidsToKey(cids []cid.Cid) string { var out string for _, c := range cids { @@ -255,7 +266,7 @@ func (sm *StateManager) TipSetState(ctx context.Context, ts *types.TipSet) (st c return ts.Blocks()[0].ParentStateRoot, ts.Blocks()[0].ParentMessageReceipts, nil } - st, rec, err = sm.computeTipSetState(ctx, ts, nil) + st, rec, err = sm.computeTipSetState(ctx, ts, sm.tsExecMonitor) if err != nil { return cid.Undef, cid.Undef, err } @@ -263,39 +274,21 @@ func (sm *StateManager) TipSetState(ctx context.Context, ts *types.TipSet) (st c return st, 
rec, nil } -func traceFunc(trace *[]*api.InvocResult) func(mcid cid.Cid, msg *types.Message, ret *vm.ApplyRet) error { - return func(mcid cid.Cid, msg *types.Message, ret *vm.ApplyRet) error { - ir := &api.InvocResult{ - MsgCid: mcid, - Msg: msg, - MsgRct: &ret.MessageReceipt, - ExecutionTrace: ret.ExecutionTrace, - Duration: ret.Duration, - } - if ret.ActorErr != nil { - ir.Error = ret.ActorErr.Error() - } - if ret.GasCosts != nil { - ir.GasCost = MakeMsgGasCost(msg, ret) - } - *trace = append(*trace, ir) - return nil - } +func (sm *StateManager) ExecutionTraceWithMonitor(ctx context.Context, ts *types.TipSet, em ExecMonitor) (cid.Cid, error) { + st, _, err := sm.computeTipSetState(ctx, ts, em) + return st, err } func (sm *StateManager) ExecutionTrace(ctx context.Context, ts *types.TipSet) (cid.Cid, []*api.InvocResult, error) { - var trace []*api.InvocResult - st, _, err := sm.computeTipSetState(ctx, ts, traceFunc(&trace)) + var invocTrace []*api.InvocResult + st, err := sm.ExecutionTraceWithMonitor(ctx, ts, &InvocationTracer{trace: &invocTrace}) if err != nil { return cid.Undef, nil, err } - - return st, trace, nil + return st, invocTrace, nil } -type ExecCallback func(cid.Cid, *types.Message, *vm.ApplyRet) error - -func (sm *StateManager) ApplyBlocks(ctx context.Context, parentEpoch abi.ChainEpoch, pstate cid.Cid, bms []store.BlockMessages, epoch abi.ChainEpoch, r vm.Rand, cb ExecCallback, baseFee abi.TokenAmount, ts *types.TipSet) (cid.Cid, cid.Cid, error) { +func (sm *StateManager) ApplyBlocks(ctx context.Context, parentEpoch abi.ChainEpoch, pstate cid.Cid, bms []store.BlockMessages, epoch abi.ChainEpoch, r vm.Rand, em ExecMonitor, baseFee abi.TokenAmount, ts *types.TipSet) (cid.Cid, cid.Cid, error) { done := metrics.Timer(ctx, metrics.VMApplyBlocksTotal) defer done() @@ -341,8 +334,8 @@ func (sm *StateManager) ApplyBlocks(ctx context.Context, parentEpoch abi.ChainEp if err != nil { return err } - if cb != nil { - if err := cb(cronMsg.Cid(), cronMsg, ret); err 
!= nil { + if em != nil { + if err := em.MessageApplied(ctx, ts, cronMsg.Cid(), cronMsg, ret, true); err != nil { return xerrors.Errorf("callback failed on cron message: %w", err) } } @@ -368,7 +361,7 @@ func (sm *StateManager) ApplyBlocks(ctx context.Context, parentEpoch abi.ChainEp // handle state forks // XXX: The state tree - newState, err := sm.handleStateForks(ctx, pstate, i, cb, ts) + newState, err := sm.handleStateForks(ctx, pstate, i, em, ts) if err != nil { return cid.Undef, cid.Undef, xerrors.Errorf("error handling state forks: %w", err) } @@ -407,8 +400,8 @@ func (sm *StateManager) ApplyBlocks(ctx context.Context, parentEpoch abi.ChainEp gasReward = big.Add(gasReward, r.GasCosts.MinerTip) penalty = big.Add(penalty, r.GasCosts.MinerPenalty) - if cb != nil { - if err := cb(cm.Cid(), m, r); err != nil { + if em != nil { + if err := em.MessageApplied(ctx, ts, cm.Cid(), m, r, false); err != nil { return cid.Undef, cid.Undef, err } } @@ -440,8 +433,8 @@ func (sm *StateManager) ApplyBlocks(ctx context.Context, parentEpoch abi.ChainEp if actErr != nil { return cid.Undef, cid.Undef, xerrors.Errorf("failed to apply reward message for miner %s: %w", b.Miner, actErr) } - if cb != nil { - if err := cb(rwMsg.Cid(), rwMsg, ret); err != nil { + if em != nil { + if err := em.MessageApplied(ctx, ts, rwMsg.Cid(), rwMsg, ret, true); err != nil { return cid.Undef, cid.Undef, xerrors.Errorf("callback failed on reward message: %w", err) } } @@ -483,7 +476,7 @@ func (sm *StateManager) ApplyBlocks(ctx context.Context, parentEpoch abi.ChainEp return st, rectroot, nil } -func (sm *StateManager) computeTipSetState(ctx context.Context, ts *types.TipSet, cb ExecCallback) (cid.Cid, cid.Cid, error) { +func (sm *StateManager) computeTipSetState(ctx context.Context, ts *types.TipSet, em ExecMonitor) (cid.Cid, cid.Cid, error) { ctx, span := trace.StartSpan(ctx, "computeTipSetState") defer span.End() @@ -519,7 +512,7 @@ func (sm *StateManager) computeTipSetState(ctx context.Context, ts 
*types.TipSet baseFee := blks[0].ParentBaseFee - return sm.ApplyBlocks(ctx, parentEpoch, pstate, blkmsgs, blks[0].Height, r, cb, baseFee, ts) + return sm.ApplyBlocks(ctx, parentEpoch, pstate, blkmsgs, blks[0].Height, r, em, baseFee, ts) } func (sm *StateManager) parentState(ts *types.TipSet) cid.Cid { @@ -913,10 +906,7 @@ func (sm *StateManager) ListAllActors(ctx context.Context, ts *types.TipSet) ([] if ts == nil { ts = sm.cs.GetHeaviestTipSet() } - st, _, err := sm.TipSetState(ctx, ts) - if err != nil { - return nil, err - } + st := ts.ParentState() stateTree, err := sm.StateTree(st) if err != nil { diff --git a/chain/stmgr/tracers.go b/chain/stmgr/tracers.go new file mode 100644 index 000000000..6bcd7bc15 --- /dev/null +++ b/chain/stmgr/tracers.go @@ -0,0 +1,56 @@ +package stmgr + +import ( + "context" + + "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/chain/vm" + "github.com/ipfs/go-cid" +) + +type ExecMonitor interface { + // MessageApplied is called after a message has been applied. Returning an error will halt execution of any further messages. 
+ MessageApplied(ctx context.Context, ts *types.TipSet, mcid cid.Cid, msg *types.Message, ret *vm.ApplyRet, implicit bool) error +} + +var _ ExecMonitor = (*InvocationTracer)(nil) + +type InvocationTracer struct { + trace *[]*api.InvocResult +} + +func (i *InvocationTracer) MessageApplied(ctx context.Context, ts *types.TipSet, mcid cid.Cid, msg *types.Message, ret *vm.ApplyRet, implicit bool) error { + ir := &api.InvocResult{ + MsgCid: mcid, + Msg: msg, + MsgRct: &ret.MessageReceipt, + ExecutionTrace: ret.ExecutionTrace, + Duration: ret.Duration, + } + if ret.ActorErr != nil { + ir.Error = ret.ActorErr.Error() + } + if ret.GasCosts != nil { + ir.GasCost = MakeMsgGasCost(msg, ret) + } + *i.trace = append(*i.trace, ir) + return nil +} + +var _ ExecMonitor = (*messageFinder)(nil) + +type messageFinder struct { + mcid cid.Cid // the message cid to find + outm *types.Message + outr *vm.ApplyRet +} + +func (m *messageFinder) MessageApplied(ctx context.Context, ts *types.TipSet, mcid cid.Cid, msg *types.Message, ret *vm.ApplyRet, implicit bool) error { + if m.mcid == mcid { + m.outm = msg + m.outr = ret + return errHaltExecution // message was found, no need to continue + } + return nil +} diff --git a/chain/stmgr/utils.go b/chain/stmgr/utils.go index ce6d0bf2b..d2a2c6e60 100644 --- a/chain/stmgr/utils.go +++ b/chain/stmgr/utils.go @@ -107,8 +107,7 @@ func GetPowerRaw(ctx context.Context, sm *StateManager, st cid.Cid, maddr addres var found bool mpow, found, err = pas.MinerPower(maddr) if err != nil || !found { - // TODO: return an error when not found? 
- return power.Claim{}, power.Claim{}, false, err + return power.Claim{}, tpow, false, err } minpow, err = pas.MinerNominalPowerMeetsConsensusMinimum(maddr) @@ -343,7 +342,7 @@ func ComputeState(ctx context.Context, sm *StateManager, height abi.ChainEpoch, for i := ts.Height(); i < height; i++ { // handle state forks - base, err = sm.handleStateForks(ctx, base, i, traceFunc(&trace), ts) + base, err = sm.handleStateForks(ctx, base, i, &InvocationTracer{trace: &trace}, ts) if err != nil { return cid.Undef, nil, xerrors.Errorf("error handling state forks: %w", err) } diff --git a/chain/store/checkpoint_test.go b/chain/store/checkpoint_test.go new file mode 100644 index 000000000..81bbab6ea --- /dev/null +++ b/chain/store/checkpoint_test.go @@ -0,0 +1,89 @@ +package store_test + +import ( + "context" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/filecoin-project/lotus/chain/gen" +) + +func TestChainCheckpoint(t *testing.T) { + cg, err := gen.NewGenerator() + if err != nil { + t.Fatal(err) + } + + // Let the first miner mine some blocks. + last := cg.CurTipset.TipSet() + for i := 0; i < 4; i++ { + ts, err := cg.NextTipSetFromMiners(last, cg.Miners[:1], 0) + require.NoError(t, err) + + last = ts.TipSet.TipSet() + } + + cs := cg.ChainStore() + + checkpoint := last + checkpointParents, err := cs.GetTipSetFromKey(checkpoint.Parents()) + require.NoError(t, err) + + // Set the head to the block before the checkpoint. + err = cs.SetHead(checkpointParents) + require.NoError(t, err) + + // Verify it worked. + head := cs.GetHeaviestTipSet() + require.True(t, head.Equals(checkpointParents)) + + // Try to set the checkpoint in the future, it should fail. + err = cs.SetCheckpoint(checkpoint) + require.Error(t, err) + + // Then move the head back. + err = cs.SetHead(checkpoint) + require.NoError(t, err) + + // Verify it worked. + head = cs.GetHeaviestTipSet() + require.True(t, head.Equals(checkpoint)) + + // And checkpoint it. 
+ err = cs.SetCheckpoint(checkpoint) + require.NoError(t, err) + + // Let the second miner miner mine a fork + last = checkpointParents + for i := 0; i < 4; i++ { + ts, err := cg.NextTipSetFromMiners(last, cg.Miners[1:], 0) + require.NoError(t, err) + + last = ts.TipSet.TipSet() + } + + // See if the chain will take the fork, it shouldn't. + err = cs.MaybeTakeHeavierTipSet(context.Background(), last) + require.NoError(t, err) + head = cs.GetHeaviestTipSet() + require.True(t, head.Equals(checkpoint)) + + // Remove the checkpoint. + err = cs.RemoveCheckpoint() + require.NoError(t, err) + + // Now switch to the other fork. + err = cs.MaybeTakeHeavierTipSet(context.Background(), last) + require.NoError(t, err) + head = cs.GetHeaviestTipSet() + require.True(t, head.Equals(last)) + + // Setting a checkpoint on the other fork should fail. + err = cs.SetCheckpoint(checkpoint) + require.Error(t, err) + + // Setting a checkpoint on this fork should succeed. + err = cs.SetCheckpoint(checkpointParents) + require.NoError(t, err) +} diff --git a/chain/store/store.go b/chain/store/store.go index e2aa0100d..523726863 100644 --- a/chain/store/store.go +++ b/chain/store/store.go @@ -57,8 +57,11 @@ import ( var log = logging.Logger("chainstore") -var chainHeadKey = dstore.NewKey("head") -var blockValidationCacheKeyPrefix = dstore.NewKey("blockValidation") +var ( + chainHeadKey = dstore.NewKey("head") + checkpointKey = dstore.NewKey("/chain/checks") + blockValidationCacheKeyPrefix = dstore.NewKey("blockValidation") +) var DefaultTipSetCacheSize = 8192 var DefaultMsgMetaCacheSize = 2048 @@ -118,6 +121,7 @@ type ChainStore struct { heaviestLk sync.RWMutex heaviest *types.TipSet + checkpoint *types.TipSet bestTips *pubsub.PubSub pubLk sync.Mutex @@ -218,6 +222,15 @@ func (cs *ChainStore) Close() error { } func (cs *ChainStore) Load() error { + if err := cs.loadHead(); err != nil { + return err + } + if err := cs.loadCheckpoint(); err != nil { + return err + } + return nil +} +func (cs 
*ChainStore) loadHead() error { head, err := cs.metadataDs.Get(chainHeadKey) if err == dstore.ErrNotFound { log.Warn("no previous chain state found") @@ -242,6 +255,31 @@ func (cs *ChainStore) Load() error { return nil } +func (cs *ChainStore) loadCheckpoint() error { + tskBytes, err := cs.metadataDs.Get(checkpointKey) + if err == dstore.ErrNotFound { + return nil + } + if err != nil { + return xerrors.Errorf("failed to load checkpoint from datastore: %w", err) + } + + var tsk types.TipSetKey + err = json.Unmarshal(tskBytes, &tsk) + if err != nil { + return err + } + + ts, err := cs.LoadTipSet(tsk) + if err != nil { + return xerrors.Errorf("loading tipset: %w", err) + } + + cs.checkpoint = ts + + return nil +} + func (cs *ChainStore) writeHead(ts *types.TipSet) error { data, err := json.Marshal(ts.Cids()) if err != nil { @@ -455,6 +493,11 @@ func (cs *ChainStore) exceedsForkLength(synced, external *types.TipSet) (bool, e return false, nil } + // Now check to see if we've walked back to the checkpoint. + if synced.Equals(cs.checkpoint) { + return true, nil + } + // If we didn't, go back *one* tipset on the `synced` side (incrementing // the `forkLength`). if synced.Height() == 0 { @@ -483,6 +526,9 @@ func (cs *ChainStore) ForceHeadSilent(_ context.Context, ts *types.TipSet) error cs.heaviestLk.Lock() defer cs.heaviestLk.Unlock() + if err := cs.removeCheckpoint(); err != nil { + return err + } cs.heaviest = ts err := cs.writeHead(ts) @@ -660,13 +706,80 @@ func FlushValidationCache(ds datastore.Batching) error { } // SetHead sets the chainstores current 'best' head node. -// This should only be called if something is broken and needs fixing +// This should only be called if something is broken and needs fixing. +// +// This function will bypass and remove any checkpoints. 
func (cs *ChainStore) SetHead(ts *types.TipSet) error { cs.heaviestLk.Lock() defer cs.heaviestLk.Unlock() + if err := cs.removeCheckpoint(); err != nil { + return err + } return cs.takeHeaviestTipSet(context.TODO(), ts) } +// RemoveCheckpoint removes the current checkpoint. +func (cs *ChainStore) RemoveCheckpoint() error { + cs.heaviestLk.Lock() + defer cs.heaviestLk.Unlock() + return cs.removeCheckpoint() +} + +func (cs *ChainStore) removeCheckpoint() error { + if err := cs.metadataDs.Delete(checkpointKey); err != nil { + return err + } + cs.checkpoint = nil + return nil +} + +// SetCheckpoint will set a checkpoint past which the chainstore will not allow forks. +// +// NOTE: Checkpoints cannot be set beyond ForkLengthThreshold epochs in the past. +func (cs *ChainStore) SetCheckpoint(ts *types.TipSet) error { + tskBytes, err := json.Marshal(ts.Key()) + if err != nil { + return err + } + + cs.heaviestLk.Lock() + defer cs.heaviestLk.Unlock() + + if ts.Height() > cs.heaviest.Height() { + return xerrors.Errorf("cannot set a checkpoint in the future") + } + + // Otherwise, this operation could get _very_ expensive. + if cs.heaviest.Height()-ts.Height() > build.ForkLengthThreshold { + return xerrors.Errorf("cannot set a checkpoint before the fork threshold") + } + + if !ts.Equals(cs.heaviest) { + anc, err := cs.IsAncestorOf(ts, cs.heaviest) + if err != nil { + return xerrors.Errorf("cannot determine whether checkpoint tipset is in main-chain: %w", err) + } + + if !anc { + return xerrors.Errorf("cannot mark tipset as checkpoint, since it isn't in the main-chain: %w", err) + } + } + err = cs.metadataDs.Put(checkpointKey, tskBytes) + if err != nil { + return err + } + + cs.checkpoint = ts + return nil +} + +func (cs *ChainStore) GetCheckpoint() *types.TipSet { + cs.heaviestLk.RLock() + chkpt := cs.checkpoint + cs.heaviestLk.RUnlock() + return chkpt +} + // Contains returns whether our BlockStore has all blocks in the supplied TipSet. 
func (cs *ChainStore) Contains(ts *types.TipSet) (bool, error) { for _, c := range ts.Cids() { @@ -758,6 +871,14 @@ func (cs *ChainStore) NearestCommonAncestor(a, b *types.TipSet) (*types.TipSet, return cs.LoadTipSet(l[len(l)-1].Parents()) } +// ReorgOps takes two tipsets (which can be at different heights), and walks +// their corresponding chains backwards one step at a time until we find +// a common ancestor. It then returns the respective chain segments that fork +// from the identified ancestor, in reverse order, where the first element of +// each slice is the supplied tipset, and the last element is the common +// ancestor. +// +// If an error happens along the way, we return the error with nil slices. func (cs *ChainStore) ReorgOps(a, b *types.TipSet) ([]*types.TipSet, []*types.TipSet, error) { return ReorgOps(cs.LoadTipSet, a, b) } @@ -1156,6 +1277,9 @@ func (cs *ChainStore) ReadMsgMetaCids(mmc cid.Cid) ([]cid.Cid, []cid.Cid, error) return blscids, secpkcids, nil } +// GetPath returns the sequence of atomic head change operations that +// need to be applied in order to switch the head of the chain from the `from` +// tipset to the `to` tipset. 
func (cs *ChainStore) GetPath(ctx context.Context, from types.TipSetKey, to types.TipSetKey) ([]*api.HeadChange, error) { fts, err := cs.LoadTipSet(from) if err != nil { diff --git a/chain/sub/incoming.go b/chain/sub/incoming.go index 7452d31a9..115c33261 100644 --- a/chain/sub/incoming.go +++ b/chain/sub/incoming.go @@ -81,13 +81,13 @@ func HandleIncomingBlocks(ctx context.Context, bsub *pubsub.Subscription, s *cha log.Debug("about to fetch messages for block from pubsub") bmsgs, err := FetchMessagesByCids(ctx, ses, blk.BlsMessages) if err != nil { - log.Errorf("failed to fetch all bls messages for block received over pubusb: %s; source: %s", err, src) + log.Errorf("failed to fetch all bls messages for block received over pubsub: %s; source: %s", err, src) return } smsgs, err := FetchSignedMessagesByCids(ctx, ses, blk.SecpkMessages) if err != nil { - log.Errorf("failed to fetch all secpk messages for block received over pubusb: %s; source: %s", err, src) + log.Errorf("failed to fetch all secpk messages for block received over pubsub: %s; source: %s", err, src) return } diff --git a/chain/sync.go b/chain/sync.go index d908f3fd8..167856927 100644 --- a/chain/sync.go +++ b/chain/sync.go @@ -131,10 +131,6 @@ type Syncer struct { tickerCtxCancel context.CancelFunc - checkptLk sync.Mutex - - checkpt types.TipSetKey - ds dtypes.MetadataDS } @@ -152,14 +148,8 @@ func NewSyncer(ds dtypes.MetadataDS, sm *stmgr.StateManager, exchange exchange.C return nil, err } - cp, err := loadCheckpoint(ds) - if err != nil { - return nil, xerrors.Errorf("error loading mpool config: %w", err) - } - s := &Syncer{ ds: ds, - checkpt: cp, beacon: beacon, bad: NewBadBlockCache(), Genesis: gent, @@ -561,7 +551,7 @@ func (syncer *Syncer) Sync(ctx context.Context, maybeHead *types.TipSet) error { return nil } - if err := syncer.collectChain(ctx, maybeHead, hts); err != nil { + if err := syncer.collectChain(ctx, maybeHead, hts, false); err != nil { 
span.AddAttributes(trace.StringAttribute("col_error", err.Error())) span.SetStatus(trace.Status{ Code: 13, @@ -1257,7 +1247,7 @@ func extractSyncState(ctx context.Context) *SyncerState { // // All throughout the process, we keep checking if the received blocks are in // the deny list, and short-circuit the process if so. -func (syncer *Syncer) collectHeaders(ctx context.Context, incoming *types.TipSet, known *types.TipSet) ([]*types.TipSet, error) { +func (syncer *Syncer) collectHeaders(ctx context.Context, incoming *types.TipSet, known *types.TipSet, ignoreCheckpoint bool) ([]*types.TipSet, error) { ctx, span := trace.StartSpan(ctx, "collectHeaders") defer span.End() ss := extractSyncState(ctx) @@ -1426,7 +1416,7 @@ loop: // We have now ascertained that this is *not* a 'fast forward' log.Warnf("(fork detected) synced header chain (%s - %d) does not link to our best block (%s - %d)", incoming.Cids(), incoming.Height(), known.Cids(), known.Height()) - fork, err := syncer.syncFork(ctx, base, known) + fork, err := syncer.syncFork(ctx, base, known, ignoreCheckpoint) if err != nil { if xerrors.Is(err, ErrForkTooLong) || xerrors.Is(err, ErrForkCheckpoint) { // TODO: we're marking this block bad in the same way that we mark invalid blocks bad. Maybe distinguish? @@ -1452,11 +1442,14 @@ var ErrForkCheckpoint = fmt.Errorf("fork would require us to diverge from checkp // If the fork is too long (build.ForkLengthThreshold), or would cause us to diverge from the checkpoint (ErrForkCheckpoint), // we add the entire subchain to the denylist. Else, we find the common ancestor, and add the missing chain // fragment until the fork point to the returned []TipSet. 
-func (syncer *Syncer) syncFork(ctx context.Context, incoming *types.TipSet, known *types.TipSet) ([]*types.TipSet, error) { +func (syncer *Syncer) syncFork(ctx context.Context, incoming *types.TipSet, known *types.TipSet, ignoreCheckpoint bool) ([]*types.TipSet, error) { - chkpt := syncer.GetCheckpoint() - if known.Key() == chkpt { - return nil, ErrForkCheckpoint + var chkpt *types.TipSet + if !ignoreCheckpoint { + chkpt = syncer.store.GetCheckpoint() + if known.Equals(chkpt) { + return nil, ErrForkCheckpoint + } } // TODO: Does this mean we always ask for ForkLengthThreshold blocks from the network, even if we just need, like, 2? Yes. @@ -1498,7 +1491,7 @@ func (syncer *Syncer) syncFork(ctx context.Context, incoming *types.TipSet, know } // We will be forking away from nts, check that it isn't checkpointed - if nts.Key() == chkpt { + if nts.Equals(chkpt) { return nil, ErrForkCheckpoint } @@ -1709,14 +1702,14 @@ func persistMessages(ctx context.Context, bs bstore.Blockstore, bst *exchange.Co // // 3. StageMessages: having acquired the headers and found a common tipset, // we then move forward, requesting the full blocks, including the messages. 
-func (syncer *Syncer) collectChain(ctx context.Context, ts *types.TipSet, hts *types.TipSet) error { +func (syncer *Syncer) collectChain(ctx context.Context, ts *types.TipSet, hts *types.TipSet, ignoreCheckpoint bool) error { ctx, span := trace.StartSpan(ctx, "collectChain") defer span.End() ss := extractSyncState(ctx) ss.Init(hts, ts) - headers, err := syncer.collectHeaders(ctx, ts, hts) + headers, err := syncer.collectHeaders(ctx, ts, hts, ignoreCheckpoint) if err != nil { ss.Error(err) return err diff --git a/chain/sync_test.go b/chain/sync_test.go index a1bd4794b..5312dff0b 100644 --- a/chain/sync_test.go +++ b/chain/sync_test.go @@ -279,7 +279,7 @@ func (tu *syncTestUtil) addSourceNode(gen int) { stop, err := node.New(tu.ctx, node.FullAPI(&out), - node.Online(), + node.Base(), node.Repo(sourceRepo), node.MockHost(tu.mn), node.Test(), @@ -310,10 +310,11 @@ func (tu *syncTestUtil) addClientNode() int { var out api.FullNode + r := repo.NewMemory(nil) stop, err := node.New(tu.ctx, node.FullAPI(&out), - node.Online(), - node.Repo(repo.NewMemory(nil)), + node.Base(), + node.Repo(r), node.MockHost(tu.mn), node.Test(), @@ -404,12 +405,15 @@ func (tu *syncTestUtil) checkpointTs(node int, tsk types.TipSetKey) { require.NoError(tu.t, tu.nds[node].SyncCheckpoint(context.TODO(), tsk)) } +func (tu *syncTestUtil) nodeHasTs(node int, tsk types.TipSetKey) bool { + _, err := tu.nds[node].ChainGetTipSet(context.TODO(), tsk) + return err == nil +} + func (tu *syncTestUtil) waitUntilNodeHasTs(node int, tsk types.TipSetKey) { - for { - _, err := tu.nds[node].ChainGetTipSet(context.TODO(), tsk) - if err != nil { - break - } + for !tu.nodeHasTs(node, tsk) { + // Time to allow for syncing and validation + time.Sleep(10 * time.Millisecond) } // Time to allow for syncing and validation @@ -434,12 +438,18 @@ func (tu *syncTestUtil) waitUntilSyncTarget(to int, target *types.TipSet) { tu.t.Fatal(err) } - // TODO: some sort of timeout? 
- for n := range hc { - for _, c := range n { - if c.Val.Equals(target) { - return + timeout := time.After(5 * time.Second) + + for { + select { + case n := <-hc: + for _, c := range n { + if c.Val.Equals(target) { + return + } } + case <-timeout: + tu.t.Fatal("waitUntilSyncTarget timeout") } } } @@ -576,15 +586,20 @@ func TestSyncFork(t *testing.T) { tu.loadChainToNode(p1) tu.loadChainToNode(p2) - phead := func() { + printHead := func() { h1, err := tu.nds[1].ChainHead(tu.ctx) require.NoError(tu.t, err) h2, err := tu.nds[2].ChainHead(tu.ctx) require.NoError(tu.t, err) - fmt.Println("Node 1: ", h1.Cids(), h1.Parents(), h1.Height()) - fmt.Println("Node 2: ", h2.Cids(), h1.Parents(), h2.Height()) + w1, err := tu.nds[1].(*impl.FullNodeAPI).ChainAPI.Chain.Weight(tu.ctx, h1) + require.NoError(tu.t, err) + w2, err := tu.nds[2].(*impl.FullNodeAPI).ChainAPI.Chain.Weight(tu.ctx, h2) + require.NoError(tu.t, err) + + fmt.Println("Node 1: ", h1.Cids(), h1.Parents(), h1.Height(), w1) + fmt.Println("Node 2: ", h2.Cids(), h2.Parents(), h2.Height(), w2) //time.Sleep(time.Second * 2) fmt.Println() fmt.Println() @@ -592,7 +607,7 @@ func TestSyncFork(t *testing.T) { fmt.Println() } - phead() + printHead() base := tu.g.CurTipset fmt.Println("Mining base: ", base.TipSet().Cids(), base.TipSet().Height()) @@ -612,6 +627,8 @@ func TestSyncFork(t *testing.T) { fmt.Println("A: ", a.Cids(), a.TipSet().Height()) fmt.Println("B: ", b.Cids(), b.TipSet().Height()) + printHead() + // Now for the fun part!! require.NoError(t, tu.mn.LinkAll()) @@ -619,7 +636,7 @@ func TestSyncFork(t *testing.T) { tu.waitUntilSyncTarget(p1, b.TipSet()) tu.waitUntilSyncTarget(p2, b.TipSet()) - phead() + printHead() } // This test crafts a tipset with 2 blocks, A and B. 
@@ -920,8 +937,6 @@ func TestSyncInputs(t *testing.T) { } func TestSyncCheckpointHead(t *testing.T) { - t.Skip("flaky") - H := 10 tu := prepSyncTest(t, H) @@ -959,13 +974,16 @@ func TestSyncCheckpointHead(t *testing.T) { tu.connect(p1, p2) tu.waitUntilNodeHasTs(p1, b.TipSet().Key()) p1Head := tu.getHead(p1) - require.Equal(tu.t, p1Head, a.TipSet()) + require.True(tu.t, p1Head.Equals(a.TipSet())) tu.assertBad(p1, b.TipSet()) + + // Should be able to switch forks. + tu.checkpointTs(p1, b.TipSet().Key()) + p1Head = tu.getHead(p1) + require.True(tu.t, p1Head.Equals(b.TipSet())) } func TestSyncCheckpointEarlierThanHead(t *testing.T) { - t.Skip("flaky") - H := 10 tu := prepSyncTest(t, H) @@ -1003,8 +1021,13 @@ func TestSyncCheckpointEarlierThanHead(t *testing.T) { tu.connect(p1, p2) tu.waitUntilNodeHasTs(p1, b.TipSet().Key()) p1Head := tu.getHead(p1) - require.Equal(tu.t, p1Head, a.TipSet()) + require.True(tu.t, p1Head.Equals(a.TipSet())) tu.assertBad(p1, b.TipSet()) + + // Should be able to switch forks. 
+ tu.checkpointTs(p1, b.TipSet().Key()) + p1Head = tu.getHead(p1) + require.True(tu.t, p1Head.Equals(b.TipSet())) } func TestDrandNull(t *testing.T) { diff --git a/chain/types/bigint.go b/chain/types/bigint.go index da4857d5b..72ef52128 100644 --- a/chain/types/bigint.go +++ b/chain/types/bigint.go @@ -47,6 +47,11 @@ func BigDiv(a, b BigInt) BigInt { return BigInt{Int: big.NewInt(0).Div(a.Int, b.Int)} } +func BigDivFloat(num, den BigInt) float64 { + res, _ := new(big.Rat).SetFrac(num.Int, den.Int).Float64() + return res +} + func BigMod(a, b BigInt) BigInt { return BigInt{Int: big.NewInt(0).Mod(a.Int, b.Int)} } diff --git a/chain/types/fil.go b/chain/types/fil.go index 0d6762851..21125e6d6 100644 --- a/chain/types/fil.go +++ b/chain/types/fil.go @@ -51,6 +51,15 @@ func (f FIL) Short() string { return strings.TrimRight(strings.TrimRight(r.FloatString(3), "0"), ".") + " " + prefix + "FIL" } +func (f FIL) Nano() string { + r := new(big.Rat).SetFrac(f.Int, big.NewInt(int64(1e9))) + if r.Sign() == 0 { + return "0" + } + + return strings.TrimRight(strings.TrimRight(r.FloatString(9), "0"), ".") + " nFIL" +} + func (f FIL) Format(s fmt.State, ch rune) { switch ch { case 's', 'v': diff --git a/chain/types/tipset_key.go b/chain/types/tipset_key.go index e5bc7750d..9f9887796 100644 --- a/chain/types/tipset_key.go +++ b/chain/types/tipset_key.go @@ -47,7 +47,7 @@ func NewTipSetKey(cids ...cid.Cid) TipSetKey { func TipSetKeyFromBytes(encoded []byte) (TipSetKey, error) { _, err := decodeKey(encoded) if err != nil { - return TipSetKey{}, err + return EmptyTSK, err } return TipSetKey{string(encoded)}, nil } diff --git a/chain/types/tipset_key_test.go b/chain/types/tipset_key_test.go index 7b3ce439d..73c1ca9df 100644 --- a/chain/types/tipset_key_test.go +++ b/chain/types/tipset_key_test.go @@ -19,7 +19,7 @@ func TestTipSetKey(t *testing.T) { fmt.Println(len(c1.Bytes())) t.Run("zero value", func(t *testing.T) { - assert.Equal(t, TipSetKey{}, NewTipSetKey()) + assert.Equal(t, 
EmptyTSK, NewTipSetKey()) }) t.Run("CID extraction", func(t *testing.T) { diff --git a/chain/vm/runtime.go b/chain/vm/runtime.go index 7c40fed62..2845c7696 100644 --- a/chain/vm/runtime.go +++ b/chain/vm/runtime.go @@ -229,7 +229,7 @@ func (rt *Runtime) GetRandomnessFromTickets(personalization crypto.DomainSeparat func (rt *Runtime) GetRandomnessFromBeacon(personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) abi.Randomness { var err error var res []byte - if rt.vm.GetNtwkVersion(rt.ctx, randEpoch) >= network.Version13 { + if randEpoch > build.UpgradeHyperdriveHeight { res, err = rt.vm.rand.GetBeaconRandomnessLookingForward(rt.ctx, personalization, randEpoch, entropy) } else { res, err = rt.vm.rand.GetBeaconRandomnessLookingBack(rt.ctx, personalization, randEpoch, entropy) diff --git a/chain/vm/syscalls.go b/chain/vm/syscalls.go index bb93fce8d..0cbefd1fd 100644 --- a/chain/vm/syscalls.go +++ b/chain/vm/syscalls.go @@ -267,7 +267,7 @@ func (ss *syscallShim) VerifySeal(info proof5.SealVerifyInfo) error { proof := info.Proof seed := []byte(info.InteractiveRandomness) - log.Debugf("Verif r:%x; d:%x; m:%s; t:%x; s:%x; N:%d; p:%x", info.SealedCID, info.UnsealedCID, miner, ticket, seed, info.SectorID.Number, proof) + log.Debugf("Verif r:%s; d:%s; m:%s; t:%x; s:%x; N:%d; p:%x", info.SealedCID, info.UnsealedCID, miner, ticket, seed, info.SectorID.Number, proof) //func(ctx context.Context, maddr address.Address, ssize abi.SectorSize, commD, commR, ticket, proof, seed []byte, sectorID abi.SectorNumber) ok, err := ss.verifier.VerifySeal(info) diff --git a/chain/vm/vm.go b/chain/vm/vm.go index d3fc9ee7a..5a31187b7 100644 --- a/chain/vm/vm.go +++ b/chain/vm/vm.go @@ -4,7 +4,6 @@ import ( "bytes" "context" "fmt" - "reflect" "sync/atomic" "time" @@ -203,7 +202,8 @@ type ( ) type VM struct { - cstate *state.StateTree + cstate *state.StateTree + // TODO: Is base actually used? Can we delete it? 
base cid.Cid cst *cbor.BasicIpldStore buf *blockstore.BufferedBlockstore @@ -439,6 +439,8 @@ func (vm *VM) ApplyMessage(ctx context.Context, cmsg types.ChainMsg) (*ApplyRet, }, GasCosts: &gasOutputs, Duration: time.Since(start), + ActorErr: aerrors.Newf(exitcode.SysErrOutOfGas, + "message gas limit does not cover on-chain gas costs"), }, nil } @@ -669,35 +671,10 @@ func (vm *VM) Flush(ctx context.Context) (cid.Cid, error) { return root, nil } -// MutateState usage: MutateState(ctx, idAddr, func(cst cbor.IpldStore, st *ActorStateType) error {...}) -func (vm *VM) MutateState(ctx context.Context, addr address.Address, fn interface{}) error { - act, err := vm.cstate.GetActor(addr) - if err != nil { - return xerrors.Errorf("actor not found: %w", err) - } - - st := reflect.New(reflect.TypeOf(fn).In(1).Elem()) - if err := vm.cst.Get(ctx, act.Head, st.Interface()); err != nil { - return xerrors.Errorf("read actor head: %w", err) - } - - out := reflect.ValueOf(fn).Call([]reflect.Value{reflect.ValueOf(vm.cst), st}) - if !out[0].IsNil() && out[0].Interface().(error) != nil { - return out[0].Interface().(error) - } - - head, err := vm.cst.Put(ctx, st.Interface()) - if err != nil { - return xerrors.Errorf("put new actor head: %w", err) - } - - act.Head = head - - if err := vm.cstate.SetActor(addr, act); err != nil { - return xerrors.Errorf("set actor: %w", err) - } - - return nil +// Get the buffered blockstore associated with the VM. This includes any temporary blocks produced +// during this VM's execution. 
+func (vm *VM) ActorStore(ctx context.Context) adt.Store { + return adt.WrapStore(ctx, vm.cst) } func linksForObj(blk block.Block, cb func(cid.Cid)) error { diff --git a/chain/wallet/multi.go b/chain/wallet/multi.go index 1fee4f040..a88475c2e 100644 --- a/chain/wallet/multi.go +++ b/chain/wallet/multi.go @@ -4,6 +4,7 @@ import ( "context" "go.uber.org/fx" + "go.uber.org/multierr" "golang.org/x/xerrors" "github.com/filecoin-project/go-address" @@ -56,18 +57,18 @@ func nonNil(wallets ...getif) []api.Wallet { func (m MultiWallet) find(ctx context.Context, address address.Address, wallets ...getif) (api.Wallet, error) { ws := nonNil(wallets...) + var merr error + for _, w := range ws { have, err := w.WalletHas(ctx, address) - if err != nil { - return nil, err - } + merr = multierr.Append(merr, err) - if have { + if err == nil && have { return w, nil } } - return nil, nil + return nil, merr } func (m MultiWallet) WalletNew(ctx context.Context, keyType types.KeyType) (address.Address, error) { diff --git a/cli/chain.go b/cli/chain.go index 9954813de..e30a685dd 100644 --- a/cli/chain.go +++ b/cli/chain.go @@ -31,6 +31,7 @@ import ( cbg "github.com/whyrusleeping/cbor-gen" "golang.org/x/xerrors" + "github.com/filecoin-project/lotus/api" lapi "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/api/v0api" "github.com/filecoin-project/lotus/build" @@ -535,7 +536,7 @@ var ChainListCmd = &cli.Command{ Aliases: []string{"love"}, Usage: "View a segment of the chain", Flags: []cli.Flag{ - &cli.Uint64Flag{Name: "height"}, + &cli.Uint64Flag{Name: "height", DefaultText: "current head"}, &cli.IntFlag{Name: "count", Value: 30}, &cli.StringFlag{ Name: "format", @@ -723,12 +724,6 @@ var ChainGetCmd = &cli.Command{ return err } - if ts == nil { - ts, err = api.ChainHead(ctx) - if err != nil { - return err - } - } p = "/ipfs/" + ts.ParentState().String() + p if cctx.Bool("verbose") { fmt.Println(p) @@ -1035,7 +1030,9 @@ var ChainExportCmd = &cli.Command{ ArgsUsage: 
"[outputPath]", Flags: []cli.Flag{ &cli.StringFlag{ - Name: "tipset", + Name: "tipset", + Usage: "specify tipset to start the export from", + Value: "@head", }, &cli.Int64Flag{ Name: "recent-stateroots", @@ -1122,11 +1119,13 @@ var SlashConsensusFault = &cli.Command{ }, }, Action: func(cctx *cli.Context) error { - api, closer, err := GetFullNodeAPI(cctx) + srv, err := GetFullNodeServices(cctx) if err != nil { return err } - defer closer() + defer srv.Close() //nolint:errcheck + + a := srv.FullNodeAPI() ctx := ReqContext(cctx) c1, err := cid.Parse(cctx.Args().Get(0)) @@ -1134,7 +1133,7 @@ var SlashConsensusFault = &cli.Command{ return xerrors.Errorf("parsing cid 1: %w", err) } - b1, err := api.ChainGetBlock(ctx, c1) + b1, err := a.ChainGetBlock(ctx, c1) if err != nil { return xerrors.Errorf("getting block 1: %w", err) } @@ -1144,7 +1143,7 @@ var SlashConsensusFault = &cli.Command{ return xerrors.Errorf("parsing cid 2: %w", err) } - b2, err := api.ChainGetBlock(ctx, c2) + b2, err := a.ChainGetBlock(ctx, c2) if err != nil { return xerrors.Errorf("getting block 2: %w", err) } @@ -1155,7 +1154,7 @@ var SlashConsensusFault = &cli.Command{ var fromAddr address.Address if from := cctx.String("from"); from == "" { - defaddr, err := api.WalletDefaultAddress(ctx) + defaddr, err := a.WalletDefaultAddress(ctx) if err != nil { return err } @@ -1191,7 +1190,7 @@ var SlashConsensusFault = &cli.Command{ return xerrors.Errorf("parsing cid extra: %w", err) } - bExtra, err := api.ChainGetBlock(ctx, cExtra) + bExtra, err := a.ChainGetBlock(ctx, cExtra) if err != nil { return xerrors.Errorf("getting block extra: %w", err) } @@ -1209,15 +1208,17 @@ var SlashConsensusFault = &cli.Command{ return err } - msg := &types.Message{ - To: b2.Miner, - From: fromAddr, - Value: types.NewInt(0), - Method: builtin.MethodsMiner.ReportConsensusFault, - Params: enc, + proto := &api.MessagePrototype{ + Message: types.Message{ + To: b2.Miner, + From: fromAddr, + Value: types.NewInt(0), + Method: 
builtin.MethodsMiner.ReportConsensusFault, + Params: enc, + }, } - smsg, err := api.MpoolPushMessage(ctx, msg, nil) + smsg, err := InteractiveSend(ctx, cctx, srv, proto) if err != nil { return err } diff --git a/cli/client.go b/cli/client.go index cd1d3d505..774d9aa5f 100644 --- a/cli/client.go +++ b/cli/client.go @@ -92,6 +92,7 @@ var clientCmd = &cli.Command{ WithCategory("retrieval", clientFindCmd), WithCategory("retrieval", clientRetrieveCmd), WithCategory("retrieval", clientCancelRetrievalDealCmd), + WithCategory("retrieval", clientListRetrievalsCmd), WithCategory("util", clientCommPCmd), WithCategory("util", clientCarGenCmd), WithCategory("util", clientBalancesCmd), @@ -307,8 +308,8 @@ var clientDealCmd = &cli.Command{ Description: `Make a deal with a miner. dataCid comes from running 'lotus client import'. miner is the address of the miner you wish to make a deal with. -price is measured in FIL/GB/Epoch. Miners usually don't accept a bid -lower than their advertised ask. You can check a miners listed price +price is measured in FIL/Epoch. Miners usually don't accept a bid +lower than their advertised ask (which is in FIL/GiB/Epoch). You can check a miners listed price with 'lotus client query-ask '. duration is how long the miner should store the data for, in blocks. 
The minimum value is 518400 (6 months).`, @@ -322,6 +323,10 @@ The minimum value is 518400 (6 months).`, Name: "manual-piece-size", Usage: "if manually specifying piece cid, used to specify size (dataCid must be to a car file)", }, + &cli.BoolFlag{ + Name: "manual-stateless-deal", + Usage: "instructs the node to send an offline deal without registering it with the deallist/fsm", + }, &cli.StringFlag{ Name: "from", Usage: "specify address to fund the deal with", @@ -461,7 +466,7 @@ The minimum value is 518400 (6 months).`, isVerified = verifiedDealParam } - proposal, err := api.ClientStartDeal(ctx, &lapi.StartDealParams{ + sdParams := &lapi.StartDealParams{ Data: ref, Wallet: a, Miner: miner, @@ -471,7 +476,18 @@ The minimum value is 518400 (6 months).`, FastRetrieval: cctx.Bool("fast-retrieval"), VerifiedDeal: isVerified, ProviderCollateral: provCol, - }) + } + + var proposal *cid.Cid + if cctx.Bool("manual-stateless-deal") { + if ref.TransferType != storagemarket.TTManual || price.Int64() != 0 { + return xerrors.New("when manual-stateless-deal is enabled, you must also provide a 'price' of 0 and specify 'manual-piece-cid' and 'manual-piece-size'") + } + proposal, err = api.ClientStatelessDeal(ctx, sdParams) + } else { + proposal, err = api.ClientStartDeal(ctx, sdParams) + } + if err != nil { return err } @@ -1169,6 +1185,8 @@ var clientRetrieveCmd = &cli.Command{ return xerrors.Errorf("error setting up retrieval: %w", err) } + var prevStatus retrievalmarket.DealStatus + for { select { case evt, ok := <-updates: @@ -1179,14 +1197,23 @@ var clientRetrieveCmd = &cli.Command{ retrievalmarket.ClientEvents[evt.Event], retrievalmarket.DealStatuses[evt.Status], ) - } else { - afmt.Println("Success") - return nil + prevStatus = evt.Status } if evt.Err != "" { return xerrors.Errorf("retrieval failed: %s", evt.Err) } + + if !ok { + if prevStatus == retrievalmarket.DealStatusCompleted { + afmt.Println("Success") + } else { + afmt.Printf("saw final deal state %s instead of 
expected success state DealStatusCompleted\n", + retrievalmarket.DealStatuses[prevStatus]) + } + return nil + } + case <-ctx.Done(): return xerrors.Errorf("retrieval timed out") } @@ -1194,6 +1221,198 @@ var clientRetrieveCmd = &cli.Command{ }, } +var clientListRetrievalsCmd = &cli.Command{ + Name: "list-retrievals", + Usage: "List retrieval market deals", + Flags: []cli.Flag{ + &cli.BoolFlag{ + Name: "verbose", + Aliases: []string{"v"}, + Usage: "print verbose deal details", + }, + &cli.BoolFlag{ + Name: "color", + Usage: "use color in display output", + DefaultText: "depends on output being a TTY", + }, + &cli.BoolFlag{ + Name: "show-failed", + Usage: "show failed/failing deals", + Value: true, + }, + &cli.BoolFlag{ + Name: "completed", + Usage: "show completed retrievals", + }, + &cli.BoolFlag{ + Name: "watch", + Usage: "watch deal updates in real-time, rather than a one time list", + }, + }, + Action: func(cctx *cli.Context) error { + if cctx.IsSet("color") { + color.NoColor = !cctx.Bool("color") + } + + api, closer, err := GetFullNodeAPI(cctx) + if err != nil { + return err + } + defer closer() + ctx := ReqContext(cctx) + + verbose := cctx.Bool("verbose") + watch := cctx.Bool("watch") + showFailed := cctx.Bool("show-failed") + completed := cctx.Bool("completed") + + localDeals, err := api.ClientListRetrievals(ctx) + if err != nil { + return err + } + + if watch { + updates, err := api.ClientGetRetrievalUpdates(ctx) + if err != nil { + return err + } + + for { + tm.Clear() + tm.MoveCursor(1, 1) + + err = outputRetrievalDeals(ctx, tm.Screen, localDeals, verbose, showFailed, completed) + if err != nil { + return err + } + + tm.Flush() + + select { + case <-ctx.Done(): + return nil + case updated := <-updates: + var found bool + for i, existing := range localDeals { + if existing.ID == updated.ID { + localDeals[i] = updated + found = true + break + } + } + if !found { + localDeals = append(localDeals, updated) + } + } + } + } + + return outputRetrievalDeals(ctx, 
cctx.App.Writer, localDeals, verbose, showFailed, completed) + }, +} + +func isTerminalError(status retrievalmarket.DealStatus) bool { + // should patch this in go-fil-markets but to solve the problem immediate and not have buggy output + return retrievalmarket.IsTerminalError(status) || status == retrievalmarket.DealStatusErrored || status == retrievalmarket.DealStatusCancelled +} +func outputRetrievalDeals(ctx context.Context, out io.Writer, localDeals []lapi.RetrievalInfo, verbose bool, showFailed bool, completed bool) error { + var deals []api.RetrievalInfo + for _, deal := range localDeals { + if !showFailed && isTerminalError(deal.Status) { + continue + } + if !completed && retrievalmarket.IsTerminalSuccess(deal.Status) { + continue + } + deals = append(deals, deal) + } + + tableColumns := []tablewriter.Column{ + tablewriter.Col("PayloadCID"), + tablewriter.Col("DealId"), + tablewriter.Col("Provider"), + tablewriter.Col("Status"), + tablewriter.Col("PricePerByte"), + tablewriter.Col("Received"), + tablewriter.Col("TotalPaid"), + } + + if verbose { + tableColumns = append(tableColumns, + tablewriter.Col("PieceCID"), + tablewriter.Col("UnsealPrice"), + tablewriter.Col("BytesPaidFor"), + tablewriter.Col("TransferChannelID"), + tablewriter.Col("TransferStatus"), + ) + } + tableColumns = append(tableColumns, tablewriter.NewLineCol("Message")) + + w := tablewriter.New(tableColumns...) 
+ + for _, d := range deals { + w.Write(toRetrievalOutput(d, verbose)) + } + + return w.Flush(out) +} + +func toRetrievalOutput(d api.RetrievalInfo, verbose bool) map[string]interface{} { + + payloadCID := d.PayloadCID.String() + provider := d.Provider.String() + if !verbose { + payloadCID = ellipsis(payloadCID, 8) + provider = ellipsis(provider, 8) + } + + retrievalOutput := map[string]interface{}{ + "PayloadCID": payloadCID, + "DealId": d.ID, + "Provider": provider, + "Status": retrievalStatusString(d.Status), + "PricePerByte": types.FIL(d.PricePerByte), + "Received": units.BytesSize(float64(d.BytesReceived)), + "TotalPaid": types.FIL(d.TotalPaid), + "Message": d.Message, + } + + if verbose { + transferChannelID := "" + if d.TransferChannelID != nil { + transferChannelID = d.TransferChannelID.String() + } + transferStatus := "" + if d.DataTransfer != nil { + transferStatus = datatransfer.Statuses[d.DataTransfer.Status] + } + pieceCID := "" + if d.PieceCID != nil { + pieceCID = d.PieceCID.String() + } + + retrievalOutput["PieceCID"] = pieceCID + retrievalOutput["UnsealPrice"] = types.FIL(d.UnsealPrice) + retrievalOutput["BytesPaidFor"] = units.BytesSize(float64(d.BytesPaidFor)) + retrievalOutput["TransferChannelID"] = transferChannelID + retrievalOutput["TransferStatus"] = transferStatus + } + return retrievalOutput +} + +func retrievalStatusString(status retrievalmarket.DealStatus) string { + s := retrievalmarket.DealStatuses[status] + + switch { + case isTerminalError(status): + return color.RedString(s) + case retrievalmarket.IsTerminalSuccess(status): + return color.GreenString(s) + default: + return s + } +} + var clientInspectDealCmd = &cli.Command{ Name: "inspect-deal", Usage: "Inspect detailed information about deal's lifecycle and the various stages it goes through", @@ -1296,7 +1515,8 @@ var clientListAsksCmd = &cli.Command{ Usage: "List asks for top miners", Flags: []cli.Flag{ &cli.BoolFlag{ - Name: "by-ping", + Name: "by-ping", + Usage: "sort by ping", 
}, &cli.StringFlag{ Name: "output-format", @@ -1451,17 +1671,17 @@ loop: } rt := time.Now() - _, err = api.ClientQueryAsk(ctx, *mi.PeerId, miner) if err != nil { return } + pingDuration := time.Now().Sub(rt) atomic.AddInt64(&got, 1) lk.Lock() asks = append(asks, QueriedAsk{ Ask: ask, - Ping: time.Now().Sub(rt), + Ping: pingDuration, }) lk.Unlock() }(miner) @@ -1540,7 +1760,7 @@ var clientQueryAskCmd = &cli.Command{ return xerrors.Errorf("failed to get peerID for miner: %w", err) } - if *mi.PeerId == peer.ID("SETME") { + if mi.PeerId == nil || *mi.PeerId == peer.ID("SETME") { return fmt.Errorf("the miner hasn't initialized yet") } @@ -1585,9 +1805,9 @@ var clientListDeals = &cli.Command{ Usage: "print verbose deal details", }, &cli.BoolFlag{ - Name: "color", - Usage: "use color in display output", - Value: true, + Name: "color", + Usage: "use color in display output", + DefaultText: "depends on output being a TTY", }, &cli.BoolFlag{ Name: "show-failed", @@ -1599,6 +1819,10 @@ var clientListDeals = &cli.Command{ }, }, Action: func(cctx *cli.Context) error { + if cctx.IsSet("color") { + color.NoColor = !cctx.Bool("color") + } + api, closer, err := GetFullNodeAPI(cctx) if err != nil { return err @@ -1607,7 +1831,6 @@ var clientListDeals = &cli.Command{ ctx := ReqContext(cctx) verbose := cctx.Bool("verbose") - color := cctx.Bool("color") watch := cctx.Bool("watch") showFailed := cctx.Bool("show-failed") @@ -1626,7 +1849,7 @@ var clientListDeals = &cli.Command{ tm.Clear() tm.MoveCursor(1, 1) - err = outputStorageDeals(ctx, tm.Screen, api, localDeals, verbose, color, showFailed) + err = outputStorageDeals(ctx, tm.Screen, api, localDeals, verbose, showFailed) if err != nil { return err } @@ -1652,7 +1875,7 @@ var clientListDeals = &cli.Command{ } } - return outputStorageDeals(ctx, cctx.App.Writer, api, localDeals, verbose, color, showFailed) + return outputStorageDeals(ctx, cctx.App.Writer, api, localDeals, verbose, showFailed) }, } @@ -1675,7 +1898,7 @@ func 
dealFromDealInfo(ctx context.Context, full v0api.FullNode, head *types.TipS } } -func outputStorageDeals(ctx context.Context, out io.Writer, full v0api.FullNode, localDeals []lapi.DealInfo, verbose bool, color bool, showFailed bool) error { +func outputStorageDeals(ctx context.Context, out io.Writer, full v0api.FullNode, localDeals []lapi.DealInfo, verbose bool, showFailed bool) error { sort.Slice(localDeals, func(i, j int) bool { return localDeals[i].CreationTime.Before(localDeals[j].CreationTime) }) @@ -1727,7 +1950,7 @@ func outputStorageDeals(ctx context.Context, out io.Writer, full v0api.FullNode, d.LocalDeal.ProposalCid, d.LocalDeal.DealID, d.LocalDeal.Provider, - dealStateString(color, d.LocalDeal.State), + dealStateString(d.LocalDeal.State), onChain, slashed, d.LocalDeal.PieceCID, @@ -1776,7 +1999,7 @@ func outputStorageDeals(ctx context.Context, out io.Writer, full v0api.FullNode, "DealCid": propcid, "DealId": d.LocalDeal.DealID, "Provider": d.LocalDeal.Provider, - "State": dealStateString(color, d.LocalDeal.State), + "State": dealStateString(d.LocalDeal.State), "On Chain?": onChain, "Slashed?": slashed, "PieceCID": piece, @@ -1791,12 +2014,8 @@ func outputStorageDeals(ctx context.Context, out io.Writer, full v0api.FullNode, return w.Flush(out) } -func dealStateString(c bool, state storagemarket.StorageDealStatus) string { +func dealStateString(state storagemarket.StorageDealStatus) string { s := storagemarket.DealStates[state] - if !c { - return s - } - switch state { case storagemarket.StorageDealError, storagemarket.StorageDealExpired: return color.RedString(s) @@ -2115,9 +2334,9 @@ var clientListTransfers = &cli.Command{ Usage: "print verbose transfer details", }, &cli.BoolFlag{ - Name: "color", - Usage: "use color in display output", - Value: true, + Name: "color", + Usage: "use color in display output", + DefaultText: "depends on output being a TTY", }, &cli.BoolFlag{ Name: "completed", @@ -2133,6 +2352,10 @@ var clientListTransfers = &cli.Command{ 
}, }, Action: func(cctx *cli.Context) error { + if cctx.IsSet("color") { + color.NoColor = !cctx.Bool("color") + } + api, closer, err := GetFullNodeAPI(cctx) if err != nil { return err @@ -2147,7 +2370,6 @@ var clientListTransfers = &cli.Command{ verbose := cctx.Bool("verbose") completed := cctx.Bool("completed") - color := cctx.Bool("color") watch := cctx.Bool("watch") showFailed := cctx.Bool("show-failed") if watch { @@ -2161,7 +2383,7 @@ var clientListTransfers = &cli.Command{ tm.MoveCursor(1, 1) - OutputDataTransferChannels(tm.Screen, channels, verbose, completed, color, showFailed) + OutputDataTransferChannels(tm.Screen, channels, verbose, completed, showFailed) tm.Flush() @@ -2186,13 +2408,13 @@ var clientListTransfers = &cli.Command{ } } } - OutputDataTransferChannels(os.Stdout, channels, verbose, completed, color, showFailed) + OutputDataTransferChannels(os.Stdout, channels, verbose, completed, showFailed) return nil }, } // OutputDataTransferChannels generates table output for a list of channels -func OutputDataTransferChannels(out io.Writer, channels []lapi.DataTransferChannel, verbose, completed, color, showFailed bool) { +func OutputDataTransferChannels(out io.Writer, channels []lapi.DataTransferChannel, verbose, completed, showFailed bool) { sort.Slice(channels, func(i, j int) bool { return channels[i].TransferID < channels[j].TransferID }) @@ -2222,7 +2444,7 @@ func OutputDataTransferChannels(out io.Writer, channels []lapi.DataTransferChann tablewriter.Col("Voucher"), tablewriter.NewLineCol("Message")) for _, channel := range sendingChannels { - w.Write(toChannelOutput(color, "Sending To", channel, verbose)) + w.Write(toChannelOutput("Sending To", channel, verbose)) } w.Flush(out) //nolint:errcheck @@ -2236,17 +2458,13 @@ func OutputDataTransferChannels(out io.Writer, channels []lapi.DataTransferChann tablewriter.Col("Voucher"), tablewriter.NewLineCol("Message")) for _, channel := range receivingChannels { - w.Write(toChannelOutput(color, "Receiving 
From", channel, verbose)) + w.Write(toChannelOutput("Receiving From", channel, verbose)) } w.Flush(out) //nolint:errcheck } -func channelStatusString(useColor bool, status datatransfer.Status) string { +func channelStatusString(status datatransfer.Status) string { s := datatransfer.Statuses[status] - if !useColor { - return s - } - switch status { case datatransfer.Failed, datatransfer.Cancelled: return color.RedString(s) @@ -2257,7 +2475,7 @@ func channelStatusString(useColor bool, status datatransfer.Status) string { } } -func toChannelOutput(useColor bool, otherPartyColumn string, channel lapi.DataTransferChannel, verbose bool) map[string]interface{} { +func toChannelOutput(otherPartyColumn string, channel lapi.DataTransferChannel, verbose bool) map[string]interface{} { rootCid := channel.BaseCID.String() otherParty := channel.OtherPeer.String() if !verbose { @@ -2277,7 +2495,7 @@ func toChannelOutput(useColor bool, otherPartyColumn string, channel lapi.DataTr return map[string]interface{}{ "ID": channel.TransferID, - "Status": channelStatusString(useColor, channel.Status), + "Status": channelStatusString(channel.Status), otherPartyColumn: otherParty, "Root Cid": rootCid, "Initiated?": initiated, diff --git a/cli/client_test.go b/cli/client_test.go deleted file mode 100644 index f0e8efda8..000000000 --- a/cli/client_test.go +++ /dev/null @@ -1,22 +0,0 @@ -package cli - -import ( - "context" - "os" - "testing" - "time" - - clitest "github.com/filecoin-project/lotus/cli/test" -) - -// TestClient does a basic test to exercise the client CLI -// commands -func TestClient(t *testing.T) { - _ = os.Setenv("BELLMAN_NO_GPU", "1") - clitest.QuietMiningLogs() - - blocktime := 5 * time.Millisecond - ctx := context.Background() - clientNode, _ := clitest.StartOneNodeOneMiner(ctx, t, blocktime) - clitest.RunClientTest(t, Commands, clientNode) -} diff --git a/cli/cmd.go b/cli/cmd.go index 30cce3e0a..630aae1bc 100644 --- a/cli/cmd.go +++ b/cli/cmd.go @@ -34,7 +34,7 @@ func 
GetFullNodeServices(ctx *cli.Context) (ServicesAPI, error) { return tn.(ServicesAPI), nil } - api, c, err := GetFullNodeAPI(ctx) + api, c, err := GetFullNodeAPIV1(ctx) if err != nil { return nil, err } @@ -82,6 +82,7 @@ var Commands = []*cli.Command{ WithCategory("developer", FetchParamCmd), WithCategory("network", NetCmd), WithCategory("network", SyncCmd), + WithCategory("status", StatusCmd), PprofCmd, VersionCmd, } diff --git a/cli/disputer.go b/cli/disputer.go index 235c4cf03..ceebeb939 100644 --- a/cli/disputer.go +++ b/cli/disputer.go @@ -238,6 +238,9 @@ var disputerStartCmd = &cli.Command{ dpmsgs := make([]*types.Message, 0) + startTime := time.Now() + proofsChecked := uint64(0) + // TODO: Parallelizeable for _, dl := range dls { fullDeadlines, err := api.StateMinerDeadlines(ctx, dl.miner, tsk) @@ -249,7 +252,10 @@ var disputerStartCmd = &cli.Command{ return xerrors.Errorf("deadline index %d not found in deadlines", dl.index) } - ms, err := makeDisputeWindowedPosts(ctx, api, dl, fullDeadlines[dl.index].DisputableProofCount, fromAddr) + disputableProofs := fullDeadlines[dl.index].DisputableProofCount + proofsChecked += disputableProofs + + ms, err := makeDisputeWindowedPosts(ctx, api, dl, disputableProofs, fromAddr) if err != nil { return xerrors.Errorf("failed to check for disputes: %w", err) } @@ -264,6 +270,8 @@ var disputerStartCmd = &cli.Command{ deadlineMap[dClose+Confidence] = append(deadlineMap[dClose+Confidence], *dl) } + disputeLog.Infow("checked proofs", "count", proofsChecked, "duration", time.Since(startTime)) + // TODO: Parallelizeable / can be integrated into the previous deadline-iterating for loop for _, dpmsg := range dpmsgs { disputeLog.Infow("disputing a PoSt", "miner", dpmsg.To) diff --git a/cli/filplus.go b/cli/filplus.go index d4972bca6..007071ea2 100644 --- a/cli/filplus.go +++ b/cli/filplus.go @@ -4,8 +4,6 @@ import ( "context" "fmt" - "github.com/filecoin-project/lotus/api/v0api" - verifreg4 
"github.com/filecoin-project/specs-actors/v4/actors/builtin/verifreg" "github.com/filecoin-project/go-state-types/big" @@ -16,6 +14,7 @@ import ( "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/lotus/api/v0api" "github.com/filecoin-project/lotus/blockstore" "github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/chain/actors" diff --git a/cli/init_test.go b/cli/init_test.go new file mode 100644 index 000000000..8c343bcfa --- /dev/null +++ b/cli/init_test.go @@ -0,0 +1,9 @@ +package cli + +import ( + logging "github.com/ipfs/go-log/v2" +) + +func init() { + logging.SetLogLevel("watchdog", "ERROR") +} diff --git a/cli/mpool.go b/cli/mpool.go index 025a2fc3f..b128ccc15 100644 --- a/cli/mpool.go +++ b/cli/mpool.go @@ -34,6 +34,7 @@ var MpoolCmd = &cli.Command{ MpoolFindCmd, MpoolConfig, MpoolGasPerfCmd, + mpoolManage, }, } diff --git a/cli/mpool_manage.go b/cli/mpool_manage.go new file mode 100644 index 000000000..164a05842 --- /dev/null +++ b/cli/mpool_manage.go @@ -0,0 +1,360 @@ +package cli + +import ( + "context" + "fmt" + "sort" + + "github.com/Kubuxu/imtui" + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/chain/actors/builtin" + "github.com/filecoin-project/lotus/chain/messagepool" + types "github.com/filecoin-project/lotus/chain/types" + "github.com/gdamore/tcell/v2" + cid "github.com/ipfs/go-cid" + "github.com/urfave/cli/v2" + "golang.org/x/xerrors" +) + +var mpoolManage = &cli.Command{ + Name: "manage", + Action: func(cctx *cli.Context) error { + srv, err := GetFullNodeServices(cctx) + if err != nil { + return err + } + defer srv.Close() //nolint:errcheck + + ctx := ReqContext(cctx) + + _, localAddr, err := srv.LocalAddresses(ctx) + if err != nil { + return xerrors.Errorf("getting local addresses: %w", err) + } + + msgs, err := 
srv.MpoolPendingFilter(ctx, func(sm *types.SignedMessage) bool { + if sm.Message.From.Empty() { + return false + } + for _, a := range localAddr { + if a == sm.Message.From { + return true + } + } + return false + }, types.EmptyTSK) + if err != nil { + return err + } + + t, err := imtui.NewTui() + if err != nil { + panic(err) + } + + mm := &mmUI{ + ctx: ctx, + srv: srv, + addrs: localAddr, + messages: msgs, + } + sort.Slice(mm.addrs, func(i, j int) bool { + return mm.addrs[i].String() < mm.addrs[j].String() + }) + t.PushScene(mm.addrSelect()) + + err = t.Run() + + if err != nil { + panic(err) + } + + return nil + }, +} + +type mmUI struct { + ctx context.Context + srv ServicesAPI + addrs []address.Address + messages []*types.SignedMessage +} + +func (mm *mmUI) addrSelect() func(*imtui.Tui) error { + rows := [][]string{{"Address", "No. Messages"}} + mCount := map[address.Address]int{} + for _, sm := range mm.messages { + mCount[sm.Message.From]++ + } + for _, a := range mm.addrs { + rows = append(rows, []string{a.String(), fmt.Sprintf("%d", mCount[a])}) + } + + flex := []int{4, 1} + sel := 0 + scroll := 0 + return func(t *imtui.Tui) error { + if t.CurrentKey != nil && t.CurrentKey.Key() == tcell.KeyEnter { + if sel > 0 { + t.ReplaceScene(mm.messageLising(mm.addrs[sel-1])) + } + } + t.FlexTable(0, 0, 0, &sel, &scroll, rows, flex, true) + return nil + } +} + +func errUI(err error) func(*imtui.Tui) error { + return func(t *imtui.Tui) error { + return err + } +} + +type msgInfo struct { + sm *types.SignedMessage + checks []api.MessageCheckStatus +} + +func (mi *msgInfo) Row() []string { + cidStr := mi.sm.Cid().String() + failedChecks := 0 + for _, c := range mi.checks { + if !c.OK { + failedChecks++ + } + } + shortAddr := mi.sm.Message.To.String() + if len(shortAddr) > 16 { + shortAddr = "…" + shortAddr[len(shortAddr)-16:] + } + var fCk string + if failedChecks == 0 { + fCk = "[:green:]OK" + } else { + fCk = "[:orange:]" + fmt.Sprintf("%d", failedChecks) + } + return 
[]string{"…" + cidStr[len(cidStr)-32:], shortAddr, + fmt.Sprintf("%d", mi.sm.Message.Nonce), types.FIL(mi.sm.Message.Value).String(), + fmt.Sprintf("%d", mi.sm.Message.Method), fCk} + +} + +func (mm *mmUI) messageLising(a address.Address) func(*imtui.Tui) error { + genMsgInfos := func() ([]msgInfo, error) { + msgs, err := mm.srv.MpoolPendingFilter(mm.ctx, func(sm *types.SignedMessage) bool { + if sm.Message.From.Empty() { + return false + } + if a == sm.Message.From { + return true + } + return false + }, types.EmptyTSK) + + if err != nil { + return nil, xerrors.Errorf("getting pending: %w", err) + } + + msgIdx := map[cid.Cid]*types.SignedMessage{} + for _, sm := range msgs { + if sm.Message.From == a { + msgIdx[sm.Message.Cid()] = sm + msgIdx[sm.Cid()] = sm + } + } + + checks, err := mm.srv.MpoolCheckPendingMessages(mm.ctx, a) + if err != nil { + return nil, xerrors.Errorf("checking pending: %w", err) + } + msgInfos := make([]msgInfo, 0, len(checks)) + for _, msgChecks := range checks { + failingChecks := []api.MessageCheckStatus{} + for _, c := range msgChecks { + if !c.OK { + failingChecks = append(failingChecks, c) + } + } + msgInfos = append(msgInfos, msgInfo{ + sm: msgIdx[msgChecks[0].Cid], + checks: failingChecks, + }) + } + return msgInfos, nil + } + + sel := 0 + scroll := 0 + + var msgInfos []msgInfo + var rows [][]string + flex := []int{3, 2, 1, 1, 1, 1} + refresh := true + + return func(t *imtui.Tui) error { + if refresh { + var err error + msgInfos, err = genMsgInfos() + if err != nil { + return xerrors.Errorf("getting msgInfos: %w", err) + } + + rows = [][]string{{"Message Cid", "To", "Nonce", "Value", "Method", "Checks"}} + for _, mi := range msgInfos { + rows = append(rows, mi.Row()) + } + refresh = false + } + + if t.CurrentKey != nil && t.CurrentKey.Key() == tcell.KeyEnter { + if sel > 0 { + t.PushScene(mm.messageDetail(msgInfos[sel-1])) + refresh = true + return nil + } + } + + t.Label(0, 0, fmt.Sprintf("Address: %s", a), tcell.StyleDefault) + 
t.FlexTable(1, 0, 0, &sel, &scroll, rows, flex, true) + return nil + } +} + +func (mm *mmUI) messageDetail(mi msgInfo) func(*imtui.Tui) error { + baseFee, err := mm.srv.GetBaseFee(mm.ctx) + if err != nil { + return errUI(err) + } + _ = baseFee + + m := mi.sm.Message + maxFee := big.Mul(m.GasFeeCap, big.NewInt(m.GasLimit)) + + issues := [][]string{} + for _, c := range mi.checks { + issues = append(issues, []string{c.Code.String(), c.Err}) + } + issuesFlex := []int{1, 3} + var sel, scroll int + + executeReprice := false + executeNoop := false + return func(t *imtui.Tui) error { + if executeReprice { + m.GasFeeCap = big.Div(maxFee, big.NewInt(m.GasLimit)) + m.GasPremium = messagepool.ComputeMinRBF(m.GasPremium) + m.GasFeeCap = big.Max(m.GasFeeCap, m.GasPremium) + + _, _, err := mm.srv.PublishMessage(mm.ctx, &api.MessagePrototype{ + Message: m, + ValidNonce: true, + }, true) + if err != nil { + return err + } + t.PopScene() + return nil + } + if executeNoop { + nop := types.Message{ + To: builtin.BurntFundsActorAddr, + From: m.From, + + Nonce: m.Nonce, + Value: big.Zero(), + } + + nop.GasPremium = messagepool.ComputeMinRBF(m.GasPremium) + + _, _, err := mm.srv.PublishMessage(mm.ctx, &api.MessagePrototype{ + Message: nop, + ValidNonce: true, + }, true) + + if err != nil { + return xerrors.Errorf("publishing noop message: %w", err) + } + + t.PopScene() + return nil + } + + if t.CurrentKey != nil { + if t.CurrentKey.Key() == tcell.KeyLeft { + t.PopScene() + return nil + } + if t.CurrentKey.Key() == tcell.KeyRune { + switch t.CurrentKey.Rune() { + case 'R', 'r': + t.PushScene(feeUI(baseFee, m.GasLimit, &maxFee, &executeReprice)) + return nil + case 'N', 'n': + t.PushScene(confirmationScene( + &executeNoop, + "Are you sure you want to cancel the message by", + "replacing it with a message with no effects?")) + return nil + } + } + } + + row := 0 + defS := tcell.StyleDefault + display := func(f string, args ...interface{}) { + t.Label(0, row, fmt.Sprintf(f, args...), defS) 
+ row++ + } + + display("Message CID: %s", m.Cid()) + display("Signed Message CID: %s", mi.sm.Cid()) + row++ + display("From: %s", m.From) + display("To: %s", m.To) + row++ + display("Nonce: %d", m.Nonce) + display("Value: %s", types.FIL(m.Value)) + row++ + display("GasLimit: %d", m.GasLimit) + display("GasPremium: %s", types.FIL(m.GasPremium).Short()) + display("GasFeeCap %s", types.FIL(m.GasFeeCap).Short()) + row++ + display("Press R to reprice this message") + display("Press N to replace this message with no-operation message") + row++ + + t.FlexTable(row, 0, 0, &sel, &scroll, issues, issuesFlex, false) + + return nil + } +} + +func confirmationScene(yes *bool, ask ...string) func(*imtui.Tui) error { + return func(t *imtui.Tui) error { + row := 0 + defS := tcell.StyleDefault + display := func(f string, args ...interface{}) { + t.Label(0, row, fmt.Sprintf(f, args...), defS) + row++ + } + + for _, a := range ask { + display(a) + } + row++ + display("Enter to confirm") + display("Esc to cancel") + + if t.CurrentKey != nil { + if t.CurrentKey.Key() == tcell.KeyEnter { + *yes = true + t.PopScene() + return nil + } + } + + return nil + } +} diff --git a/cli/multisig.go b/cli/multisig.go index f6caa6ee0..c51677d85 100644 --- a/cli/multisig.go +++ b/cli/multisig.go @@ -95,11 +95,13 @@ var msigCreateCmd = &cli.Command{ return ShowHelp(cctx, fmt.Errorf("multisigs must have at least one signer")) } - api, closer, err := GetFullNodeAPI(cctx) + srv, err := GetFullNodeServices(cctx) if err != nil { return err } - defer closer() + defer srv.Close() //nolint:errcheck + + api := srv.FullNodeAPI() ctx := ReqContext(cctx) var addrs []address.Address @@ -146,13 +148,20 @@ var msigCreateCmd = &cli.Command{ gp := types.NewInt(1) - msgCid, err := api.MsigCreate(ctx, required, addrs, d, intVal, sendAddr, gp) + proto, err := api.MsigCreate(ctx, required, addrs, d, intVal, sendAddr, gp) if err != nil { return err } + sm, err := InteractiveSend(ctx, cctx, srv, proto) + if err != nil { + 
return err + } + + msgCid := sm.Cid() + // wait for it to get mined into a block - wait, err := api.StateWaitMsg(ctx, msgCid, uint64(cctx.Int("confidence"))) + wait, err := api.StateWaitMsg(ctx, msgCid, uint64(cctx.Int("confidence")), build.Finality, true) if err != nil { return err } @@ -364,11 +373,13 @@ var msigProposeCmd = &cli.Command{ return ShowHelp(cctx, fmt.Errorf("must either pass three or five arguments")) } - api, closer, err := GetFullNodeAPI(cctx) + srv, err := GetFullNodeServices(cctx) if err != nil { return err } - defer closer() + defer srv.Close() //nolint:errcheck + + api := srv.FullNodeAPI() ctx := ReqContext(cctx) msig, err := address.NewFromString(cctx.Args().Get(0)) @@ -426,14 +437,21 @@ var msigProposeCmd = &cli.Command{ return fmt.Errorf("actor %s is not a multisig actor", msig) } - msgCid, err := api.MsigPropose(ctx, msig, dest, types.BigInt(value), from, method, params) + proto, err := api.MsigPropose(ctx, msig, dest, types.BigInt(value), from, method, params) if err != nil { return err } + sm, err := InteractiveSend(ctx, cctx, srv, proto) + if err != nil { + return err + } + + msgCid := sm.Cid() + fmt.Println("send proposal in message: ", msgCid) - wait, err := api.StateWaitMsg(ctx, msgCid, uint64(cctx.Int("confidence"))) + wait, err := api.StateWaitMsg(ctx, msgCid, uint64(cctx.Int("confidence")), build.Finality, true) if err != nil { return err } @@ -481,11 +499,13 @@ var msigApproveCmd = &cli.Command{ return ShowHelp(cctx, fmt.Errorf("usage: msig approve [ ]")) } - api, closer, err := GetFullNodeAPI(cctx) + srv, err := GetFullNodeServices(cctx) if err != nil { return err } - defer closer() + defer srv.Close() //nolint:errcheck + + api := srv.FullNodeAPI() ctx := ReqContext(cctx) msig, err := address.NewFromString(cctx.Args().Get(0)) @@ -515,10 +535,17 @@ var msigApproveCmd = &cli.Command{ var msgCid cid.Cid if cctx.Args().Len() == 2 { - msgCid, err = api.MsigApprove(ctx, msig, txid, from) + proto, err := api.MsigApprove(ctx, msig, 
txid, from) if err != nil { return err } + + sm, err := InteractiveSend(ctx, cctx, srv, proto) + if err != nil { + return err + } + + msgCid = sm.Cid() } else { proposer, err := address.NewFromString(cctx.Args().Get(2)) if err != nil { @@ -558,15 +585,22 @@ var msigApproveCmd = &cli.Command{ params = p } - msgCid, err = api.MsigApproveTxnHash(ctx, msig, txid, proposer, dest, types.BigInt(value), from, method, params) + proto, err := api.MsigApproveTxnHash(ctx, msig, txid, proposer, dest, types.BigInt(value), from, method, params) if err != nil { return err } + + sm, err := InteractiveSend(ctx, cctx, srv, proto) + if err != nil { + return err + } + + msgCid = sm.Cid() } fmt.Println("sent approval in message: ", msgCid) - wait, err := api.StateWaitMsg(ctx, msgCid, uint64(cctx.Int("confidence"))) + wait, err := api.StateWaitMsg(ctx, msgCid, uint64(cctx.Int("confidence")), build.Finality, true) if err != nil { return err } @@ -598,11 +632,13 @@ var msigRemoveProposeCmd = &cli.Command{ return ShowHelp(cctx, fmt.Errorf("must pass multisig address and signer address")) } - api, closer, err := GetFullNodeAPI(cctx) + srv, err := GetFullNodeServices(cctx) if err != nil { return err } - defer closer() + defer srv.Close() //nolint:errcheck + + api := srv.FullNodeAPI() ctx := ReqContext(cctx) msig, err := address.NewFromString(cctx.Args().Get(0)) @@ -630,14 +666,21 @@ var msigRemoveProposeCmd = &cli.Command{ from = defaddr } - msgCid, err := api.MsigRemoveSigner(ctx, msig, from, addr, cctx.Bool("decrease-threshold")) + proto, err := api.MsigRemoveSigner(ctx, msig, from, addr, cctx.Bool("decrease-threshold")) if err != nil { return err } + sm, err := InteractiveSend(ctx, cctx, srv, proto) + if err != nil { + return err + } + + msgCid := sm.Cid() + fmt.Println("sent remove proposal in message: ", msgCid) - wait, err := api.StateWaitMsg(ctx, msgCid, uint64(cctx.Int("confidence"))) + wait, err := api.StateWaitMsg(ctx, msgCid, uint64(cctx.Int("confidence")), build.Finality, true) if 
err != nil { return err } @@ -676,11 +719,13 @@ var msigAddProposeCmd = &cli.Command{ return ShowHelp(cctx, fmt.Errorf("must pass multisig address and signer address")) } - api, closer, err := GetFullNodeAPI(cctx) + srv, err := GetFullNodeServices(cctx) if err != nil { return err } - defer closer() + defer srv.Close() //nolint:errcheck + + api := srv.FullNodeAPI() ctx := ReqContext(cctx) msig, err := address.NewFromString(cctx.Args().Get(0)) @@ -708,14 +753,21 @@ var msigAddProposeCmd = &cli.Command{ from = defaddr } - msgCid, err := api.MsigAddPropose(ctx, msig, from, addr, cctx.Bool("increase-threshold")) + proto, err := api.MsigAddPropose(ctx, msig, from, addr, cctx.Bool("increase-threshold")) if err != nil { return err } + sm, err := InteractiveSend(ctx, cctx, srv, proto) + if err != nil { + return err + } + + msgCid := sm.Cid() + fmt.Fprintln(cctx.App.Writer, "sent add proposal in message: ", msgCid) - wait, err := api.StateWaitMsg(ctx, msgCid, uint64(cctx.Int("confidence"))) + wait, err := api.StateWaitMsg(ctx, msgCid, uint64(cctx.Int("confidence")), build.Finality, true) if err != nil { return err } @@ -743,11 +795,13 @@ var msigAddApproveCmd = &cli.Command{ return ShowHelp(cctx, fmt.Errorf("must pass multisig address, proposer address, transaction id, new signer address, whether to increase threshold")) } - api, closer, err := GetFullNodeAPI(cctx) + srv, err := GetFullNodeServices(cctx) if err != nil { return err } - defer closer() + defer srv.Close() //nolint:errcheck + + api := srv.FullNodeAPI() ctx := ReqContext(cctx) msig, err := address.NewFromString(cctx.Args().Get(0)) @@ -790,14 +844,21 @@ var msigAddApproveCmd = &cli.Command{ from = defaddr } - msgCid, err := api.MsigAddApprove(ctx, msig, from, txid, prop, newAdd, inc) + proto, err := api.MsigAddApprove(ctx, msig, from, txid, prop, newAdd, inc) if err != nil { return err } + sm, err := InteractiveSend(ctx, cctx, srv, proto) + if err != nil { + return err + } + + msgCid := sm.Cid() + 
fmt.Println("sent add approval in message: ", msgCid) - wait, err := api.StateWaitMsg(ctx, msgCid, uint64(cctx.Int("confidence"))) + wait, err := api.StateWaitMsg(ctx, msgCid, uint64(cctx.Int("confidence")), build.Finality, true) if err != nil { return err } @@ -825,11 +886,13 @@ var msigAddCancelCmd = &cli.Command{ return ShowHelp(cctx, fmt.Errorf("must pass multisig address, transaction id, new signer address, whether to increase threshold")) } - api, closer, err := GetFullNodeAPI(cctx) + srv, err := GetFullNodeServices(cctx) if err != nil { return err } - defer closer() + defer srv.Close() //nolint:errcheck + + api := srv.FullNodeAPI() ctx := ReqContext(cctx) msig, err := address.NewFromString(cctx.Args().Get(0)) @@ -867,14 +930,21 @@ var msigAddCancelCmd = &cli.Command{ from = defaddr } - msgCid, err := api.MsigAddCancel(ctx, msig, from, txid, newAdd, inc) + proto, err := api.MsigAddCancel(ctx, msig, from, txid, newAdd, inc) if err != nil { return err } + sm, err := InteractiveSend(ctx, cctx, srv, proto) + if err != nil { + return err + } + + msgCid := sm.Cid() + fmt.Println("sent add cancellation in message: ", msgCid) - wait, err := api.StateWaitMsg(ctx, msgCid, uint64(cctx.Int("confidence"))) + wait, err := api.StateWaitMsg(ctx, msgCid, uint64(cctx.Int("confidence")), build.Finality, true) if err != nil { return err } @@ -902,11 +972,13 @@ var msigSwapProposeCmd = &cli.Command{ return ShowHelp(cctx, fmt.Errorf("must pass multisig address, old signer address, new signer address")) } - api, closer, err := GetFullNodeAPI(cctx) + srv, err := GetFullNodeServices(cctx) if err != nil { return err } - defer closer() + defer srv.Close() //nolint:errcheck + + api := srv.FullNodeAPI() ctx := ReqContext(cctx) msig, err := address.NewFromString(cctx.Args().Get(0)) @@ -939,14 +1011,21 @@ var msigSwapProposeCmd = &cli.Command{ from = defaddr } - msgCid, err := api.MsigSwapPropose(ctx, msig, from, oldAdd, newAdd) + proto, err := api.MsigSwapPropose(ctx, msig, from, oldAdd, 
newAdd) if err != nil { return err } + sm, err := InteractiveSend(ctx, cctx, srv, proto) + if err != nil { + return err + } + + msgCid := sm.Cid() + fmt.Println("sent swap proposal in message: ", msgCid) - wait, err := api.StateWaitMsg(ctx, msgCid, uint64(cctx.Int("confidence"))) + wait, err := api.StateWaitMsg(ctx, msgCid, uint64(cctx.Int("confidence")), build.Finality, true) if err != nil { return err } @@ -974,11 +1053,13 @@ var msigSwapApproveCmd = &cli.Command{ return ShowHelp(cctx, fmt.Errorf("must pass multisig address, proposer address, transaction id, old signer address, new signer address")) } - api, closer, err := GetFullNodeAPI(cctx) + srv, err := GetFullNodeServices(cctx) if err != nil { return err } - defer closer() + defer srv.Close() //nolint:errcheck + + api := srv.FullNodeAPI() ctx := ReqContext(cctx) msig, err := address.NewFromString(cctx.Args().Get(0)) @@ -1021,14 +1102,21 @@ var msigSwapApproveCmd = &cli.Command{ from = defaddr } - msgCid, err := api.MsigSwapApprove(ctx, msig, from, txid, prop, oldAdd, newAdd) + proto, err := api.MsigSwapApprove(ctx, msig, from, txid, prop, oldAdd, newAdd) if err != nil { return err } + sm, err := InteractiveSend(ctx, cctx, srv, proto) + if err != nil { + return err + } + + msgCid := sm.Cid() + fmt.Println("sent swap approval in message: ", msgCid) - wait, err := api.StateWaitMsg(ctx, msgCid, uint64(cctx.Int("confidence"))) + wait, err := api.StateWaitMsg(ctx, msgCid, uint64(cctx.Int("confidence")), build.Finality, true) if err != nil { return err } @@ -1056,11 +1144,13 @@ var msigSwapCancelCmd = &cli.Command{ return ShowHelp(cctx, fmt.Errorf("must pass multisig address, transaction id, old signer address, new signer address")) } - api, closer, err := GetFullNodeAPI(cctx) + srv, err := GetFullNodeServices(cctx) if err != nil { return err } - defer closer() + defer srv.Close() //nolint:errcheck + + api := srv.FullNodeAPI() ctx := ReqContext(cctx) msig, err := address.NewFromString(cctx.Args().Get(0)) @@ 
-1098,14 +1188,21 @@ var msigSwapCancelCmd = &cli.Command{ from = defaddr } - msgCid, err := api.MsigSwapCancel(ctx, msig, from, txid, oldAdd, newAdd) + proto, err := api.MsigSwapCancel(ctx, msig, from, txid, oldAdd, newAdd) if err != nil { return err } + sm, err := InteractiveSend(ctx, cctx, srv, proto) + if err != nil { + return err + } + + msgCid := sm.Cid() + fmt.Println("sent swap cancellation in message: ", msgCid) - wait, err := api.StateWaitMsg(ctx, msgCid, uint64(cctx.Int("confidence"))) + wait, err := api.StateWaitMsg(ctx, msgCid, uint64(cctx.Int("confidence")), build.Finality, true) if err != nil { return err } @@ -1133,11 +1230,13 @@ var msigLockProposeCmd = &cli.Command{ return ShowHelp(cctx, fmt.Errorf("must pass multisig address, start epoch, unlock duration, and amount")) } - api, closer, err := GetFullNodeAPI(cctx) + srv, err := GetFullNodeServices(cctx) if err != nil { return err } - defer closer() + defer srv.Close() //nolint:errcheck + + api := srv.FullNodeAPI() ctx := ReqContext(cctx) msig, err := address.NewFromString(cctx.Args().Get(0)) @@ -1185,14 +1284,21 @@ var msigLockProposeCmd = &cli.Command{ return actErr } - msgCid, err := api.MsigPropose(ctx, msig, msig, big.Zero(), from, uint64(multisig.Methods.LockBalance), params) + proto, err := api.MsigPropose(ctx, msig, msig, big.Zero(), from, uint64(multisig.Methods.LockBalance), params) if err != nil { return err } + sm, err := InteractiveSend(ctx, cctx, srv, proto) + if err != nil { + return err + } + + msgCid := sm.Cid() + fmt.Println("sent lock proposal in message: ", msgCid) - wait, err := api.StateWaitMsg(ctx, msgCid, uint64(cctx.Int("confidence"))) + wait, err := api.StateWaitMsg(ctx, msgCid, uint64(cctx.Int("confidence")), build.Finality, true) if err != nil { return err } @@ -1220,11 +1326,13 @@ var msigLockApproveCmd = &cli.Command{ return ShowHelp(cctx, fmt.Errorf("must pass multisig address, proposer address, tx id, start epoch, unlock duration, and amount")) } - api, closer, err 
:= GetFullNodeAPI(cctx) + srv, err := GetFullNodeServices(cctx) if err != nil { return err } - defer closer() + defer srv.Close() //nolint:errcheck + + api := srv.FullNodeAPI() ctx := ReqContext(cctx) msig, err := address.NewFromString(cctx.Args().Get(0)) @@ -1282,14 +1390,21 @@ var msigLockApproveCmd = &cli.Command{ return actErr } - msgCid, err := api.MsigApproveTxnHash(ctx, msig, txid, prop, msig, big.Zero(), from, uint64(multisig.Methods.LockBalance), params) + proto, err := api.MsigApproveTxnHash(ctx, msig, txid, prop, msig, big.Zero(), from, uint64(multisig.Methods.LockBalance), params) if err != nil { return err } + sm, err := InteractiveSend(ctx, cctx, srv, proto) + if err != nil { + return err + } + + msgCid := sm.Cid() + fmt.Println("sent lock approval in message: ", msgCid) - wait, err := api.StateWaitMsg(ctx, msgCid, uint64(cctx.Int("confidence"))) + wait, err := api.StateWaitMsg(ctx, msgCid, uint64(cctx.Int("confidence")), build.Finality, true) if err != nil { return err } @@ -1313,15 +1428,17 @@ var msigLockCancelCmd = &cli.Command{ }, }, Action: func(cctx *cli.Context) error { - if cctx.Args().Len() != 6 { + if cctx.Args().Len() != 5 { return ShowHelp(cctx, fmt.Errorf("must pass multisig address, tx id, start epoch, unlock duration, and amount")) } - api, closer, err := GetFullNodeAPI(cctx) + srv, err := GetFullNodeServices(cctx) if err != nil { return err } - defer closer() + defer srv.Close() //nolint:errcheck + + api := srv.FullNodeAPI() ctx := ReqContext(cctx) msig, err := address.NewFromString(cctx.Args().Get(0)) @@ -1374,14 +1491,21 @@ var msigLockCancelCmd = &cli.Command{ return actErr } - msgCid, err := api.MsigCancel(ctx, msig, txid, msig, big.Zero(), from, uint64(multisig.Methods.LockBalance), params) + proto, err := api.MsigCancel(ctx, msig, txid, msig, big.Zero(), from, uint64(multisig.Methods.LockBalance), params) if err != nil { return err } + sm, err := InteractiveSend(ctx, cctx, srv, proto) + if err != nil { + return err + } + + 
msgCid := sm.Cid() + fmt.Println("sent lock cancellation in message: ", msgCid) - wait, err := api.StateWaitMsg(ctx, msgCid, uint64(cctx.Int("confidence"))) + wait, err := api.StateWaitMsg(ctx, msgCid, uint64(cctx.Int("confidence")), build.Finality, true) if err != nil { return err } @@ -1471,11 +1595,13 @@ var msigProposeThresholdCmd = &cli.Command{ return ShowHelp(cctx, fmt.Errorf("must pass multisig address and new threshold value")) } - api, closer, err := GetFullNodeAPI(cctx) + srv, err := GetFullNodeServices(cctx) if err != nil { return err } - defer closer() + defer srv.Close() //nolint:errcheck + + api := srv.FullNodeAPI() ctx := ReqContext(cctx) msig, err := address.NewFromString(cctx.Args().Get(0)) @@ -1511,14 +1637,21 @@ var msigProposeThresholdCmd = &cli.Command{ return actErr } - msgCid, err := api.MsigPropose(ctx, msig, msig, types.NewInt(0), from, uint64(multisig.Methods.ChangeNumApprovalsThreshold), params) + proto, err := api.MsigPropose(ctx, msig, msig, types.NewInt(0), from, uint64(multisig.Methods.ChangeNumApprovalsThreshold), params) if err != nil { return fmt.Errorf("failed to propose change of threshold: %w", err) } + sm, err := InteractiveSend(ctx, cctx, srv, proto) + if err != nil { + return err + } + + msgCid := sm.Cid() + fmt.Println("sent change threshold proposal in message: ", msgCid) - wait, err := api.StateWaitMsg(ctx, msgCid, uint64(cctx.Int("confidence"))) + wait, err := api.StateWaitMsg(ctx, msgCid, uint64(cctx.Int("confidence")), build.Finality, true) if err != nil { return err } diff --git a/cli/multisig_test.go b/cli/multisig_test.go deleted file mode 100644 index 82472cd62..000000000 --- a/cli/multisig_test.go +++ /dev/null @@ -1,22 +0,0 @@ -package cli - -import ( - "context" - "os" - "testing" - "time" - - clitest "github.com/filecoin-project/lotus/cli/test" -) - -// TestMultisig does a basic test to exercise the multisig CLI -// commands -func TestMultisig(t *testing.T) { - _ = os.Setenv("BELLMAN_NO_GPU", "1") - 
clitest.QuietMiningLogs() - - blocktime := 5 * time.Millisecond - ctx := context.Background() - clientNode, _ := clitest.StartOneNodeOneMiner(ctx, t, blocktime) - clitest.RunMultisigTest(t, Commands, clientNode) -} diff --git a/cli/send.go b/cli/send.go index daf73ccad..a5200d3b8 100644 --- a/cli/send.go +++ b/cli/send.go @@ -2,7 +2,6 @@ package cli import ( "encoding/hex" - "errors" "fmt" "github.com/urfave/cli/v2" @@ -59,10 +58,14 @@ var sendCmd = &cli.Command{ }, &cli.BoolFlag{ Name: "force", - Usage: "must be specified for the action to take effect if maybe SysErrInsufficientFunds etc", + Usage: "Deprecated: use global 'force-send'", }, }, Action: func(cctx *cli.Context) error { + if cctx.IsSet("force") { + fmt.Println("'force' flag is deprecated, use global flag 'force-send'") + } + if cctx.Args().Len() != 2 { return ShowHelp(cctx, fmt.Errorf("'send' expects two arguments, target and amount")) } @@ -137,23 +140,22 @@ var sendCmd = &cli.Command{ params.Params = decparams } - params.Force = cctx.Bool("force") - if cctx.IsSet("nonce") { n := cctx.Uint64("nonce") params.Nonce = &n } - msgCid, err := srv.Send(ctx, params) - + proto, err := srv.MessageForSend(ctx, params) if err != nil { - if errors.Is(err, ErrSendBalanceTooLow) { - return fmt.Errorf("--force must be specified for this action to have an effect; you have been warned: %w", err) - } - return xerrors.Errorf("executing send: %w", err) + return xerrors.Errorf("creating message prototype: %w", err) } - fmt.Fprintf(cctx.App.Writer, "%s\n", msgCid) + sm, err := InteractiveSend(ctx, cctx, srv, proto) + if err != nil { + return err + } + + fmt.Fprintf(cctx.App.Writer, "%s\n", sm.Cid()) return nil }, } diff --git a/cli/send_test.go b/cli/send_test.go index ff258346a..52eafda67 100644 --- a/cli/send_test.go +++ b/cli/send_test.go @@ -2,24 +2,17 @@ package cli import ( "bytes" - "errors" "testing" "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" + 
"github.com/filecoin-project/lotus/api" types "github.com/filecoin-project/lotus/chain/types" gomock "github.com/golang/mock/gomock" - cid "github.com/ipfs/go-cid" "github.com/stretchr/testify/assert" ucli "github.com/urfave/cli/v2" ) -var arbtCid = (&types.Message{ - From: mustAddr(address.NewIDAddress(2)), - To: mustAddr(address.NewIDAddress(1)), - Value: types.NewInt(1000), -}).Cid() - func mustAddr(a address.Address, err error) address.Address { if err != nil { panic(err) @@ -49,80 +42,26 @@ func TestSendCLI(t *testing.T) { app, mockSrvcs, buf, done := newMockApp(t, sendCmd) defer done() - gomock.InOrder( - mockSrvcs.EXPECT().Send(gomock.Any(), SendParams{ - To: mustAddr(address.NewIDAddress(1)), - Val: oneFil, - }).Return(arbtCid, nil), - mockSrvcs.EXPECT().Close(), - ) - err := app.Run([]string{"lotus", "send", "t01", "1"}) - assert.NoError(t, err) - assert.EqualValues(t, arbtCid.String()+"\n", buf.String()) - }) - t.Run("ErrSendBalanceTooLow", func(t *testing.T) { - app, mockSrvcs, _, done := newMockApp(t, sendCmd) - defer done() - - gomock.InOrder( - mockSrvcs.EXPECT().Send(gomock.Any(), SendParams{ - To: mustAddr(address.NewIDAddress(1)), - Val: oneFil, - }).Return(cid.Undef, ErrSendBalanceTooLow), - mockSrvcs.EXPECT().Close(), - ) - err := app.Run([]string{"lotus", "send", "t01", "1"}) - assert.ErrorIs(t, err, ErrSendBalanceTooLow) - }) - t.Run("generic-err-is-forwarded", func(t *testing.T) { - app, mockSrvcs, _, done := newMockApp(t, sendCmd) - defer done() - - errMark := errors.New("something") - gomock.InOrder( - mockSrvcs.EXPECT().Send(gomock.Any(), SendParams{ - To: mustAddr(address.NewIDAddress(1)), - Val: oneFil, - }).Return(cid.Undef, errMark), - mockSrvcs.EXPECT().Close(), - ) - err := app.Run([]string{"lotus", "send", "t01", "1"}) - assert.ErrorIs(t, err, errMark) - }) - - t.Run("from-specific", func(t *testing.T) { - app, mockSrvcs, buf, done := newMockApp(t, sendCmd) - defer done() - - gomock.InOrder( - mockSrvcs.EXPECT().Send(gomock.Any(), 
SendParams{ - To: mustAddr(address.NewIDAddress(1)), - From: mustAddr(address.NewIDAddress(2)), - Val: oneFil, - }).Return(arbtCid, nil), - mockSrvcs.EXPECT().Close(), - ) - err := app.Run([]string{"lotus", "send", "--from=t02", "t01", "1"}) - assert.NoError(t, err) - assert.EqualValues(t, arbtCid.String()+"\n", buf.String()) - }) - - t.Run("nonce-specific", func(t *testing.T) { - app, mockSrvcs, buf, done := newMockApp(t, sendCmd) - defer done() - zero := uint64(0) - - gomock.InOrder( - mockSrvcs.EXPECT().Send(gomock.Any(), SendParams{ + arbtProto := &api.MessagePrototype{ + Message: types.Message{ + From: mustAddr(address.NewIDAddress(1)), To: mustAddr(address.NewIDAddress(1)), - Nonce: &zero, - Val: oneFil, - }).Return(arbtCid, nil), + Value: oneFil, + }, + } + sigMsg := fakeSign(&arbtProto.Message) + + gomock.InOrder( + mockSrvcs.EXPECT().MessageForSend(gomock.Any(), SendParams{ + To: mustAddr(address.NewIDAddress(1)), + Val: oneFil, + }).Return(arbtProto, nil), + mockSrvcs.EXPECT().PublishMessage(gomock.Any(), arbtProto, false). 
+ Return(sigMsg, nil, nil), mockSrvcs.EXPECT().Close(), ) - err := app.Run([]string{"lotus", "send", "--nonce=0", "t01", "1"}) + err := app.Run([]string{"lotus", "send", "t01", "1"}) assert.NoError(t, err) - assert.EqualValues(t, arbtCid.String()+"\n", buf.String()) + assert.EqualValues(t, sigMsg.Cid().String()+"\n", buf.String()) }) - } diff --git a/cli/sending_ui.go b/cli/sending_ui.go new file mode 100644 index 000000000..a70abefb9 --- /dev/null +++ b/cli/sending_ui.go @@ -0,0 +1,264 @@ +package cli + +import ( + "context" + "errors" + "fmt" + "io" + "strings" + + "github.com/Kubuxu/imtui" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/build" + types "github.com/filecoin-project/lotus/chain/types" + "github.com/gdamore/tcell/v2" + cid "github.com/ipfs/go-cid" + "github.com/urfave/cli/v2" + "golang.org/x/xerrors" +) + +func InteractiveSend(ctx context.Context, cctx *cli.Context, srv ServicesAPI, + proto *api.MessagePrototype) (*types.SignedMessage, error) { + + msg, checks, err := srv.PublishMessage(ctx, proto, cctx.Bool("force") || cctx.Bool("force-send")) + printer := cctx.App.Writer + if xerrors.Is(err, ErrCheckFailed) { + if !cctx.Bool("interactive") { + fmt.Fprintf(printer, "Following checks have failed:\n") + printChecks(printer, checks, proto.Message.Cid()) + } else { + proto, err = resolveChecks(ctx, srv, cctx.App.Writer, proto, checks) + if err != nil { + return nil, xerrors.Errorf("from UI: %w", err) + } + + msg, _, err = srv.PublishMessage(ctx, proto, true) + } + } + if err != nil { + return nil, xerrors.Errorf("publishing message: %w", err) + } + + return msg, nil +} + +var interactiveSolves = map[api.CheckStatusCode]bool{ + api.CheckStatusMessageMinBaseFee: true, + api.CheckStatusMessageBaseFee: true, + api.CheckStatusMessageBaseFeeLowerBound: true, + api.CheckStatusMessageBaseFeeUpperBound: true, +} + +func 
baseFeeFromHints(hint map[string]interface{}) big.Int { + bHint, ok := hint["baseFee"] + if !ok { + return big.Zero() + } + bHintS, ok := bHint.(string) + if !ok { + return big.Zero() + } + + var err error + baseFee, err := big.FromString(bHintS) + if err != nil { + return big.Zero() + } + return baseFee +} + +func resolveChecks(ctx context.Context, s ServicesAPI, printer io.Writer, + proto *api.MessagePrototype, checkGroups [][]api.MessageCheckStatus, +) (*api.MessagePrototype, error) { + + fmt.Fprintf(printer, "Following checks have failed:\n") + printChecks(printer, checkGroups, proto.Message.Cid()) + + if feeCapBad, baseFee := isFeeCapProblem(checkGroups, proto.Message.Cid()); feeCapBad { + fmt.Fprintf(printer, "Fee of the message can be adjusted\n") + if askUser(printer, "Do you wish to do that? [Yes/no]: ", true) { + var err error + proto, err = runFeeCapAdjustmentUI(proto, baseFee) + if err != nil { + return nil, err + } + } + checks, err := s.RunChecksForPrototype(ctx, proto) + if err != nil { + return nil, err + } + fmt.Fprintf(printer, "Following checks still failed:\n") + printChecks(printer, checks, proto.Message.Cid()) + } + + if !askUser(printer, "Do you wish to send this message? 
[yes/No]: ", false) { + return nil, ErrAbortedByUser + } + return proto, nil +} + +var ErrAbortedByUser = errors.New("aborted by user") + +func printChecks(printer io.Writer, checkGroups [][]api.MessageCheckStatus, protoCid cid.Cid) { + for _, checks := range checkGroups { + for _, c := range checks { + if c.OK { + continue + } + aboutProto := c.Cid.Equals(protoCid) + msgName := "current" + if !aboutProto { + msgName = c.Cid.String() + } + fmt.Fprintf(printer, "%s message failed a check %s: %s\n", msgName, c.Code, c.Err) + } + } +} + +func askUser(printer io.Writer, q string, def bool) bool { + var resp string + fmt.Fprint(printer, q) + fmt.Scanln(&resp) + resp = strings.ToLower(resp) + if len(resp) == 0 { + return def + } + return resp[0] == 'y' +} + +func isFeeCapProblem(checkGroups [][]api.MessageCheckStatus, protoCid cid.Cid) (bool, big.Int) { + baseFee := big.Zero() + yes := false + for _, checks := range checkGroups { + for _, c := range checks { + if c.OK { + continue + } + aboutProto := c.Cid.Equals(protoCid) + if aboutProto && interactiveSolves[c.Code] { + yes = true + if baseFee.IsZero() { + baseFee = baseFeeFromHints(c.Hint) + } + } + } + } + if baseFee.IsZero() { + // this will only be the case if failing check is: MessageMinBaseFee + baseFee = big.NewInt(build.MinimumBaseFee) + } + + return yes, baseFee +} + +func runFeeCapAdjustmentUI(proto *api.MessagePrototype, baseFee abi.TokenAmount) (*api.MessagePrototype, error) { + t, err := imtui.NewTui() + if err != nil { + return nil, err + } + + maxFee := big.Mul(proto.Message.GasFeeCap, big.NewInt(proto.Message.GasLimit)) + send := false + t.PushScene(feeUI(baseFee, proto.Message.GasLimit, &maxFee, &send)) + + err = t.Run() + if err != nil { + return nil, err + } + if !send { + return nil, fmt.Errorf("aborted by user") + } + + proto.Message.GasFeeCap = big.Div(maxFee, big.NewInt(proto.Message.GasLimit)) + + return proto, nil +} + +func feeUI(baseFee abi.TokenAmount, gasLimit int64, maxFee *abi.TokenAmount, 
send *bool) func(*imtui.Tui) error { + orignalMaxFee := *maxFee + required := big.Mul(baseFee, big.NewInt(gasLimit)) + safe := big.Mul(required, big.NewInt(10)) + + price := fmt.Sprintf("%s", types.FIL(*maxFee).Unitless()) + + return func(t *imtui.Tui) error { + if t.CurrentKey != nil { + if t.CurrentKey.Key() == tcell.KeyRune { + pF, err := types.ParseFIL(price) + switch t.CurrentKey.Rune() { + case 's', 'S': + price = types.FIL(safe).Unitless() + case '+': + if err == nil { + p := big.Mul(big.Int(pF), types.NewInt(11)) + p = big.Div(p, types.NewInt(10)) + price = fmt.Sprintf("%s", types.FIL(p).Unitless()) + } + case '-': + if err == nil { + p := big.Mul(big.Int(pF), types.NewInt(10)) + p = big.Div(p, types.NewInt(11)) + price = fmt.Sprintf("%s", types.FIL(p).Unitless()) + } + default: + } + } + + if t.CurrentKey.Key() == tcell.KeyEnter { + *send = true + t.PopScene() + return nil + } + } + + defS := tcell.StyleDefault + + row := 0 + t.Label(0, row, "Fee of the message is too low.", defS) + row++ + + t.Label(0, row, fmt.Sprintf("Your configured maximum fee is: %s FIL", + types.FIL(orignalMaxFee).Unitless()), defS) + row++ + t.Label(0, row, fmt.Sprintf("Required maximum fee for the message: %s FIL", + types.FIL(required).Unitless()), defS) + row++ + w := t.Label(0, row, fmt.Sprintf("Safe maximum fee for the message: %s FIL", + types.FIL(safe).Unitless()), defS) + t.Label(w, row, " Press S to use it", defS) + row++ + + w = t.Label(0, row, "Current Maximum Fee: ", defS) + + w += t.EditFieldFiltered(w, row, 14, &price, imtui.FilterDecimal, defS.Foreground(tcell.ColorWhite).Background(tcell.ColorBlack)) + + w += t.Label(w, row, " FIL", defS) + + pF, err := types.ParseFIL(price) + *maxFee = abi.TokenAmount(pF) + if err != nil { + w += t.Label(w, row, " invalid price", defS.Foreground(tcell.ColorMaroon).Bold(true)) + } else if maxFee.GreaterThanEqual(safe) { + w += t.Label(w, row, " SAFE", defS.Foreground(tcell.ColorDarkGreen).Bold(true)) + } else if 
maxFee.GreaterThanEqual(required) { + w += t.Label(w, row, " low", defS.Foreground(tcell.ColorYellow).Bold(true)) + over := big.Div(big.Mul(*maxFee, big.NewInt(100)), required) + w += t.Label(w, row, + fmt.Sprintf(" %.1fx over the minimum", float64(over.Int64())/100.0), defS) + } else { + w += t.Label(w, row, " too low", defS.Foreground(tcell.ColorRed).Bold(true)) + } + row += 2 + + t.Label(0, row, fmt.Sprintf("Current Base Fee is: %s", types.FIL(baseFee).Nano()), defS) + row++ + t.Label(0, row, fmt.Sprintf("Resulting FeeCap is: %s", + types.FIL(big.Div(*maxFee, big.NewInt(gasLimit))).Nano()), defS) + row++ + t.Label(0, row, "You can use '+' and '-' to adjust the fee.", defS) + + return nil + } +} diff --git a/cli/services.go b/cli/services.go index 3de0b567b..0923680aa 100644 --- a/cli/services.go +++ b/cli/services.go @@ -4,14 +4,14 @@ import ( "bytes" "context" "encoding/json" - "errors" "fmt" "reflect" "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-jsonrpc" "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/lotus/api/v0api" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/chain/stmgr" types "github.com/filecoin-project/lotus/chain/types" cid "github.com/ipfs/go-cid" @@ -22,12 +22,30 @@ import ( //go:generate go run github.com/golang/mock/mockgen -destination=servicesmock_test.go -package=cli -self_package github.com/filecoin-project/lotus/cli . 
ServicesAPI type ServicesAPI interface { - // Sends executes a send given SendParams - Send(ctx context.Context, params SendParams) (cid.Cid, error) + FullNodeAPI() api.FullNode + + GetBaseFee(ctx context.Context) (abi.TokenAmount, error) + + // MessageForSend creates a prototype of a message based on SendParams + MessageForSend(ctx context.Context, params SendParams) (*api.MessagePrototype, error) + // DecodeTypedParamsFromJSON takes in information needed to identify a method and converts JSON // parameters to bytes of their CBOR encoding DecodeTypedParamsFromJSON(ctx context.Context, to address.Address, method abi.MethodNum, paramstr string) ([]byte, error) + RunChecksForPrototype(ctx context.Context, prototype *api.MessagePrototype) ([][]api.MessageCheckStatus, error) + + // PublishMessage takes in a message prototype and publishes it + // before publishing the message, it runs checks on the node, message and mpool to verify that + // message is valid and won't be stuck. + // if `force` is true, it skips the checks + PublishMessage(ctx context.Context, prototype *api.MessagePrototype, force bool) (*types.SignedMessage, [][]api.MessageCheckStatus, error) + + LocalAddresses(ctx context.Context) (address.Address, []address.Address, error) + + MpoolPendingFilter(ctx context.Context, filter func(*types.SignedMessage) bool, tsk types.TipSetKey) ([]*types.SignedMessage, error) + MpoolCheckPendingMessages(ctx context.Context, a address.Address) ([][]api.MessageCheckStatus, error) + // Close ends the session of services and disconnects from RPC, using Services after Close is called // most likely will result in an error // Should not be called concurrently @@ -35,10 +53,14 @@ type ServicesAPI interface { } type ServicesImpl struct { - api v0api.FullNode + api api.FullNode closer jsonrpc.ClientCloser } +func (s *ServicesImpl) FullNodeAPI() api.FullNode { + return s.api +} + func (s *ServicesImpl) Close() error { if s.closer == nil { return xerrors.Errorf("Services already 
closed") @@ -48,6 +70,16 @@ func (s *ServicesImpl) Close() error { return nil } +func (s *ServicesImpl) GetBaseFee(ctx context.Context) (abi.TokenAmount, error) { + // not used but useful + + ts, err := s.api.ChainHead(ctx) + if err != nil { + return big.Zero(), xerrors.Errorf("getting head: %w", err) + } + return ts.MinTicketBlock().ParentBaseFee, nil +} + func (s *ServicesImpl) DecodeTypedParamsFromJSON(ctx context.Context, to address.Address, method abi.MethodNum, paramstr string) ([]byte, error) { act, err := s.api.StateGetActor(ctx, to, types.EmptyTSK) if err != nil { @@ -72,6 +104,79 @@ func (s *ServicesImpl) DecodeTypedParamsFromJSON(ctx context.Context, to address return buf.Bytes(), nil } +type CheckInfo struct { + MessageTie cid.Cid + CurrentMessageTie bool + + Check api.MessageCheckStatus +} + +var ErrCheckFailed = fmt.Errorf("check has failed") + +func (s *ServicesImpl) RunChecksForPrototype(ctx context.Context, prototype *api.MessagePrototype) ([][]api.MessageCheckStatus, error) { + var outChecks [][]api.MessageCheckStatus + checks, err := s.api.MpoolCheckMessages(ctx, []*api.MessagePrototype{prototype}) + if err != nil { + return nil, xerrors.Errorf("message check: %w", err) + } + outChecks = append(outChecks, checks...) + + checks, err = s.api.MpoolCheckPendingMessages(ctx, prototype.Message.From) + if err != nil { + return nil, xerrors.Errorf("pending mpool check: %w", err) + } + outChecks = append(outChecks, checks...) 
+ + return outChecks, nil +} + +// PublishMessage modifies prototype to include gas estimation +// Errors with ErrCheckFailed if any of the checks fail +// First group of checks is related to the message prototype +func (s *ServicesImpl) PublishMessage(ctx context.Context, + prototype *api.MessagePrototype, force bool) (*types.SignedMessage, [][]api.MessageCheckStatus, error) { + + gasedMsg, err := s.api.GasEstimateMessageGas(ctx, &prototype.Message, nil, types.EmptyTSK) + if err != nil { + return nil, nil, xerrors.Errorf("estimating gas: %w", err) + } + prototype.Message = *gasedMsg + + if !force { + checks, err := s.RunChecksForPrototype(ctx, prototype) + if err != nil { + return nil, nil, xerrors.Errorf("running checks: %w", err) + } + for _, chks := range checks { + for _, c := range chks { + if !c.OK { + return nil, checks, ErrCheckFailed + } + } + } + } + + if prototype.ValidNonce { + sm, err := s.api.WalletSignMessage(ctx, prototype.Message.From, &prototype.Message) + if err != nil { + return nil, nil, err + } + + _, err = s.api.MpoolPush(ctx, sm) + if err != nil { + return nil, nil, err + } + return sm, nil, nil + } + + sm, err := s.api.MpoolPushMessage(ctx, &prototype.Message, nil) + if err != nil { + return nil, nil, err + } + + return sm, nil, nil +} + type SendParams struct { To address.Address From address.Address @@ -84,26 +189,18 @@ type SendParams struct { Nonce *uint64 Method abi.MethodNum Params []byte - - Force bool } -// This is specialised Send for Send command -// There might be room for generic Send that other commands can use to send their messages -// We will see - -var ErrSendBalanceTooLow = errors.New("balance too low") - -func (s *ServicesImpl) Send(ctx context.Context, params SendParams) (cid.Cid, error) { +func (s *ServicesImpl) MessageForSend(ctx context.Context, params SendParams) (*api.MessagePrototype, error) { if params.From == address.Undef { defaddr, err := s.api.WalletDefaultAddress(ctx) if err != nil { - return cid.Undef, err 
+ return nil, err } params.From = defaddr } - msg := &types.Message{ + msg := types.Message{ From: params.From, To: params.To, Value: params.Val, @@ -127,40 +224,53 @@ func (s *ServicesImpl) Send(ctx context.Context, params SendParams) (cid.Cid, er } else { msg.GasLimit = 0 } - - if !params.Force { - // Funds insufficient check - fromBalance, err := s.api.WalletBalance(ctx, msg.From) - if err != nil { - return cid.Undef, err - } - totalCost := types.BigAdd(types.BigMul(msg.GasFeeCap, types.NewInt(uint64(msg.GasLimit))), msg.Value) - - if fromBalance.LessThan(totalCost) { - return cid.Undef, xerrors.Errorf("From balance %s less than total cost %s: %w", types.FIL(fromBalance), types.FIL(totalCost), ErrSendBalanceTooLow) - - } - } - + validNonce := false if params.Nonce != nil { msg.Nonce = *params.Nonce - sm, err := s.api.WalletSignMessage(ctx, params.From, msg) - if err != nil { - return cid.Undef, err - } - - _, err = s.api.MpoolPush(ctx, sm) - if err != nil { - return cid.Undef, err - } - - return sm.Cid(), nil + validNonce = true } - sm, err := s.api.MpoolPushMessage(ctx, msg, nil) - if err != nil { - return cid.Undef, err + prototype := &api.MessagePrototype{ + Message: msg, + ValidNonce: validNonce, } - - return sm.Cid(), nil + return prototype, nil +} + +func (s *ServicesImpl) MpoolPendingFilter(ctx context.Context, filter func(*types.SignedMessage) bool, + tsk types.TipSetKey) ([]*types.SignedMessage, error) { + msgs, err := s.api.MpoolPending(ctx, types.EmptyTSK) + if err != nil { + return nil, xerrors.Errorf("getting pending messages: %w", err) + } + out := []*types.SignedMessage{} + for _, sm := range msgs { + if filter(sm) { + out = append(out, sm) + } + } + + return out, nil +} + +func (s *ServicesImpl) LocalAddresses(ctx context.Context) (address.Address, []address.Address, error) { + def, err := s.api.WalletDefaultAddress(ctx) + if err != nil { + return address.Undef, nil, xerrors.Errorf("getting default addr: %w", err) + } + + all, err := 
s.api.WalletList(ctx) + if err != nil { + return address.Undef, nil, xerrors.Errorf("getting list of addrs: %w", err) + } + + return def, all, nil +} + +func (s *ServicesImpl) MpoolCheckPendingMessages(ctx context.Context, a address.Address) ([][]api.MessageCheckStatus, error) { + checks, err := s.api.MpoolCheckPendingMessages(ctx, a) + if err != nil { + return nil, xerrors.Errorf("pending mpool check: %w", err) + } + return checks, nil } diff --git a/cli/services_send_test.go b/cli/services_send_test.go index 713e81b2a..b7ed78f80 100644 --- a/cli/services_send_test.go +++ b/cli/services_send_test.go @@ -9,10 +9,9 @@ import ( "github.com/filecoin-project/go-state-types/big" "github.com/filecoin-project/go-state-types/crypto" "github.com/filecoin-project/lotus/api" - mocks "github.com/filecoin-project/lotus/api/v0api/v0mocks" + mocks "github.com/filecoin-project/lotus/api/mocks" types "github.com/filecoin-project/lotus/chain/types" gomock "github.com/golang/mock/gomock" - cid "github.com/ipfs/go-cid" "github.com/stretchr/testify/assert" ) @@ -61,6 +60,7 @@ func setupMockSrvcs(t *testing.T) (*ServicesImpl, *mocks.MockFullNode) { return srvcs, mockApi } +// linter doesn't like dead code, so these are commented out. 
func fakeSign(msg *types.Message) *types.SignedMessage { return &types.SignedMessage{ Message: *msg, @@ -68,15 +68,15 @@ func fakeSign(msg *types.Message) *types.SignedMessage { } } -func makeMessageSigner() (*cid.Cid, interface{}) { - smCid := cid.Undef - return &smCid, - func(_ context.Context, msg *types.Message, _ *api.MessageSendSpec) (*types.SignedMessage, error) { - sm := fakeSign(msg) - smCid = sm.Cid() - return sm, nil - } -} +//func makeMessageSigner() (*cid.Cid, interface{}) { +//smCid := cid.Undef +//return &smCid, +//func(_ context.Context, msg *types.Message, _ *api.MessageSendSpec) (*types.SignedMessage, error) { +//sm := fakeSign(msg) +//smCid = sm.Cid() +//return sm, nil +//} +//} type MessageMatcher SendParams @@ -84,11 +84,13 @@ var _ gomock.Matcher = MessageMatcher{} // Matches returns whether x is a match. func (mm MessageMatcher) Matches(x interface{}) bool { - m, ok := x.(*types.Message) + proto, ok := x.(*api.MessagePrototype) if !ok { return false } + m := &proto.Message + if mm.From != address.Undef && mm.From != m.From { return false } @@ -151,47 +153,12 @@ func TestSendService(t *testing.T) { t.Run("happy", func(t *testing.T) { params := params - srvcs, mockApi := setupMockSrvcs(t) + srvcs, _ := setupMockSrvcs(t) defer srvcs.Close() //nolint:errcheck - msgCid, sign := makeMessageSigner() - gomock.InOrder( - mockApi.EXPECT().WalletBalance(ctxM, params.From).Return(types.NewInt(balance), nil), - mockApi.EXPECT().MpoolPushMessage(ctxM, MessageMatcher(params), nil).DoAndReturn(sign), - ) - c, err := srvcs.Send(ctx, params) + proto, err := srvcs.MessageForSend(ctx, params) assert.NoError(t, err) - assert.Equal(t, *msgCid, c) - }) - - t.Run("balance-too-low", func(t *testing.T) { - params := params - srvcs, mockApi := setupMockSrvcs(t) - defer srvcs.Close() //nolint:errcheck - gomock.InOrder( - mockApi.EXPECT().WalletBalance(ctxM, a1).Return(types.NewInt(balance-200), nil), - // no MpoolPushMessage - ) - - c, err := srvcs.Send(ctx, params) - 
assert.Equal(t, c, cid.Undef) - assert.ErrorIs(t, err, ErrSendBalanceTooLow) - }) - - t.Run("force", func(t *testing.T) { - params := params - params.Force = true - srvcs, mockApi := setupMockSrvcs(t) - defer srvcs.Close() //nolint:errcheck - msgCid, sign := makeMessageSigner() - gomock.InOrder( - mockApi.EXPECT().WalletBalance(ctxM, a1).Return(types.NewInt(balance-200), nil).AnyTimes(), - mockApi.EXPECT().MpoolPushMessage(ctxM, MessageMatcher(params), nil).DoAndReturn(sign), - ) - - c, err := srvcs.Send(ctx, params) - assert.NoError(t, err) - assert.Equal(t, *msgCid, c) + assert.True(t, MessageMatcher(params).Matches(proto)) }) t.Run("default-from", func(t *testing.T) { @@ -202,16 +169,14 @@ func TestSendService(t *testing.T) { srvcs, mockApi := setupMockSrvcs(t) defer srvcs.Close() //nolint:errcheck - msgCid, sign := makeMessageSigner() + gomock.InOrder( mockApi.EXPECT().WalletDefaultAddress(ctxM).Return(a1, nil), - mockApi.EXPECT().WalletBalance(ctxM, a1).Return(types.NewInt(balance), nil), - mockApi.EXPECT().MpoolPushMessage(ctxM, mm, nil).DoAndReturn(sign), ) - c, err := srvcs.Send(ctx, params) + proto, err := srvcs.MessageForSend(ctx, params) assert.NoError(t, err) - assert.Equal(t, *msgCid, c) + assert.True(t, mm.Matches(proto)) }) t.Run("set-nonce", func(t *testing.T) { @@ -220,26 +185,12 @@ func TestSendService(t *testing.T) { params.Nonce = &n mm := MessageMatcher(params) - srvcs, mockApi := setupMockSrvcs(t) + srvcs, _ := setupMockSrvcs(t) defer srvcs.Close() //nolint:errcheck - _, _ = mm, mockApi - var sm *types.SignedMessage - gomock.InOrder( - mockApi.EXPECT().WalletBalance(ctxM, a1).Return(types.NewInt(balance), nil), - mockApi.EXPECT().WalletSignMessage(ctxM, a1, mm).DoAndReturn( - func(_ context.Context, _ address.Address, msg *types.Message) (*types.SignedMessage, error) { - sm = fakeSign(msg) - - // now we expect MpoolPush with that SignedMessage - mockApi.EXPECT().MpoolPush(ctxM, sm).Return(sm.Cid(), nil) - return sm, nil - }), - ) - - c, err := 
srvcs.Send(ctx, params) + proto, err := srvcs.MessageForSend(ctx, params) assert.NoError(t, err) - assert.Equal(t, sm.Cid(), c) + assert.True(t, mm.Matches(proto)) }) t.Run("gas-params", func(t *testing.T) { @@ -251,16 +202,14 @@ func TestSendService(t *testing.T) { gp := big.NewInt(10) params.GasPremium = &gp - srvcs, mockApi := setupMockSrvcs(t) - defer srvcs.Close() //nolint:errcheck - msgCid, sign := makeMessageSigner() - gomock.InOrder( - mockApi.EXPECT().WalletBalance(ctxM, params.From).Return(types.NewInt(balance), nil), - mockApi.EXPECT().MpoolPushMessage(ctxM, MessageMatcher(params), nil).DoAndReturn(sign), - ) + mm := MessageMatcher(params) - c, err := srvcs.Send(ctx, params) + srvcs, _ := setupMockSrvcs(t) + defer srvcs.Close() //nolint:errcheck + + proto, err := srvcs.MessageForSend(ctx, params) assert.NoError(t, err) - assert.Equal(t, *msgCid, c) + assert.True(t, mm.Matches(proto)) + }) } diff --git a/cli/servicesmock_test.go b/cli/servicesmock_test.go index 48f1a95ec..5bae52a5e 100644 --- a/cli/servicesmock_test.go +++ b/cli/servicesmock_test.go @@ -6,37 +6,40 @@ package cli import ( context "context" + reflect "reflect" + go_address "github.com/filecoin-project/go-address" abi "github.com/filecoin-project/go-state-types/abi" + big "github.com/filecoin-project/go-state-types/big" + api "github.com/filecoin-project/lotus/api" + types "github.com/filecoin-project/lotus/chain/types" gomock "github.com/golang/mock/gomock" - go_cid "github.com/ipfs/go-cid" - reflect "reflect" ) -// MockServicesAPI is a mock of ServicesAPI interface +// MockServicesAPI is a mock of ServicesAPI interface. type MockServicesAPI struct { ctrl *gomock.Controller recorder *MockServicesAPIMockRecorder } -// MockServicesAPIMockRecorder is the mock recorder for MockServicesAPI +// MockServicesAPIMockRecorder is the mock recorder for MockServicesAPI. 
type MockServicesAPIMockRecorder struct { mock *MockServicesAPI } -// NewMockServicesAPI creates a new mock instance +// NewMockServicesAPI creates a new mock instance. func NewMockServicesAPI(ctrl *gomock.Controller) *MockServicesAPI { mock := &MockServicesAPI{ctrl: ctrl} mock.recorder = &MockServicesAPIMockRecorder{mock} return mock } -// EXPECT returns an object that allows the caller to indicate expected use +// EXPECT returns an object that allows the caller to indicate expected use. func (m *MockServicesAPI) EXPECT() *MockServicesAPIMockRecorder { return m.recorder } -// Close mocks base method +// Close mocks base method. func (m *MockServicesAPI) Close() error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Close") @@ -44,13 +47,13 @@ func (m *MockServicesAPI) Close() error { return ret0 } -// Close indicates an expected call of Close +// Close indicates an expected call of Close. func (mr *MockServicesAPIMockRecorder) Close() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockServicesAPI)(nil).Close)) } -// DecodeTypedParamsFromJSON mocks base method +// DecodeTypedParamsFromJSON mocks base method. func (m *MockServicesAPI) DecodeTypedParamsFromJSON(arg0 context.Context, arg1 go_address.Address, arg2 abi.MethodNum, arg3 string) ([]byte, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "DecodeTypedParamsFromJSON", arg0, arg1, arg2, arg3) @@ -59,23 +62,129 @@ func (m *MockServicesAPI) DecodeTypedParamsFromJSON(arg0 context.Context, arg1 g return ret0, ret1 } -// DecodeTypedParamsFromJSON indicates an expected call of DecodeTypedParamsFromJSON +// DecodeTypedParamsFromJSON indicates an expected call of DecodeTypedParamsFromJSON. 
func (mr *MockServicesAPIMockRecorder) DecodeTypedParamsFromJSON(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DecodeTypedParamsFromJSON", reflect.TypeOf((*MockServicesAPI)(nil).DecodeTypedParamsFromJSON), arg0, arg1, arg2, arg3) } -// Send mocks base method -func (m *MockServicesAPI) Send(arg0 context.Context, arg1 SendParams) (go_cid.Cid, error) { +// FullNodeAPI mocks base method. +func (m *MockServicesAPI) FullNodeAPI() api.FullNode { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Send", arg0, arg1) - ret0, _ := ret[0].(go_cid.Cid) + ret := m.ctrl.Call(m, "FullNodeAPI") + ret0, _ := ret[0].(api.FullNode) + return ret0 +} + +// FullNodeAPI indicates an expected call of FullNodeAPI. +func (mr *MockServicesAPIMockRecorder) FullNodeAPI() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FullNodeAPI", reflect.TypeOf((*MockServicesAPI)(nil).FullNodeAPI)) +} + +// GetBaseFee mocks base method. +func (m *MockServicesAPI) GetBaseFee(arg0 context.Context) (big.Int, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetBaseFee", arg0) + ret0, _ := ret[0].(big.Int) ret1, _ := ret[1].(error) return ret0, ret1 } -// Send indicates an expected call of Send -func (mr *MockServicesAPIMockRecorder) Send(arg0, arg1 interface{}) *gomock.Call { +// GetBaseFee indicates an expected call of GetBaseFee. +func (mr *MockServicesAPIMockRecorder) GetBaseFee(arg0 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Send", reflect.TypeOf((*MockServicesAPI)(nil).Send), arg0, arg1) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBaseFee", reflect.TypeOf((*MockServicesAPI)(nil).GetBaseFee), arg0) +} + +// LocalAddresses mocks base method. 
+func (m *MockServicesAPI) LocalAddresses(arg0 context.Context) (go_address.Address, []go_address.Address, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "LocalAddresses", arg0) + ret0, _ := ret[0].(go_address.Address) + ret1, _ := ret[1].([]go_address.Address) + ret2, _ := ret[2].(error) + return ret0, ret1, ret2 +} + +// LocalAddresses indicates an expected call of LocalAddresses. +func (mr *MockServicesAPIMockRecorder) LocalAddresses(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LocalAddresses", reflect.TypeOf((*MockServicesAPI)(nil).LocalAddresses), arg0) +} + +// MessageForSend mocks base method. +func (m *MockServicesAPI) MessageForSend(arg0 context.Context, arg1 SendParams) (*api.MessagePrototype, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MessageForSend", arg0, arg1) + ret0, _ := ret[0].(*api.MessagePrototype) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MessageForSend indicates an expected call of MessageForSend. +func (mr *MockServicesAPIMockRecorder) MessageForSend(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MessageForSend", reflect.TypeOf((*MockServicesAPI)(nil).MessageForSend), arg0, arg1) +} + +// MpoolCheckPendingMessages mocks base method. +func (m *MockServicesAPI) MpoolCheckPendingMessages(arg0 context.Context, arg1 go_address.Address) ([][]api.MessageCheckStatus, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MpoolCheckPendingMessages", arg0, arg1) + ret0, _ := ret[0].([][]api.MessageCheckStatus) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MpoolCheckPendingMessages indicates an expected call of MpoolCheckPendingMessages. 
+func (mr *MockServicesAPIMockRecorder) MpoolCheckPendingMessages(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MpoolCheckPendingMessages", reflect.TypeOf((*MockServicesAPI)(nil).MpoolCheckPendingMessages), arg0, arg1) +} + +// MpoolPendingFilter mocks base method. +func (m *MockServicesAPI) MpoolPendingFilter(arg0 context.Context, arg1 func(*types.SignedMessage) bool, arg2 types.TipSetKey) ([]*types.SignedMessage, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MpoolPendingFilter", arg0, arg1, arg2) + ret0, _ := ret[0].([]*types.SignedMessage) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MpoolPendingFilter indicates an expected call of MpoolPendingFilter. +func (mr *MockServicesAPIMockRecorder) MpoolPendingFilter(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MpoolPendingFilter", reflect.TypeOf((*MockServicesAPI)(nil).MpoolPendingFilter), arg0, arg1, arg2) +} + +// PublishMessage mocks base method. +func (m *MockServicesAPI) PublishMessage(arg0 context.Context, arg1 *api.MessagePrototype, arg2 bool) (*types.SignedMessage, [][]api.MessageCheckStatus, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PublishMessage", arg0, arg1, arg2) + ret0, _ := ret[0].(*types.SignedMessage) + ret1, _ := ret[1].([][]api.MessageCheckStatus) + ret2, _ := ret[2].(error) + return ret0, ret1, ret2 +} + +// PublishMessage indicates an expected call of PublishMessage. +func (mr *MockServicesAPIMockRecorder) PublishMessage(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PublishMessage", reflect.TypeOf((*MockServicesAPI)(nil).PublishMessage), arg0, arg1, arg2) +} + +// RunChecksForPrototype mocks base method. 
+func (m *MockServicesAPI) RunChecksForPrototype(arg0 context.Context, arg1 *api.MessagePrototype) ([][]api.MessageCheckStatus, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "RunChecksForPrototype", arg0, arg1) + ret0, _ := ret[0].([][]api.MessageCheckStatus) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// RunChecksForPrototype indicates an expected call of RunChecksForPrototype. +func (mr *MockServicesAPIMockRecorder) RunChecksForPrototype(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RunChecksForPrototype", reflect.TypeOf((*MockServicesAPI)(nil).RunChecksForPrototype), arg0, arg1) } diff --git a/cli/state.go b/cli/state.go index 60bb0b59f..d5251fb85 100644 --- a/cli/state.go +++ b/cli/state.go @@ -3,6 +3,8 @@ package cli import ( "bytes" "context" + "encoding/base64" + "encoding/hex" "encoding/json" "fmt" "html/template" @@ -15,6 +17,8 @@ import ( "strings" "time" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/lotus/api/v0api" "github.com/fatih/color" @@ -22,7 +26,6 @@ import ( "github.com/ipfs/go-cid" cbor "github.com/ipfs/go-ipld-cbor" - "github.com/libp2p/go-libp2p-core/peer" "github.com/multiformats/go-multiaddr" "github.com/multiformats/go-multihash" "github.com/urfave/cli/v2" @@ -31,7 +34,6 @@ import ( "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/go-state-types/big" "github.com/filecoin-project/go-state-types/exitcode" "github.com/filecoin-project/lotus/api" @@ -75,6 +77,50 @@ var StateCmd = &cli.Command{ StateMarketCmd, StateExecTraceCmd, StateNtwkVersionCmd, + StateMinerProvingDeadlineCmd, + }, +} + +var StateMinerProvingDeadlineCmd = &cli.Command{ + Name: "miner-proving-deadline", + Usage: "Retrieve information about a given miner's proving deadline", + ArgsUsage: "[minerAddress]", + Action: func(cctx *cli.Context) error { + api, closer, err := 
GetFullNodeAPI(cctx) + if err != nil { + return err + } + defer closer() + + ctx := ReqContext(cctx) + + if !cctx.Args().Present() { + return fmt.Errorf("must specify miner to get information for") + } + + addr, err := address.NewFromString(cctx.Args().First()) + if err != nil { + return err + } + + ts, err := LoadTipSet(ctx, cctx, api) + if err != nil { + return err + } + + cd, err := api.StateMinerProvingDeadline(ctx, addr, ts.Key()) + if err != nil { + return xerrors.Errorf("getting miner info: %w", err) + } + + fmt.Printf("Period Start:\t%s\n", cd.PeriodStart) + fmt.Printf("Index:\t\t%d\n", cd.Index) + fmt.Printf("Open:\t\t%s\n", cd.Open) + fmt.Printf("Close:\t\t%s\n", cd.Close) + fmt.Printf("Challenge:\t%s\n", cd.Challenge) + fmt.Printf("FaultCutoff:\t%s\n", cd.FaultCutoff) + + return nil }, } @@ -139,18 +185,23 @@ var StateMinerInfo = &cli.Command{ return err } - rpercI := types.BigDiv(types.BigMul(pow.MinerPower.RawBytePower, types.NewInt(1000000)), pow.TotalPower.RawBytePower) - qpercI := types.BigDiv(types.BigMul(pow.MinerPower.QualityAdjPower, types.NewInt(1000000)), pow.TotalPower.QualityAdjPower) - fmt.Printf("Byte Power: %s / %s (%0.4f%%)\n", color.BlueString(types.SizeStr(pow.MinerPower.RawBytePower)), types.SizeStr(pow.TotalPower.RawBytePower), - float64(rpercI.Int64())/10000) + types.BigDivFloat( + types.BigMul(pow.MinerPower.RawBytePower, big.NewInt(100)), + pow.TotalPower.RawBytePower, + ), + ) fmt.Printf("Actual Power: %s / %s (%0.4f%%)\n", color.GreenString(types.DeciStr(pow.MinerPower.QualityAdjPower)), types.DeciStr(pow.TotalPower.QualityAdjPower), - float64(qpercI.Int64())/10000) + types.BigDivFloat( + types.BigMul(pow.MinerPower.QualityAdjPower, big.NewInt(100)), + pow.TotalPower.QualityAdjPower, + ), + ) fmt.Println() @@ -180,10 +231,13 @@ func ParseTipSetString(ts string) ([]cid.Cid, error) { return cids, nil } +// LoadTipSet gets the tipset from the context, or the head from the API. 
+// +// It always gets the head from the API so commands use a consistent tipset even if time pases. func LoadTipSet(ctx context.Context, cctx *cli.Context, api v0api.FullNode) (*types.TipSet, error) { tss := cctx.String("tipset") if tss == "" { - return nil, nil + return api.ChainHead(ctx) } return ParseTipSetRef(ctx, api, tss) @@ -234,17 +288,26 @@ var StatePowerCmd = &cli.Command{ ctx := ReqContext(cctx) + ts, err := LoadTipSet(ctx, cctx, api) + if err != nil { + return err + } + var maddr address.Address if cctx.Args().Present() { maddr, err = address.NewFromString(cctx.Args().First()) if err != nil { return err } - } - ts, err := LoadTipSet(ctx, cctx, api) - if err != nil { - return err + ma, err := api.StateGetActor(ctx, maddr, ts.Key()) + if err != nil { + return err + } + + if !builtin.IsStorageMinerActor(ma.Code) { + return xerrors.New("provided address does not correspond to a miner actor") + } } power, err := api.StateMinerPower(ctx, maddr, ts.Key()) @@ -255,8 +318,15 @@ var StatePowerCmd = &cli.Command{ tp := power.TotalPower if cctx.Args().Present() { mp := power.MinerPower - percI := types.BigDiv(types.BigMul(mp.QualityAdjPower, types.NewInt(1000000)), tp.QualityAdjPower) - fmt.Printf("%s(%s) / %s(%s) ~= %0.4f%%\n", mp.QualityAdjPower.String(), types.SizeStr(mp.QualityAdjPower), tp.QualityAdjPower.String(), types.SizeStr(tp.QualityAdjPower), float64(percI.Int64())/10000) + fmt.Printf( + "%s(%s) / %s(%s) ~= %0.4f%%\n", + mp.QualityAdjPower.String(), types.SizeStr(mp.QualityAdjPower), + tp.QualityAdjPower.String(), types.SizeStr(tp.QualityAdjPower), + types.BigDivFloat( + types.BigMul(mp.QualityAdjPower, big.NewInt(100)), + tp.QualityAdjPower, + ), + ) } else { fmt.Printf("%s(%s)\n", tp.QualityAdjPower.String(), types.SizeStr(tp.QualityAdjPower)) } @@ -298,7 +368,7 @@ var StateSectorsCmd = &cli.Command{ } for _, s := range sectors { - fmt.Printf("%d: %x\n", s.SectorNumber, s.SealedCID) + fmt.Printf("%d: %s\n", s.SectorNumber, s.SealedCID) } return nil 
@@ -338,7 +408,7 @@ var StateActiveSectorsCmd = &cli.Command{ } for _, s := range sectors { - fmt.Printf("%d: %x\n", s.SectorNumber, s.SealedCID) + fmt.Printf("%d: %s\n", s.SectorNumber, s.SealedCID) } return nil @@ -376,6 +446,9 @@ var StateExecTraceCmd = &cli.Command{ if err != nil { return err } + if lookup == nil { + return fmt.Errorf("failed to find message: %s", mcid) + } ts, err := capi.ChainGetTipSet(ctx, lookup.TipSet) if err != nil { @@ -625,7 +698,7 @@ var StateListActorsCmd = &cli.Command{ var StateGetActorCmd = &cli.Command{ Name: "get-actor", Usage: "Print actor information", - ArgsUsage: "[actorrAddress]", + ArgsUsage: "[actorAddress]", Action: func(cctx *cli.Context) error { api, closer, err := GetFullNodeAPI(cctx) if err != nil { @@ -850,14 +923,6 @@ var StateListMessagesCmd = &cli.Command{ return err } - if ts == nil { - head, err := api.ChainHead(ctx) - if err != nil { - return err - } - ts = head - } - windowSize := abi.ChainEpoch(100) cur := ts @@ -957,13 +1022,6 @@ var StateComputeStateCmd = &cli.Command{ } h := abi.ChainEpoch(cctx.Uint64("vm-height")) - if ts == nil { - head, err := api.ChainHead(ctx) - if err != nil { - return err - } - ts = head - } if h == 0 { h = ts.Height() } @@ -1436,6 +1494,10 @@ var StateSearchMsgCmd = &cli.Command{ return err } + if mw == nil { + return fmt.Errorf("failed to find message: %s", msg) + } + m, err := api.ChainGetMessage(ctx, msg) if err != nil { return err @@ -1489,7 +1551,7 @@ func printMsg(ctx context.Context, api v0api.FullNode, msg cid.Cid, mw *lapi.Msg var StateCallCmd = &cli.Command{ Name: "call", Usage: "Invoke a method on an actor locally", - ArgsUsage: "[toAddress methodId (optional)]", + ArgsUsage: "[toAddress methodId params (optional)]", Flags: []cli.Flag{ &cli.StringFlag{ Name: "from", @@ -1503,8 +1565,13 @@ var StateCallCmd = &cli.Command{ }, &cli.StringFlag{ Name: "ret", - Usage: "specify how to parse output (auto, raw, addr, big)", - Value: "auto", + Usage: "specify how to parse output 
(raw, decoded, base64, hex)", + Value: "decoded", + }, + &cli.StringFlag{ + Name: "encoding", + Value: "base64", + Usage: "specify params encoding to parse (base64, hex)", }, }, Action: func(cctx *cli.Context) error { @@ -1545,14 +1612,23 @@ var StateCallCmd = &cli.Command{ return fmt.Errorf("failed to parse 'value': %s", err) } - act, err := api.StateGetActor(ctx, toa, ts.Key()) - if err != nil { - return fmt.Errorf("failed to lookup target actor: %s", err) - } - - params, err := parseParamsForMethod(act.Code, method, cctx.Args().Slice()[2:]) - if err != nil { - return fmt.Errorf("failed to parse params: %s", err) + var params []byte + // If params were passed in, decode them + if cctx.Args().Len() > 2 { + switch cctx.String("encoding") { + case "base64": + params, err = base64.StdEncoding.DecodeString(cctx.Args().Get(2)) + if err != nil { + return xerrors.Errorf("decoding base64 value: %w", err) + } + case "hex": + params, err = hex.DecodeString(cctx.Args().Get(2)) + if err != nil { + return xerrors.Errorf("decoding hex value: %w", err) + } + default: + return xerrors.Errorf("unrecognized encoding: %s", cctx.String("encoding")) + } } ret, err := api.StateCall(ctx, &types.Message{ @@ -1563,137 +1639,42 @@ var StateCallCmd = &cli.Command{ Params: params, }, ts.Key()) if err != nil { - return fmt.Errorf("state call failed: %s", err) + return fmt.Errorf("state call failed: %w", err) } if ret.MsgRct.ExitCode != 0 { return fmt.Errorf("invocation failed (exit: %d, gasUsed: %d): %s", ret.MsgRct.ExitCode, ret.MsgRct.GasUsed, ret.Error) } - s, err := formatOutput(cctx.String("ret"), ret.MsgRct.Return) - if err != nil { - return fmt.Errorf("failed to format output: %s", err) - } + fmt.Println("Call receipt:") + fmt.Printf("Exit code: %d\n", ret.MsgRct.ExitCode) + fmt.Printf("Gas Used: %d\n", ret.MsgRct.GasUsed) - fmt.Printf("gas used: %d\n", ret.MsgRct.GasUsed) - fmt.Printf("return: %s\n", s) + switch cctx.String("ret") { + case "decoded": + act, err := 
api.StateGetActor(ctx, toa, ts.Key()) + if err != nil { + return xerrors.Errorf("getting actor: %w", err) + } + + retStr, err := jsonReturn(act.Code, abi.MethodNum(method), ret.MsgRct.Return) + if err != nil { + return xerrors.Errorf("decoding return: %w", err) + } + + fmt.Printf("Return:\n%s\n", retStr) + case "raw": + fmt.Printf("Return: \n%s\n", ret.MsgRct.Return) + case "hex": + fmt.Printf("Return: \n%x\n", ret.MsgRct.Return) + case "base64": + fmt.Printf("Return: \n%s\n", base64.StdEncoding.EncodeToString(ret.MsgRct.Return)) + } return nil }, } -func formatOutput(t string, val []byte) (string, error) { - switch t { - case "raw", "hex": - return fmt.Sprintf("%x", val), nil - case "address", "addr", "a": - a, err := address.NewFromBytes(val) - if err != nil { - return "", err - } - return a.String(), nil - case "big", "int", "bigint": - bi := types.BigFromBytes(val) - return bi.String(), nil - case "fil": - bi := types.FIL(types.BigFromBytes(val)) - return bi.String(), nil - case "pid", "peerid", "peer": - pid, err := peer.IDFromBytes(val) - if err != nil { - return "", err - } - - return pid.Pretty(), nil - case "auto": - if len(val) == 0 { - return "", nil - } - - a, err := address.NewFromBytes(val) - if err == nil { - return "address: " + a.String(), nil - } - - pid, err := peer.IDFromBytes(val) - if err == nil { - return "peerID: " + pid.Pretty(), nil - } - - bi := types.BigFromBytes(val) - return "bigint: " + bi.String(), nil - default: - return "", fmt.Errorf("unrecognized output type: %q", t) - } -} - -func parseParamsForMethod(act cid.Cid, method uint64, args []string) ([]byte, error) { - if len(args) == 0 { - return nil, nil - } - - // TODO: consider moving this to a dedicated helper - actMeta, ok := stmgr.MethodsMap[act] - if !ok { - return nil, fmt.Errorf("unknown actor %s", act) - } - - methodMeta, ok := actMeta[abi.MethodNum(method)] - if !ok { - return nil, fmt.Errorf("unknown method %d for actor %s", method, act) - } - - paramObj := 
methodMeta.Params.Elem() - if paramObj.NumField() != len(args) { - return nil, fmt.Errorf("not enough arguments given to call that method (expecting %d)", paramObj.NumField()) - } - - p := reflect.New(paramObj) - for i := 0; i < len(args); i++ { - switch paramObj.Field(i).Type { - case reflect.TypeOf(address.Address{}): - a, err := address.NewFromString(args[i]) - if err != nil { - return nil, fmt.Errorf("failed to parse address: %s", err) - } - p.Elem().Field(i).Set(reflect.ValueOf(a)) - case reflect.TypeOf(uint64(0)): - val, err := strconv.ParseUint(args[i], 10, 64) - if err != nil { - return nil, err - } - p.Elem().Field(i).Set(reflect.ValueOf(val)) - case reflect.TypeOf(abi.ChainEpoch(0)): - val, err := strconv.ParseInt(args[i], 10, 64) - if err != nil { - return nil, err - } - p.Elem().Field(i).Set(reflect.ValueOf(abi.ChainEpoch(val))) - case reflect.TypeOf(big.Int{}): - val, err := big.FromString(args[i]) - if err != nil { - return nil, err - } - p.Elem().Field(i).Set(reflect.ValueOf(val)) - case reflect.TypeOf(peer.ID("")): - pid, err := peer.Decode(args[i]) - if err != nil { - return nil, fmt.Errorf("failed to parse peer ID: %s", err) - } - p.Elem().Field(i).Set(reflect.ValueOf(pid)) - default: - return nil, fmt.Errorf("unsupported type for call (TODO): %s", paramObj.Field(i).Type) - } - } - - m := p.Interface().(cbg.CBORMarshaler) - buf := new(bytes.Buffer) - if err := m.MarshalCBOR(buf); err != nil { - return nil, fmt.Errorf("failed to marshal param object: %s", err) - } - return buf.Bytes(), nil -} - var StateCircSupplyCmd = &cli.Command{ Name: "circulating-supply", Usage: "Get the exact current circulating supply of Filecoin", @@ -1765,13 +1746,6 @@ var StateSectorCmd = &cli.Command{ return err } - if ts == nil { - ts, err = api.ChainHead(ctx) - if err != nil { - return err - } - } - maddr, err := address.NewFromString(cctx.Args().Get(0)) if err != nil { return err diff --git a/cli/status.go b/cli/status.go new file mode 100644 index 
000000000..75f91196a --- /dev/null +++ b/cli/status.go @@ -0,0 +1,60 @@ +package cli + +import ( + "fmt" + + "github.com/urfave/cli/v2" + + "github.com/filecoin-project/lotus/build" +) + +var StatusCmd = &cli.Command{ + Name: "status", + Usage: "Check node status", + Flags: []cli.Flag{ + &cli.BoolFlag{ + Name: "chain", + Usage: "include chain health status", + }, + }, + + Action: func(cctx *cli.Context) error { + apic, closer, err := GetFullNodeAPIV1(cctx) + if err != nil { + return err + } + defer closer() + ctx := ReqContext(cctx) + + inclChainStatus := cctx.Bool("chain") + + status, err := apic.NodeStatus(ctx, inclChainStatus) + if err != nil { + return err + } + + fmt.Printf("Sync Epoch: %d\n", status.SyncStatus.Epoch) + fmt.Printf("Epochs Behind: %d\n", status.SyncStatus.Behind) + fmt.Printf("Peers to Publish Messages: %d\n", status.PeerStatus.PeersToPublishMsgs) + fmt.Printf("Peers to Publish Blocks: %d\n", status.PeerStatus.PeersToPublishBlocks) + + if inclChainStatus && status.SyncStatus.Epoch > uint64(build.Finality) { + var ok100, okFin string + if status.ChainStatus.BlocksPerTipsetLast100 >= 4.75 { + ok100 = "[OK]" + } else { + ok100 = "[UNHEALTHY]" + } + if status.ChainStatus.BlocksPerTipsetLastFinality >= 4.75 { + okFin = "[OK]" + } else { + okFin = "[UNHEALTHY]" + } + + fmt.Printf("Blocks per TipSet in last 100 epochs: %f %s\n", status.ChainStatus.BlocksPerTipsetLast100, ok100) + fmt.Printf("Blocks per TipSet in last finality: %f %s\n", status.ChainStatus.BlocksPerTipsetLastFinality, okFin) + } + + return nil + }, +} diff --git a/cli/test/net.go b/cli/test/net.go deleted file mode 100644 index 8e45e3aed..000000000 --- a/cli/test/net.go +++ /dev/null @@ -1,89 +0,0 @@ -package test - -import ( - "context" - "testing" - "time" - - "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/lotus/chain/types" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/lotus/api/test" - test2 
"github.com/filecoin-project/lotus/node/test" -) - -func StartOneNodeOneMiner(ctx context.Context, t *testing.T, blocktime time.Duration) (test.TestNode, address.Address) { - n, sn := test2.RPCMockSbBuilder(t, test.OneFull, test.OneMiner) - - full := n[0] - miner := sn[0] - - // Get everyone connected - addrs, err := full.NetAddrsListen(ctx) - if err != nil { - t.Fatal(err) - } - - if err := miner.NetConnect(ctx, addrs); err != nil { - t.Fatal(err) - } - - // Start mining blocks - bm := test.NewBlockMiner(ctx, t, miner, blocktime) - bm.MineBlocks() - t.Cleanup(bm.Stop) - - // Get the full node's wallet address - fullAddr, err := full.WalletDefaultAddress(ctx) - if err != nil { - t.Fatal(err) - } - - // Create mock CLI - return full, fullAddr -} - -func StartTwoNodesOneMiner(ctx context.Context, t *testing.T, blocktime time.Duration) ([]test.TestNode, []address.Address) { - n, sn := test2.RPCMockSbBuilder(t, test.TwoFull, test.OneMiner) - - fullNode1 := n[0] - fullNode2 := n[1] - miner := sn[0] - - // Get everyone connected - addrs, err := fullNode1.NetAddrsListen(ctx) - if err != nil { - t.Fatal(err) - } - - if err := fullNode2.NetConnect(ctx, addrs); err != nil { - t.Fatal(err) - } - - if err := miner.NetConnect(ctx, addrs); err != nil { - t.Fatal(err) - } - - // Start mining blocks - bm := test.NewBlockMiner(ctx, t, miner, blocktime) - bm.MineBlocks() - t.Cleanup(bm.Stop) - - // Send some funds to register the second node - fullNodeAddr2, err := fullNode2.WalletNew(ctx, types.KTSecp256k1) - if err != nil { - t.Fatal(err) - } - - test.SendFunds(ctx, t, fullNode1, fullNodeAddr2, abi.NewTokenAmount(1e18)) - - // Get the first node's address - fullNodeAddr1, err := fullNode1.WalletDefaultAddress(ctx) - if err != nil { - t.Fatal(err) - } - - // Create mock CLI - return n, []address.Address{fullNodeAddr1, fullNodeAddr2} -} diff --git a/cli/test/util.go b/cli/test/util.go deleted file mode 100644 index e3930dc83..000000000 --- a/cli/test/util.go +++ /dev/null @@ -1,12 
+0,0 @@ -package test - -import "github.com/ipfs/go-log/v2" - -func QuietMiningLogs() { - _ = log.SetLogLevel("miner", "ERROR") - _ = log.SetLogLevel("chainstore", "ERROR") - _ = log.SetLogLevel("chain", "ERROR") - _ = log.SetLogLevel("sub", "ERROR") - _ = log.SetLogLevel("storageminer", "ERROR") - _ = log.SetLogLevel("pubsub", "ERROR") -} diff --git a/cli/util.go b/cli/util.go index 3183e21cf..73668742d 100644 --- a/cli/util.go +++ b/cli/util.go @@ -3,10 +3,13 @@ package cli import ( "context" "fmt" + "os" "time" + "github.com/fatih/color" "github.com/hako/durafmt" "github.com/ipfs/go-cid" + "github.com/mattn/go-isatty" "github.com/filecoin-project/go-state-types/abi" @@ -15,6 +18,13 @@ import ( "github.com/filecoin-project/lotus/chain/types" ) +// Set the global default, to be overridden by individual cli flags in order +func init() { + color.NoColor = os.Getenv("GOLOG_LOG_FMT") != "color" && + !isatty.IsTerminal(os.Stdout.Fd()) && + !isatty.IsCygwinTerminal(os.Stdout.Fd()) +} + func parseTipSet(ctx context.Context, api v0api.FullNode, vals []string) (*types.TipSet, error) { var headers []*types.BlockHeader for _, c := range vals { diff --git a/cli/wait.go b/cli/wait.go index 98fc9c0d8..ea897d5ad 100644 --- a/cli/wait.go +++ b/cli/wait.go @@ -12,7 +12,7 @@ var WaitApiCmd = &cli.Command{ Usage: "Wait for lotus api to come online", Action: func(cctx *cli.Context) error { for i := 0; i < 30; i++ { - api, closer, err := GetFullNodeAPI(cctx) + api, closer, err := GetAPI(cctx) if err != nil { fmt.Printf("Not online yet... 
(%s)\n", err) time.Sleep(time.Second) diff --git a/cmd/lotus-gateway/api.go b/cmd/lotus-gateway/api.go deleted file mode 100644 index 003625a84..000000000 --- a/cmd/lotus-gateway/api.go +++ /dev/null @@ -1,426 +0,0 @@ -package main - -import ( - "context" - "fmt" - "time" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-bitfield" - "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/go-state-types/crypto" - "github.com/filecoin-project/go-state-types/dline" - "github.com/filecoin-project/go-state-types/network" - "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/build" - "github.com/filecoin-project/lotus/chain/actors/builtin/miner" - "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/lib/sigs" - _ "github.com/filecoin-project/lotus/lib/sigs/bls" - _ "github.com/filecoin-project/lotus/lib/sigs/secp" - "github.com/filecoin-project/lotus/node/impl/full" - "github.com/ipfs/go-cid" -) - -const ( - LookbackCap = time.Hour * 24 - StateWaitLookbackLimit = abi.ChainEpoch(20) -) - -var ( - ErrLookbackTooLong = fmt.Errorf("lookbacks of more than %s are disallowed", LookbackCap) -) - -// gatewayDepsAPI defines the API methods that the GatewayAPI depends on -// (to make it easy to mock for tests) -type gatewayDepsAPI interface { - Version(context.Context) (api.APIVersion, error) - ChainGetBlockMessages(context.Context, cid.Cid) (*api.BlockMessages, error) - ChainGetMessage(ctx context.Context, mc cid.Cid) (*types.Message, error) - ChainGetNode(ctx context.Context, p string) (*api.IpldObject, error) - ChainGetTipSet(ctx context.Context, tsk types.TipSetKey) (*types.TipSet, error) - ChainGetTipSetByHeight(ctx context.Context, h abi.ChainEpoch, tsk types.TipSetKey) (*types.TipSet, error) - ChainHasObj(context.Context, cid.Cid) (bool, error) - ChainHead(ctx context.Context) (*types.TipSet, error) - ChainNotify(context.Context) (<-chan []*api.HeadChange, error) - 
ChainReadObj(context.Context, cid.Cid) ([]byte, error) - GasEstimateMessageGas(ctx context.Context, msg *types.Message, spec *api.MessageSendSpec, tsk types.TipSetKey) (*types.Message, error) - MpoolPushUntrusted(ctx context.Context, sm *types.SignedMessage) (cid.Cid, error) - MsigGetAvailableBalance(ctx context.Context, addr address.Address, tsk types.TipSetKey) (types.BigInt, error) - MsigGetVested(ctx context.Context, addr address.Address, start types.TipSetKey, end types.TipSetKey) (types.BigInt, error) - MsigGetPending(ctx context.Context, addr address.Address, ts types.TipSetKey) ([]*api.MsigTransaction, error) - StateAccountKey(ctx context.Context, addr address.Address, tsk types.TipSetKey) (address.Address, error) - StateDealProviderCollateralBounds(ctx context.Context, size abi.PaddedPieceSize, verified bool, tsk types.TipSetKey) (api.DealCollateralBounds, error) - StateGetActor(ctx context.Context, actor address.Address, ts types.TipSetKey) (*types.Actor, error) - StateLookupID(ctx context.Context, addr address.Address, tsk types.TipSetKey) (address.Address, error) - StateListMiners(ctx context.Context, tsk types.TipSetKey) ([]address.Address, error) - StateMarketBalance(ctx context.Context, addr address.Address, tsk types.TipSetKey) (api.MarketBalance, error) - StateMarketStorageDeal(ctx context.Context, dealId abi.DealID, tsk types.TipSetKey) (*api.MarketDeal, error) - StateNetworkVersion(context.Context, types.TipSetKey) (network.Version, error) - StateSearchMsg(ctx context.Context, from types.TipSetKey, msg cid.Cid, limit abi.ChainEpoch, allowReplaced bool) (*api.MsgLookup, error) - StateWaitMsg(ctx context.Context, cid cid.Cid, confidence uint64, limit abi.ChainEpoch, allowReplaced bool) (*api.MsgLookup, error) - StateReadState(ctx context.Context, actor address.Address, tsk types.TipSetKey) (*api.ActorState, error) - StateMinerPower(context.Context, address.Address, types.TipSetKey) (*api.MinerPower, error) - StateMinerFaults(context.Context, 
address.Address, types.TipSetKey) (bitfield.BitField, error) - StateMinerRecoveries(context.Context, address.Address, types.TipSetKey) (bitfield.BitField, error) - StateMinerInfo(context.Context, address.Address, types.TipSetKey) (miner.MinerInfo, error) - StateMinerDeadlines(context.Context, address.Address, types.TipSetKey) ([]api.Deadline, error) - StateMinerAvailableBalance(context.Context, address.Address, types.TipSetKey) (types.BigInt, error) - StateMinerProvingDeadline(context.Context, address.Address, types.TipSetKey) (*dline.Info, error) - StateCirculatingSupply(context.Context, types.TipSetKey) (abi.TokenAmount, error) - StateSectorGetInfo(ctx context.Context, maddr address.Address, n abi.SectorNumber, tsk types.TipSetKey) (*miner.SectorOnChainInfo, error) - StateVerifiedClientStatus(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*abi.StoragePower, error) - StateVMCirculatingSupplyInternal(context.Context, types.TipSetKey) (api.CirculatingSupply, error) - WalletBalance(context.Context, address.Address) (types.BigInt, error) //perm:read -} - -var _ gatewayDepsAPI = *new(api.FullNode) // gateway depends on latest - -type GatewayAPI struct { - api gatewayDepsAPI - lookbackCap time.Duration - stateWaitLookbackLimit abi.ChainEpoch -} - -// NewGatewayAPI creates a new GatewayAPI with the default lookback cap -func NewGatewayAPI(api gatewayDepsAPI) *GatewayAPI { - return newGatewayAPI(api, LookbackCap, StateWaitLookbackLimit) -} - -// used by the tests -func newGatewayAPI(api gatewayDepsAPI, lookbackCap time.Duration, stateWaitLookbackLimit abi.ChainEpoch) *GatewayAPI { - return &GatewayAPI{api: api, lookbackCap: lookbackCap, stateWaitLookbackLimit: stateWaitLookbackLimit} -} - -func (a *GatewayAPI) checkTipsetKey(ctx context.Context, tsk types.TipSetKey) error { - if tsk.IsEmpty() { - return nil - } - - ts, err := a.api.ChainGetTipSet(ctx, tsk) - if err != nil { - return err - } - - return a.checkTipset(ts) -} - -func (a *GatewayAPI) 
checkTipset(ts *types.TipSet) error { - at := time.Unix(int64(ts.Blocks()[0].Timestamp), 0) - if err := a.checkTimestamp(at); err != nil { - return fmt.Errorf("bad tipset: %w", err) - } - return nil -} - -func (a *GatewayAPI) checkTipsetHeight(ts *types.TipSet, h abi.ChainEpoch) error { - tsBlock := ts.Blocks()[0] - heightDelta := time.Duration(uint64(tsBlock.Height-h)*build.BlockDelaySecs) * time.Second - timeAtHeight := time.Unix(int64(tsBlock.Timestamp), 0).Add(-heightDelta) - - if err := a.checkTimestamp(timeAtHeight); err != nil { - return fmt.Errorf("bad tipset height: %w", err) - } - return nil -} - -func (a *GatewayAPI) checkTimestamp(at time.Time) error { - if time.Since(at) > a.lookbackCap { - return ErrLookbackTooLong - } - - return nil -} - -func (a *GatewayAPI) Version(ctx context.Context) (api.APIVersion, error) { - return a.api.Version(ctx) -} - -func (a *GatewayAPI) ChainGetBlockMessages(ctx context.Context, c cid.Cid) (*api.BlockMessages, error) { - return a.api.ChainGetBlockMessages(ctx, c) -} - -func (a *GatewayAPI) ChainHasObj(ctx context.Context, c cid.Cid) (bool, error) { - return a.api.ChainHasObj(ctx, c) -} - -func (a *GatewayAPI) ChainHead(ctx context.Context) (*types.TipSet, error) { - // TODO: cache and invalidate cache when timestamp is up (or have internal ChainNotify) - - return a.api.ChainHead(ctx) -} - -func (a *GatewayAPI) ChainGetMessage(ctx context.Context, mc cid.Cid) (*types.Message, error) { - return a.api.ChainGetMessage(ctx, mc) -} - -func (a *GatewayAPI) ChainGetTipSet(ctx context.Context, tsk types.TipSetKey) (*types.TipSet, error) { - return a.api.ChainGetTipSet(ctx, tsk) -} - -func (a *GatewayAPI) ChainGetTipSetByHeight(ctx context.Context, h abi.ChainEpoch, tsk types.TipSetKey) (*types.TipSet, error) { - var ts *types.TipSet - if tsk.IsEmpty() { - head, err := a.api.ChainHead(ctx) - if err != nil { - return nil, err - } - ts = head - } else { - gts, err := a.api.ChainGetTipSet(ctx, tsk) - if err != nil { - return nil, 
err - } - ts = gts - } - - // Check if the tipset key refers to a tipset that's too far in the past - if err := a.checkTipset(ts); err != nil { - return nil, err - } - - // Check if the height is too far in the past - if err := a.checkTipsetHeight(ts, h); err != nil { - return nil, err - } - - return a.api.ChainGetTipSetByHeight(ctx, h, tsk) -} - -func (a *GatewayAPI) ChainGetNode(ctx context.Context, p string) (*api.IpldObject, error) { - return a.api.ChainGetNode(ctx, p) -} - -func (a *GatewayAPI) ChainNotify(ctx context.Context) (<-chan []*api.HeadChange, error) { - return a.api.ChainNotify(ctx) -} - -func (a *GatewayAPI) ChainReadObj(ctx context.Context, c cid.Cid) ([]byte, error) { - return a.api.ChainReadObj(ctx, c) -} - -func (a *GatewayAPI) GasEstimateMessageGas(ctx context.Context, msg *types.Message, spec *api.MessageSendSpec, tsk types.TipSetKey) (*types.Message, error) { - if err := a.checkTipsetKey(ctx, tsk); err != nil { - return nil, err - } - - return a.api.GasEstimateMessageGas(ctx, msg, spec, tsk) -} - -func (a *GatewayAPI) MpoolPush(ctx context.Context, sm *types.SignedMessage) (cid.Cid, error) { - // TODO: additional anti-spam checks - return a.api.MpoolPushUntrusted(ctx, sm) -} - -func (a *GatewayAPI) MsigGetAvailableBalance(ctx context.Context, addr address.Address, tsk types.TipSetKey) (types.BigInt, error) { - if err := a.checkTipsetKey(ctx, tsk); err != nil { - return types.NewInt(0), err - } - - return a.api.MsigGetAvailableBalance(ctx, addr, tsk) -} - -func (a *GatewayAPI) MsigGetVested(ctx context.Context, addr address.Address, start types.TipSetKey, end types.TipSetKey) (types.BigInt, error) { - if err := a.checkTipsetKey(ctx, start); err != nil { - return types.NewInt(0), err - } - if err := a.checkTipsetKey(ctx, end); err != nil { - return types.NewInt(0), err - } - - return a.api.MsigGetVested(ctx, addr, start, end) -} - -func (a *GatewayAPI) MsigGetPending(ctx context.Context, addr address.Address, tsk types.TipSetKey) 
([]*api.MsigTransaction, error) { - if err := a.checkTipsetKey(ctx, tsk); err != nil { - return nil, err - } - - return a.api.MsigGetPending(ctx, addr, tsk) -} - -func (a *GatewayAPI) StateAccountKey(ctx context.Context, addr address.Address, tsk types.TipSetKey) (address.Address, error) { - if err := a.checkTipsetKey(ctx, tsk); err != nil { - return address.Undef, err - } - - return a.api.StateAccountKey(ctx, addr, tsk) -} - -func (a *GatewayAPI) StateDealProviderCollateralBounds(ctx context.Context, size abi.PaddedPieceSize, verified bool, tsk types.TipSetKey) (api.DealCollateralBounds, error) { - if err := a.checkTipsetKey(ctx, tsk); err != nil { - return api.DealCollateralBounds{}, err - } - - return a.api.StateDealProviderCollateralBounds(ctx, size, verified, tsk) -} - -func (a *GatewayAPI) StateGetActor(ctx context.Context, actor address.Address, tsk types.TipSetKey) (*types.Actor, error) { - if err := a.checkTipsetKey(ctx, tsk); err != nil { - return nil, err - } - - return a.api.StateGetActor(ctx, actor, tsk) -} - -func (a *GatewayAPI) StateListMiners(ctx context.Context, tsk types.TipSetKey) ([]address.Address, error) { - if err := a.checkTipsetKey(ctx, tsk); err != nil { - return nil, err - } - - return a.api.StateListMiners(ctx, tsk) -} - -func (a *GatewayAPI) StateLookupID(ctx context.Context, addr address.Address, tsk types.TipSetKey) (address.Address, error) { - if err := a.checkTipsetKey(ctx, tsk); err != nil { - return address.Undef, err - } - - return a.api.StateLookupID(ctx, addr, tsk) -} - -func (a *GatewayAPI) StateMarketBalance(ctx context.Context, addr address.Address, tsk types.TipSetKey) (api.MarketBalance, error) { - if err := a.checkTipsetKey(ctx, tsk); err != nil { - return api.MarketBalance{}, err - } - - return a.api.StateMarketBalance(ctx, addr, tsk) -} - -func (a *GatewayAPI) StateMarketStorageDeal(ctx context.Context, dealId abi.DealID, tsk types.TipSetKey) (*api.MarketDeal, error) { - if err := a.checkTipsetKey(ctx, tsk); err != nil 
{ - return nil, err - } - - return a.api.StateMarketStorageDeal(ctx, dealId, tsk) -} - -func (a *GatewayAPI) StateNetworkVersion(ctx context.Context, tsk types.TipSetKey) (network.Version, error) { - if err := a.checkTipsetKey(ctx, tsk); err != nil { - return network.VersionMax, err - } - - return a.api.StateNetworkVersion(ctx, tsk) -} - -func (a *GatewayAPI) StateSearchMsg(ctx context.Context, from types.TipSetKey, msg cid.Cid, limit abi.ChainEpoch, allowReplaced bool) (*api.MsgLookup, error) { - if limit == api.LookbackNoLimit { - limit = a.stateWaitLookbackLimit - } - if a.stateWaitLookbackLimit != api.LookbackNoLimit && limit > a.stateWaitLookbackLimit { - limit = a.stateWaitLookbackLimit - } - if err := a.checkTipsetKey(ctx, from); err != nil { - return nil, err - } - - return a.api.StateSearchMsg(ctx, from, msg, limit, allowReplaced) -} - -func (a *GatewayAPI) StateWaitMsg(ctx context.Context, msg cid.Cid, confidence uint64, limit abi.ChainEpoch, allowReplaced bool) (*api.MsgLookup, error) { - if limit == api.LookbackNoLimit { - limit = a.stateWaitLookbackLimit - } - if a.stateWaitLookbackLimit != api.LookbackNoLimit && limit > a.stateWaitLookbackLimit { - limit = a.stateWaitLookbackLimit - } - - return a.api.StateWaitMsg(ctx, msg, confidence, limit, allowReplaced) -} - -func (a *GatewayAPI) StateReadState(ctx context.Context, actor address.Address, tsk types.TipSetKey) (*api.ActorState, error) { - if err := a.checkTipsetKey(ctx, tsk); err != nil { - return nil, err - } - return a.api.StateReadState(ctx, actor, tsk) -} - -func (a *GatewayAPI) StateMinerPower(ctx context.Context, m address.Address, tsk types.TipSetKey) (*api.MinerPower, error) { - if err := a.checkTipsetKey(ctx, tsk); err != nil { - return nil, err - } - return a.api.StateMinerPower(ctx, m, tsk) -} - -func (a *GatewayAPI) StateMinerFaults(ctx context.Context, m address.Address, tsk types.TipSetKey) (bitfield.BitField, error) { - if err := a.checkTipsetKey(ctx, tsk); err != nil { - return 
bitfield.BitField{}, err - } - return a.api.StateMinerFaults(ctx, m, tsk) -} -func (a *GatewayAPI) StateMinerRecoveries(ctx context.Context, m address.Address, tsk types.TipSetKey) (bitfield.BitField, error) { - if err := a.checkTipsetKey(ctx, tsk); err != nil { - return bitfield.BitField{}, err - } - return a.api.StateMinerRecoveries(ctx, m, tsk) -} - -func (a *GatewayAPI) StateMinerInfo(ctx context.Context, m address.Address, tsk types.TipSetKey) (miner.MinerInfo, error) { - if err := a.checkTipsetKey(ctx, tsk); err != nil { - return miner.MinerInfo{}, err - } - return a.api.StateMinerInfo(ctx, m, tsk) -} - -func (a *GatewayAPI) StateMinerDeadlines(ctx context.Context, m address.Address, tsk types.TipSetKey) ([]api.Deadline, error) { - if err := a.checkTipsetKey(ctx, tsk); err != nil { - return nil, err - } - return a.api.StateMinerDeadlines(ctx, m, tsk) -} - -func (a *GatewayAPI) StateMinerAvailableBalance(ctx context.Context, m address.Address, tsk types.TipSetKey) (types.BigInt, error) { - if err := a.checkTipsetKey(ctx, tsk); err != nil { - return types.BigInt{}, err - } - return a.api.StateMinerAvailableBalance(ctx, m, tsk) -} - -func (a *GatewayAPI) StateMinerProvingDeadline(ctx context.Context, m address.Address, tsk types.TipSetKey) (*dline.Info, error) { - if err := a.checkTipsetKey(ctx, tsk); err != nil { - return nil, err - } - return a.api.StateMinerProvingDeadline(ctx, m, tsk) -} - -func (a *GatewayAPI) StateCirculatingSupply(ctx context.Context, tsk types.TipSetKey) (abi.TokenAmount, error) { - if err := a.checkTipsetKey(ctx, tsk); err != nil { - return types.BigInt{}, err - } - return a.api.StateCirculatingSupply(ctx, tsk) -} - -func (a *GatewayAPI) StateSectorGetInfo(ctx context.Context, maddr address.Address, n abi.SectorNumber, tsk types.TipSetKey) (*miner.SectorOnChainInfo, error) { - if err := a.checkTipsetKey(ctx, tsk); err != nil { - return nil, err - } - return a.api.StateSectorGetInfo(ctx, maddr, n, tsk) -} - -func (a *GatewayAPI) 
StateVerifiedClientStatus(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*abi.StoragePower, error) { - if err := a.checkTipsetKey(ctx, tsk); err != nil { - return nil, err - } - return a.api.StateVerifiedClientStatus(ctx, addr, tsk) -} - -func (a *GatewayAPI) StateVMCirculatingSupplyInternal(ctx context.Context, tsk types.TipSetKey) (api.CirculatingSupply, error) { - if err := a.checkTipsetKey(ctx, tsk); err != nil { - return api.CirculatingSupply{}, err - } - return a.api.StateVMCirculatingSupplyInternal(ctx, tsk) -} - -func (a *GatewayAPI) WalletVerify(ctx context.Context, k address.Address, msg []byte, sig *crypto.Signature) (bool, error) { - return sigs.Verify(sig, k, msg) == nil, nil -} - -func (a *GatewayAPI) WalletBalance(ctx context.Context, k address.Address) (types.BigInt, error) { - return a.api.WalletBalance(ctx, k) -} - -var _ api.Gateway = (*GatewayAPI)(nil) -var _ full.ChainModuleAPI = (*GatewayAPI)(nil) -var _ full.GasModuleAPI = (*GatewayAPI)(nil) -var _ full.MpoolModuleAPI = (*GatewayAPI)(nil) -var _ full.StateModuleAPI = (*GatewayAPI)(nil) diff --git a/cmd/lotus-gateway/main.go b/cmd/lotus-gateway/main.go index e2ef27dd9..cfda02d86 100644 --- a/cmd/lotus-gateway/main.go +++ b/cmd/lotus-gateway/main.go @@ -2,28 +2,31 @@ package main import ( "context" + "fmt" "net" - "net/http" "os" - "contrib.go.opencensus.io/exporter/prometheus" - "github.com/filecoin-project/go-jsonrpc" - promclient "github.com/prometheus/client_golang/prometheus" - "go.opencensus.io/tag" - - lapi "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/api/v0api" - "github.com/filecoin-project/lotus/api/v1api" - "github.com/filecoin-project/lotus/build" - lcli "github.com/filecoin-project/lotus/cli" - "github.com/filecoin-project/lotus/lib/lotuslog" - "github.com/filecoin-project/lotus/metrics" + "github.com/urfave/cli/v2" + "go.opencensus.io/stats/view" + "golang.org/x/xerrors" logging "github.com/ipfs/go-log/v2" - 
"go.opencensus.io/stats/view" - "github.com/gorilla/mux" - "github.com/urfave/cli/v2" + "github.com/filecoin-project/go-jsonrpc" + "github.com/filecoin-project/go-state-types/abi" + + manet "github.com/multiformats/go-multiaddr/net" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/lotus/api/client" + "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/chain/types" + lcli "github.com/filecoin-project/lotus/cli" + cliutil "github.com/filecoin-project/lotus/cli/util" + "github.com/filecoin-project/lotus/gateway" + "github.com/filecoin-project/lotus/lib/lotuslog" + "github.com/filecoin-project/lotus/metrics" + "github.com/filecoin-project/lotus/node" ) var log = logging.Logger("gateway") @@ -33,6 +36,7 @@ func main() { local := []*cli.Command{ runCmd, + checkCmd, } app := &cli.App{ @@ -52,11 +56,60 @@ func main() { app.Setup() if err := app.Run(os.Args); err != nil { - log.Warnf("%+v", err) + log.Errorf("%+v", err) + os.Exit(1) return } } +var checkCmd = &cli.Command{ + Name: "check", + Usage: "performs a simple check to verify that a connection can be made to a gateway", + ArgsUsage: "[apiInfo]", + Description: `Any valid value for FULLNODE_API_INFO is a valid argument to the check command. 
+ + Examples + - ws://127.0.0.1:2346 + - http://127.0.0.1:2346 + - /ip4/127.0.0.1/tcp/2346`, + Flags: []cli.Flag{}, + Action: func(cctx *cli.Context) error { + ctx := lcli.ReqContext(cctx) + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + ainfo := cliutil.ParseApiInfo(cctx.Args().First()) + + darg, err := ainfo.DialArgs("v1") + if err != nil { + return err + } + + api, closer, err := client.NewFullNodeRPCV1(ctx, darg, nil) + if err != nil { + return err + } + + defer closer() + + addr, err := address.NewIDAddress(100) + if err != nil { + return err + } + + laddr, err := api.StateLookupID(ctx, addr, types.EmptyTSK) + if err != nil { + return err + } + + if laddr != addr { + return fmt.Errorf("looked up addresses does not match returned address, %s != %s", addr, laddr) + } + + return nil + }, +} + var runCmd = &cli.Command{ Name: "run", Usage: "Start api server", @@ -70,14 +123,20 @@ var runCmd = &cli.Command{ Name: "api-max-req-size", Usage: "maximum API request size accepted by the JSON RPC server", }, + &cli.DurationFlag{ + Name: "api-max-lookback", + Usage: "maximum duration allowable for tipset lookbacks", + Value: gateway.DefaultLookbackCap, + }, + &cli.Int64Flag{ + Name: "api-wait-lookback-limit", + Usage: "maximum number of blocks to search back through for message inclusion", + Value: int64(gateway.DefaultStateWaitLookbackLimit), + }, }, Action: func(cctx *cli.Context) error { log.Info("Starting lotus gateway") - ctx := lcli.ReqContext(cctx) - ctx, cancel := context.WithCancel(ctx) - defer cancel() - // Register all metric views if err := view.Register( metrics.ChainNodeViews..., @@ -91,66 +150,44 @@ var runCmd = &cli.Command{ } defer closer() - address := cctx.String("listen") - mux := mux.NewRouter() + var ( + lookbackCap = cctx.Duration("api-max-lookback") + address = cctx.String("listen") + waitLookback = abi.ChainEpoch(cctx.Int64("api-wait-lookback-limit")) + ) - log.Info("Setting up API endpoint at " + address) - - serveRpc := func(path 
string, hnd interface{}) { - serverOptions := make([]jsonrpc.ServerOption, 0) - if maxRequestSize := cctx.Int("api-max-req-size"); maxRequestSize != 0 { - serverOptions = append(serverOptions, jsonrpc.WithMaxRequestSize(int64(maxRequestSize))) - } - rpcServer := jsonrpc.NewServer(serverOptions...) - rpcServer.Register("Filecoin", hnd) - - mux.Handle(path, rpcServer) + serverOptions := make([]jsonrpc.ServerOption, 0) + if maxRequestSize := cctx.Int("api-max-req-size"); maxRequestSize != 0 { + serverOptions = append(serverOptions, jsonrpc.WithMaxRequestSize(int64(maxRequestSize))) } - ma := metrics.MetricedGatewayAPI(NewGatewayAPI(api)) + log.Info("setting up API endpoint at " + address) - serveRpc("/rpc/v1", ma) - serveRpc("/rpc/v0", lapi.Wrap(new(v1api.FullNodeStruct), new(v0api.WrapperV1Full), ma)) + addr, err := net.ResolveTCPAddr("tcp", address) + if err != nil { + return xerrors.Errorf("failed to resolve endpoint address: %w", err) + } - registry := promclient.DefaultRegisterer.(*promclient.Registry) - exporter, err := prometheus.NewExporter(prometheus.Options{ - Registry: registry, - Namespace: "lotus_gw", + maddr, err := manet.FromNetAddr(addr) + if err != nil { + return xerrors.Errorf("failed to convert endpoint address to multiaddr: %w", err) + } + + gwapi := gateway.NewNode(api, lookbackCap, waitLookback) + h, err := gateway.Handler(gwapi, serverOptions...) 
+ if err != nil { + return xerrors.Errorf("failed to set up gateway HTTP handler") + } + + stopFunc, err := node.ServeRPC(h, "lotus-gateway", maddr) + if err != nil { + return xerrors.Errorf("failed to serve rpc endpoint: %w", err) + } + + <-node.MonitorShutdown(nil, node.ShutdownHandler{ + Component: "rpc", + StopFunc: stopFunc, }) - if err != nil { - return err - } - mux.Handle("/debug/metrics", exporter) - - mux.PathPrefix("/").Handler(http.DefaultServeMux) - - /*ah := &auth.Handler{ - Verify: nodeApi.AuthVerify, - Next: mux.ServeHTTP, - }*/ - - srv := &http.Server{ - Handler: mux, - BaseContext: func(listener net.Listener) context.Context { - ctx, _ := tag.New(context.Background(), tag.Upsert(metrics.APIInterface, "lotus-gateway")) - return ctx - }, - } - - go func() { - <-ctx.Done() - log.Warn("Shutting down...") - if err := srv.Shutdown(context.TODO()); err != nil { - log.Errorf("shutting down RPC server failed: %s", err) - } - log.Warn("Graceful shutdown successful") - }() - - nl, err := net.Listen("tcp", address) - if err != nil { - return err - } - - return srv.Serve(nl) + return nil }, } diff --git a/cmd/lotus-seal-worker/main.go b/cmd/lotus-seal-worker/main.go index 50ecb91c5..adcf0f869 100644 --- a/cmd/lotus-seal-worker/main.go +++ b/cmd/lotus-seal-worker/main.go @@ -63,9 +63,10 @@ func main() { } app := &cli.App{ - Name: "lotus-worker", - Usage: "Remote miner worker", - Version: build.UserVersion(), + Name: "lotus-worker", + Usage: "Remote miner worker", + Version: build.UserVersion(), + EnableBashCompletion: true, Flags: []cli.Flag{ &cli.StringFlag{ Name: FlagWorkerRepo, @@ -361,9 +362,10 @@ var runCmd = &cli.Command{ return xerrors.Errorf("could not get api info: %w", err) } - remote := stores.NewRemote(localStore, nodeApi, sminfo.AuthHeader(), cctx.Int("parallel-fetch-limit")) + remote := stores.NewRemote(localStore, nodeApi, sminfo.AuthHeader(), cctx.Int("parallel-fetch-limit"), + &stores.DefaultPartialFileHandler{}) - fh := 
&stores.FetchHandler{Local: localStore} + fh := &stores.FetchHandler{Local: localStore, PfHandler: &stores.DefaultPartialFileHandler{}} remoteHandler := func(w http.ResponseWriter, r *http.Request) { if !auth.HasPerm(r.Context(), nil, api.PermAdmin) { w.WriteHeader(401) diff --git a/cmd/lotus-seed/genesis.go b/cmd/lotus-seed/genesis.go index d5f1d5ad6..a27cc0a2f 100644 --- a/cmd/lotus-seed/genesis.go +++ b/cmd/lotus-seed/genesis.go @@ -9,6 +9,8 @@ import ( "strconv" "strings" + "github.com/filecoin-project/go-state-types/network" + "github.com/filecoin-project/lotus/blockstore" "github.com/filecoin-project/lotus/chain/vm" "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper" @@ -39,6 +41,7 @@ var genesisCmd = &cli.Command{ genesisAddMsigsCmd, genesisSetVRKCmd, genesisSetRemainderCmd, + genesisSetActorVersionCmd, genesisCarCmd, }, } @@ -56,6 +59,7 @@ var genesisNewCmd = &cli.Command{ return xerrors.New("seed genesis new [genesis.json]") } out := genesis.Template{ + NetworkVersion: build.NewestNetworkVersion, Accounts: []genesis.Actor{}, Miners: []genesis.Miner{}, VerifregRootKey: gen.DefaultVerifregRootkeyActor, @@ -503,6 +507,53 @@ var genesisSetRemainderCmd = &cli.Command{ }, } +var genesisSetActorVersionCmd = &cli.Command{ + Name: "set-network-version", + Usage: "Set the version that this network will start from", + ArgsUsage: " ", + Action: func(cctx *cli.Context) error { + if cctx.Args().Len() != 2 { + return fmt.Errorf("must specify genesis file and network version (e.g. 
'0'") + } + + genf, err := homedir.Expand(cctx.Args().First()) + if err != nil { + return err + } + + var template genesis.Template + b, err := ioutil.ReadFile(genf) + if err != nil { + return xerrors.Errorf("read genesis template: %w", err) + } + + if err := json.Unmarshal(b, &template); err != nil { + return xerrors.Errorf("unmarshal genesis template: %w", err) + } + + nv, err := strconv.ParseUint(cctx.Args().Get(1), 10, 64) + if err != nil { + return xerrors.Errorf("parsing network version: %w", err) + } + + if nv > uint64(build.NewestNetworkVersion) { + return xerrors.Errorf("invalid network version: %d", nv) + } + + template.NetworkVersion = network.Version(nv) + + b, err = json.MarshalIndent(&template, "", " ") + if err != nil { + return err + } + + if err := ioutil.WriteFile(genf, b, 0644); err != nil { + return err + } + return nil + }, +} + var genesisCarCmd = &cli.Command{ Name: "car", Description: "write genesis car file", @@ -521,7 +572,7 @@ var genesisCarCmd = &cli.Command{ } ofile := c.String("out") jrnl := journal.NilJournal() - bstor := blockstore.NewMemorySync() + bstor := blockstore.WrapIDStore(blockstore.NewMemorySync()) sbldr := vm.Syscalls(ffiwrapper.ProofVerifier) _, err := testing.MakeGenesis(ofile, c.Args().First())(bstor, sbldr, jrnl)() return err diff --git a/cmd/lotus-seed/main.go b/cmd/lotus-seed/main.go index c4e62b419..42f4b74e4 100644 --- a/cmd/lotus-seed/main.go +++ b/cmd/lotus-seed/main.go @@ -94,6 +94,10 @@ var preSealCmd = &cli.Command{ Name: "fake-sectors", Value: false, }, + &cli.IntFlag{ + Name: "network-version", + Usage: "specify network version", + }, }, Action: func(c *cli.Context) error { sdir := c.String("sector-dir") @@ -129,7 +133,12 @@ var preSealCmd = &cli.Command{ } sectorSize := abi.SectorSize(sectorSizeInt) - spt, err := miner.SealProofTypeFromSectorSize(sectorSize, network.Version0) + nv := build.NewestNetworkVersion + if c.IsSet("network-version") { + nv = network.Version(c.Uint64("network-version")) + } + + spt, 
err := miner.SealProofTypeFromSectorSize(sectorSize, nv) if err != nil { return err } diff --git a/cmd/lotus-shed/actor.go b/cmd/lotus-shed/actor.go new file mode 100644 index 000000000..b78f28349 --- /dev/null +++ b/cmd/lotus-shed/actor.go @@ -0,0 +1,740 @@ +package main + +import ( + "fmt" + "os" + + "github.com/fatih/color" + "github.com/urfave/cli/v2" + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/lotus/api" + + miner2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/miner" + + "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/chain/actors" + "github.com/filecoin-project/lotus/chain/actors/builtin/miner" + "github.com/filecoin-project/lotus/chain/types" + lcli "github.com/filecoin-project/lotus/cli" + "github.com/filecoin-project/lotus/lib/tablewriter" +) + +var actorCmd = &cli.Command{ + Name: "actor", + Usage: "manipulate the miner actor", + Subcommands: []*cli.Command{ + actorWithdrawCmd, + actorSetOwnerCmd, + actorControl, + actorProposeChangeWorker, + actorConfirmChangeWorker, + }, +} + +var actorWithdrawCmd = &cli.Command{ + Name: "withdraw", + Usage: "withdraw available balance", + ArgsUsage: "[amount (FIL)]", + Flags: []cli.Flag{ + &cli.StringFlag{ + Name: "actor", + Usage: "specify the address of miner actor", + }, + }, + Action: func(cctx *cli.Context) error { + var maddr address.Address + if act := cctx.String("actor"); act != "" { + var err error + maddr, err = address.NewFromString(act) + if err != nil { + return fmt.Errorf("parsing address %s: %w", act, err) + } + } + + nodeAPI, acloser, err := lcli.GetFullNodeAPI(cctx) + if err != nil { + return err + } + defer acloser() + + ctx := lcli.ReqContext(cctx) + + if maddr.Empty() { + minerAPI, closer, err := lcli.GetStorageMinerAPI(cctx) + if err != nil { + return err + } + defer closer() + + maddr, err = 
minerAPI.ActorAddress(ctx) + if err != nil { + return err + } + } + + mi, err := nodeAPI.StateMinerInfo(ctx, maddr, types.EmptyTSK) + if err != nil { + return err + } + + available, err := nodeAPI.StateMinerAvailableBalance(ctx, maddr, types.EmptyTSK) + if err != nil { + return err + } + + amount := available + if cctx.Args().Present() { + f, err := types.ParseFIL(cctx.Args().First()) + if err != nil { + return xerrors.Errorf("parsing 'amount' argument: %w", err) + } + + amount = abi.TokenAmount(f) + + if amount.GreaterThan(available) { + return xerrors.Errorf("can't withdraw more funds than available; requested: %s; available: %s", amount, available) + } + } + + params, err := actors.SerializeParams(&miner2.WithdrawBalanceParams{ + AmountRequested: amount, // Default to attempting to withdraw all the extra funds in the miner actor + }) + if err != nil { + return err + } + + smsg, err := nodeAPI.MpoolPushMessage(ctx, &types.Message{ + To: maddr, + From: mi.Owner, + Value: types.NewInt(0), + Method: miner.Methods.WithdrawBalance, + Params: params, + }, &api.MessageSendSpec{MaxFee: abi.TokenAmount(types.MustParseFIL("0.1"))}) + if err != nil { + return err + } + + fmt.Printf("Requested rewards withdrawal in message %s\n", smsg.Cid()) + + return nil + }, +} + +var actorSetOwnerCmd = &cli.Command{ + Name: "set-owner", + Usage: "Set owner address (this command should be invoked twice, first with the old owner as the senderAddress, and then with the new owner)", + ArgsUsage: "[newOwnerAddress senderAddress]", + Flags: []cli.Flag{ + &cli.StringFlag{ + Name: "actor", + Usage: "specify the address of miner actor", + }, + &cli.BoolFlag{ + Name: "really-do-it", + Usage: "Actually send transaction performing the action", + Value: false, + }, + }, + Action: func(cctx *cli.Context) error { + if !cctx.Bool("really-do-it") { + fmt.Println("Pass --really-do-it to actually execute this action") + return nil + } + + if cctx.NArg() != 2 { + return fmt.Errorf("must pass new owner 
address and sender address") + } + + var maddr address.Address + if act := cctx.String("actor"); act != "" { + var err error + maddr, err = address.NewFromString(act) + if err != nil { + return fmt.Errorf("parsing address %s: %w", act, err) + } + } + + nodeAPI, acloser, err := lcli.GetFullNodeAPI(cctx) + if err != nil { + return err + } + defer acloser() + + ctx := lcli.ReqContext(cctx) + + na, err := address.NewFromString(cctx.Args().First()) + if err != nil { + return err + } + + newAddrId, err := nodeAPI.StateLookupID(ctx, na, types.EmptyTSK) + if err != nil { + return err + } + + fa, err := address.NewFromString(cctx.Args().Get(1)) + if err != nil { + return err + } + + fromAddrId, err := nodeAPI.StateLookupID(ctx, fa, types.EmptyTSK) + if err != nil { + return err + } + + if maddr.Empty() { + minerAPI, closer, err := lcli.GetStorageMinerAPI(cctx) + if err != nil { + return err + } + defer closer() + + maddr, err = minerAPI.ActorAddress(ctx) + if err != nil { + return err + } + } + + mi, err := nodeAPI.StateMinerInfo(ctx, maddr, types.EmptyTSK) + if err != nil { + return err + } + + if fromAddrId != mi.Owner && fromAddrId != newAddrId { + return xerrors.New("from address must either be the old owner or the new owner") + } + + sp, err := actors.SerializeParams(&newAddrId) + if err != nil { + return xerrors.Errorf("serializing params: %w", err) + } + + smsg, err := nodeAPI.MpoolPushMessage(ctx, &types.Message{ + From: fromAddrId, + To: maddr, + Method: miner.Methods.ChangeOwnerAddress, + Value: big.Zero(), + Params: sp, + }, nil) + if err != nil { + return xerrors.Errorf("mpool push: %w", err) + } + + fmt.Println("Message CID:", smsg.Cid()) + + // wait for it to get mined into a block + wait, err := nodeAPI.StateWaitMsg(ctx, smsg.Cid(), build.MessageConfidence) + if err != nil { + return err + } + + // check it executed successfully + if wait.Receipt.ExitCode != 0 { + fmt.Println("owner change failed!") + return err + } + + fmt.Println("message succeeded!") + + 
return nil + }, +} + +var actorControl = &cli.Command{ + Name: "control", + Usage: "Manage control addresses", + Subcommands: []*cli.Command{ + actorControlList, + actorControlSet, + }, +} + +var actorControlList = &cli.Command{ + Name: "list", + Usage: "Get currently set control addresses", + Flags: []cli.Flag{ + &cli.StringFlag{ + Name: "actor", + Usage: "specify the address of miner actor", + }, + &cli.BoolFlag{ + Name: "verbose", + }, + &cli.BoolFlag{ + Name: "color", + Usage: "use color in display output", + DefaultText: "depends on output being a TTY", + }, + }, + Action: func(cctx *cli.Context) error { + if cctx.IsSet("color") { + color.NoColor = !cctx.Bool("color") + } + + var maddr address.Address + if act := cctx.String("actor"); act != "" { + var err error + maddr, err = address.NewFromString(act) + if err != nil { + return fmt.Errorf("parsing address %s: %w", act, err) + } + } + + nodeAPI, acloser, err := lcli.GetFullNodeAPI(cctx) + if err != nil { + return err + } + defer acloser() + + ctx := lcli.ReqContext(cctx) + + if maddr.Empty() { + minerAPI, closer, err := lcli.GetStorageMinerAPI(cctx) + if err != nil { + return err + } + defer closer() + + maddr, err = minerAPI.ActorAddress(ctx) + if err != nil { + return err + } + } + + mi, err := nodeAPI.StateMinerInfo(ctx, maddr, types.EmptyTSK) + if err != nil { + return err + } + + tw := tablewriter.New( + tablewriter.Col("name"), + tablewriter.Col("ID"), + tablewriter.Col("key"), + tablewriter.Col("balance"), + ) + + printKey := func(name string, a address.Address) { + b, err := nodeAPI.WalletBalance(ctx, a) + if err != nil { + fmt.Printf("%s\t%s: error getting balance: %s\n", name, a, err) + return + } + + k, err := nodeAPI.StateAccountKey(ctx, a, types.EmptyTSK) + if err != nil { + fmt.Printf("%s\t%s: error getting account key: %s\n", name, a, err) + return + } + + kstr := k.String() + if !cctx.Bool("verbose") { + kstr = kstr[:9] + "..." 
+ } + + bstr := types.FIL(b).String() + switch { + case b.LessThan(types.FromFil(10)): + bstr = color.RedString(bstr) + case b.LessThan(types.FromFil(50)): + bstr = color.YellowString(bstr) + default: + bstr = color.GreenString(bstr) + } + + tw.Write(map[string]interface{}{ + "name": name, + "ID": a, + "key": kstr, + "balance": bstr, + }) + } + + printKey("owner", mi.Owner) + printKey("worker", mi.Worker) + for i, ca := range mi.ControlAddresses { + printKey(fmt.Sprintf("control-%d", i), ca) + } + + return tw.Flush(os.Stdout) + }, +} + +var actorControlSet = &cli.Command{ + Name: "set", + Usage: "Set control address(-es)", + ArgsUsage: "[...address]", + Flags: []cli.Flag{ + &cli.StringFlag{ + Name: "actor", + Usage: "specify the address of miner actor", + }, + &cli.BoolFlag{ + Name: "really-do-it", + Usage: "Actually send transaction performing the action", + Value: false, + }, + }, + Action: func(cctx *cli.Context) error { + if !cctx.Bool("really-do-it") { + fmt.Println("Pass --really-do-it to actually execute this action") + return nil + } + + var maddr address.Address + if act := cctx.String("actor"); act != "" { + var err error + maddr, err = address.NewFromString(act) + if err != nil { + return fmt.Errorf("parsing address %s: %w", act, err) + } + } + + nodeAPI, acloser, err := lcli.GetFullNodeAPI(cctx) + if err != nil { + return err + } + defer acloser() + + ctx := lcli.ReqContext(cctx) + + if maddr.Empty() { + minerAPI, closer, err := lcli.GetStorageMinerAPI(cctx) + if err != nil { + return err + } + defer closer() + + maddr, err = minerAPI.ActorAddress(ctx) + if err != nil { + return err + } + } + + mi, err := nodeAPI.StateMinerInfo(ctx, maddr, types.EmptyTSK) + if err != nil { + return err + } + + del := map[address.Address]struct{}{} + existing := map[address.Address]struct{}{} + for _, controlAddress := range mi.ControlAddresses { + ka, err := nodeAPI.StateAccountKey(ctx, controlAddress, types.EmptyTSK) + if err != nil { + return err + } + + del[ka] = 
struct{}{} + existing[ka] = struct{}{} + } + + var toSet []address.Address + + for i, as := range cctx.Args().Slice() { + a, err := address.NewFromString(as) + if err != nil { + return xerrors.Errorf("parsing address %d: %w", i, err) + } + + ka, err := nodeAPI.StateAccountKey(ctx, a, types.EmptyTSK) + if err != nil { + return err + } + + // make sure the address exists on chain + _, err = nodeAPI.StateLookupID(ctx, ka, types.EmptyTSK) + if err != nil { + return xerrors.Errorf("looking up %s: %w", ka, err) + } + + delete(del, ka) + toSet = append(toSet, ka) + } + + for a := range del { + fmt.Println("Remove", a) + } + for _, a := range toSet { + if _, exists := existing[a]; !exists { + fmt.Println("Add", a) + } + } + + cwp := &miner2.ChangeWorkerAddressParams{ + NewWorker: mi.Worker, + NewControlAddrs: toSet, + } + + sp, err := actors.SerializeParams(cwp) + if err != nil { + return xerrors.Errorf("serializing params: %w", err) + } + + smsg, err := nodeAPI.MpoolPushMessage(ctx, &types.Message{ + From: mi.Owner, + To: maddr, + Method: miner.Methods.ChangeWorkerAddress, + + Value: big.Zero(), + Params: sp, + }, nil) + if err != nil { + return xerrors.Errorf("mpool push: %w", err) + } + + fmt.Println("Message CID:", smsg.Cid()) + + return nil + }, +} + +var actorProposeChangeWorker = &cli.Command{ + Name: "propose-change-worker", + Usage: "Propose a worker address change", + ArgsUsage: "[address]", + Flags: []cli.Flag{ + &cli.StringFlag{ + Name: "actor", + Usage: "specify the address of miner actor", + }, + &cli.BoolFlag{ + Name: "really-do-it", + Usage: "Actually send transaction performing the action", + Value: false, + }, + }, + Action: func(cctx *cli.Context) error { + if !cctx.Args().Present() { + return fmt.Errorf("must pass address of new worker address") + } + + if !cctx.Bool("really-do-it") { + fmt.Fprintln(cctx.App.Writer, "Pass --really-do-it to actually execute this action") + return nil + } + + var maddr address.Address + if act := cctx.String("actor"); act 
!= "" { + var err error + maddr, err = address.NewFromString(act) + if err != nil { + return fmt.Errorf("parsing address %s: %w", act, err) + } + } + + nodeAPI, acloser, err := lcli.GetFullNodeAPI(cctx) + if err != nil { + return err + } + defer acloser() + + ctx := lcli.ReqContext(cctx) + + na, err := address.NewFromString(cctx.Args().First()) + if err != nil { + return err + } + + newAddr, err := nodeAPI.StateLookupID(ctx, na, types.EmptyTSK) + if err != nil { + return err + } + + if maddr.Empty() { + minerAPI, closer, err := lcli.GetStorageMinerAPI(cctx) + if err != nil { + return err + } + defer closer() + + maddr, err = minerAPI.ActorAddress(ctx) + if err != nil { + return err + } + } + + mi, err := nodeAPI.StateMinerInfo(ctx, maddr, types.EmptyTSK) + if err != nil { + return err + } + + if mi.NewWorker.Empty() { + if mi.Worker == newAddr { + return fmt.Errorf("worker address already set to %s", na) + } + } else { + if mi.NewWorker == newAddr { + return fmt.Errorf("change to worker address %s already pending", na) + } + } + + cwp := &miner2.ChangeWorkerAddressParams{ + NewWorker: newAddr, + NewControlAddrs: mi.ControlAddresses, + } + + sp, err := actors.SerializeParams(cwp) + if err != nil { + return xerrors.Errorf("serializing params: %w", err) + } + + smsg, err := nodeAPI.MpoolPushMessage(ctx, &types.Message{ + From: mi.Owner, + To: maddr, + Method: miner.Methods.ChangeWorkerAddress, + Value: big.Zero(), + Params: sp, + }, nil) + if err != nil { + return xerrors.Errorf("mpool push: %w", err) + } + + fmt.Fprintln(cctx.App.Writer, "Propose Message CID:", smsg.Cid()) + + // wait for it to get mined into a block + wait, err := nodeAPI.StateWaitMsg(ctx, smsg.Cid(), build.MessageConfidence) + if err != nil { + return err + } + + // check it executed successfully + if wait.Receipt.ExitCode != 0 { + fmt.Fprintln(cctx.App.Writer, "Propose worker change failed!") + return err + } + + mi, err = nodeAPI.StateMinerInfo(ctx, maddr, wait.TipSet) + if err != nil { + return 
err + } + if mi.NewWorker != newAddr { + return fmt.Errorf("Proposed worker address change not reflected on chain: expected '%s', found '%s'", na, mi.NewWorker) + } + + fmt.Fprintf(cctx.App.Writer, "Worker key change to %s successfully proposed.\n", na) + fmt.Fprintf(cctx.App.Writer, "Call 'confirm-change-worker' at or after height %d to complete.\n", mi.WorkerChangeEpoch) + + return nil + }, +} + +var actorConfirmChangeWorker = &cli.Command{ + Name: "confirm-change-worker", + Usage: "Confirm a worker address change", + ArgsUsage: "[address]", + Flags: []cli.Flag{ + &cli.StringFlag{ + Name: "actor", + Usage: "specify the address of miner actor", + }, + &cli.BoolFlag{ + Name: "really-do-it", + Usage: "Actually send transaction performing the action", + Value: false, + }, + }, + Action: func(cctx *cli.Context) error { + if !cctx.Args().Present() { + return fmt.Errorf("must pass address of new worker address") + } + + if !cctx.Bool("really-do-it") { + fmt.Fprintln(cctx.App.Writer, "Pass --really-do-it to actually execute this action") + return nil + } + + var maddr address.Address + if act := cctx.String("actor"); act != "" { + var err error + maddr, err = address.NewFromString(act) + if err != nil { + return fmt.Errorf("parsing address %s: %w", act, err) + } + } + + nodeAPI, acloser, err := lcli.GetFullNodeAPI(cctx) + if err != nil { + return err + } + defer acloser() + + ctx := lcli.ReqContext(cctx) + + na, err := address.NewFromString(cctx.Args().First()) + if err != nil { + return err + } + + newAddr, err := nodeAPI.StateLookupID(ctx, na, types.EmptyTSK) + if err != nil { + return err + } + + if maddr.Empty() { + minerAPI, closer, err := lcli.GetStorageMinerAPI(cctx) + if err != nil { + return err + } + defer closer() + + maddr, err = minerAPI.ActorAddress(ctx) + if err != nil { + return err + } + } + + mi, err := nodeAPI.StateMinerInfo(ctx, maddr, types.EmptyTSK) + if err != nil { + return err + } + + if mi.NewWorker.Empty() { + return xerrors.Errorf("no worker 
key change proposed") + } else if mi.NewWorker != newAddr { + return xerrors.Errorf("worker key %s does not match current worker key proposal %s", newAddr, mi.NewWorker) + } + + if head, err := nodeAPI.ChainHead(ctx); err != nil { + return xerrors.Errorf("failed to get the chain head: %w", err) + } else if head.Height() < mi.WorkerChangeEpoch { + return xerrors.Errorf("worker key change cannot be confirmed until %d, current height is %d", mi.WorkerChangeEpoch, head.Height()) + } + + smsg, err := nodeAPI.MpoolPushMessage(ctx, &types.Message{ + From: mi.Owner, + To: maddr, + Method: miner.Methods.ConfirmUpdateWorkerKey, + Value: big.Zero(), + }, nil) + if err != nil { + return xerrors.Errorf("mpool push: %w", err) + } + + fmt.Fprintln(cctx.App.Writer, "Confirm Message CID:", smsg.Cid()) + + // wait for it to get mined into a block + wait, err := nodeAPI.StateWaitMsg(ctx, smsg.Cid(), build.MessageConfidence) + if err != nil { + return err + } + + // check it executed successfully + if wait.Receipt.ExitCode != 0 { + fmt.Fprintln(cctx.App.Writer, "Worker change failed!") + return err + } + + mi, err = nodeAPI.StateMinerInfo(ctx, maddr, wait.TipSet) + if err != nil { + return err + } + if mi.Worker != newAddr { + return fmt.Errorf("Confirmed worker address change not reflected on chain: expected '%s', found '%s'", newAddr, mi.Worker) + } + + return nil + }, +} diff --git a/cmd/lotus-shed/balances.go b/cmd/lotus-shed/balances.go index bce7be3d5..87530c666 100644 --- a/cmd/lotus-shed/balances.go +++ b/cmd/lotus-shed/balances.go @@ -3,11 +3,14 @@ package main import ( "context" "encoding/csv" + "encoding/json" "fmt" "io" "os" + "runtime" "strconv" "strings" + "sync" "time" "github.com/filecoin-project/lotus/build" @@ -70,6 +73,266 @@ var auditsCmd = &cli.Command{ chainBalanceStateCmd, chainPledgeCmd, fillBalancesCmd, + duplicatedMessagesCmd, + }, +} + +var duplicatedMessagesCmd = &cli.Command{ + Name: "duplicate-messages", + Usage: "Check for duplicate messages included in 
a tipset.", + UsageText: `Check for duplicate messages included in a tipset. + +Due to Filecoin's expected consensus, a tipset may include the same message multiple times in +different blocks. The message will only be executed once. + +This command will find such duplicate messages and print them to standard out as newline-delimited +JSON. Status messages in the form of "H: $HEIGHT ($PROGRESS%)" will be printed to standard error for +every day of chain processed. +`, + Flags: []cli.Flag{ + &cli.IntFlag{ + Name: "parallel", + Usage: "the number of parallel threads for block processing", + DefaultText: "half the number of cores", + }, + &cli.IntFlag{ + Name: "start", + Usage: "the first epoch to check", + DefaultText: "genesis", + }, + &cli.IntFlag{ + Name: "end", + Usage: "the last epoch to check", + DefaultText: "the current head", + }, + &cli.IntSliceFlag{ + Name: "method", + Usage: "filter results by method number", + DefaultText: "all methods", + }, + &cli.StringSliceFlag{ + Name: "include-to", + Usage: "include only messages to the given address (does not perform address resolution)", + DefaultText: "all recipients", + }, + &cli.StringSliceFlag{ + Name: "include-from", + Usage: "include only messages from the given address (does not perform address resolution)", + DefaultText: "all senders", + }, + &cli.StringSliceFlag{ + Name: "exclude-to", + Usage: "exclude messages to the given address (does not perform address resolution)", + }, + &cli.StringSliceFlag{ + Name: "exclude-from", + Usage: "exclude messages from the given address (does not perform address resolution)", + }, + }, + Action: func(cctx *cli.Context) error { + api, closer, err := lcli.GetFullNodeAPI(cctx) + if err != nil { + return err + } + + defer closer() + ctx := lcli.ReqContext(cctx) + + var head *types.TipSet + if cctx.IsSet("end") { + epoch := abi.ChainEpoch(cctx.Int("end")) + head, err = api.ChainGetTipSetByHeight(ctx, epoch, types.EmptyTSK) + } else { + head, err = api.ChainHead(ctx) + } + 
if err != nil { + return err + } + + var printLk sync.Mutex + + threads := runtime.NumCPU() / 2 + if cctx.IsSet("parallel") { + threads = cctx.Int("int") + if threads <= 0 { + return fmt.Errorf("parallelism needs to be at least 1") + } + } else if threads == 0 { + threads = 1 // if we have one core, but who are we kidding... + } + + throttle := make(chan struct{}, threads) + + methods := map[abi.MethodNum]bool{} + for _, m := range cctx.IntSlice("method") { + if m < 0 { + return fmt.Errorf("expected method numbers to be non-negative") + } + methods[abi.MethodNum(m)] = true + } + + addressSet := func(flag string) (map[address.Address]bool, error) { + if !cctx.IsSet(flag) { + return nil, nil + } + addrs := cctx.StringSlice(flag) + set := make(map[address.Address]bool, len(addrs)) + for _, addrStr := range addrs { + addr, err := address.NewFromString(addrStr) + if err != nil { + return nil, fmt.Errorf("failed to parse address %s: %w", addrStr, err) + } + set[addr] = true + } + return set, nil + } + + onlyFrom, err := addressSet("include-from") + if err != nil { + return err + } + onlyTo, err := addressSet("include-to") + if err != nil { + return err + } + excludeFrom, err := addressSet("exclude-from") + if err != nil { + return err + } + excludeTo, err := addressSet("exclude-to") + if err != nil { + return err + } + + target := abi.ChainEpoch(cctx.Int("start")) + if target < 0 || target > head.Height() { + return fmt.Errorf("start height must be greater than 0 and less than the end height") + } + totalEpochs := head.Height() - target + + for target <= head.Height() { + select { + case throttle <- struct{}{}: + case <-ctx.Done(): + return ctx.Err() + } + + go func(ts *types.TipSet) { + defer func() { + <-throttle + }() + + type addrNonce struct { + s address.Address + n uint64 + } + anonce := func(m *types.Message) addrNonce { + return addrNonce{ + s: m.From, + n: m.Nonce, + } + } + + msgs := map[addrNonce]map[cid.Cid]*types.Message{} + + processMessage := func(c 
cid.Cid, m *types.Message) { + // Filter + if len(methods) > 0 && !methods[m.Method] { + return + } + if len(onlyFrom) > 0 && !onlyFrom[m.From] { + return + } + if len(onlyTo) > 0 && !onlyTo[m.To] { + return + } + if excludeFrom[m.From] || excludeTo[m.To] { + return + } + + // Record + msgSet, ok := msgs[anonce(m)] + if !ok { + msgSet = make(map[cid.Cid]*types.Message, 1) + msgs[anonce(m)] = msgSet + } + msgSet[c] = m + } + + encoder := json.NewEncoder(os.Stdout) + + for _, bh := range ts.Blocks() { + bms, err := api.ChainGetBlockMessages(ctx, bh.Cid()) + if err != nil { + fmt.Fprintln(os.Stderr, "ERROR: ", err) + return + } + + for i, m := range bms.BlsMessages { + processMessage(bms.Cids[i], m) + } + + for i, m := range bms.SecpkMessages { + processMessage(bms.Cids[len(bms.BlsMessages)+i], &m.Message) + } + } + for _, ms := range msgs { + if len(ms) == 1 { + continue + } + type Msg struct { + Cid string + Value string + Method uint64 + } + grouped := map[string][]Msg{} + for c, m := range ms { + addr := m.To.String() + grouped[addr] = append(grouped[addr], Msg{ + Cid: c.String(), + Value: types.FIL(m.Value).String(), + Method: uint64(m.Method), + }) + } + printLk.Lock() + err := encoder.Encode(grouped) + if err != nil { + fmt.Fprintln(os.Stderr, "ERROR: ", err) + } + printLk.Unlock() + } + }(head) + + if head.Parents().IsEmpty() { + break + } + + head, err = api.ChainGetTipSet(ctx, head.Parents()) + if err != nil { + return err + } + + if head.Height()%2880 == 0 { + printLk.Lock() + fmt.Fprintf(os.Stderr, "H: %s (%d%%)\n", head.Height(), (100*(head.Height()-target))/totalEpochs) + printLk.Unlock() + } + } + + for i := 0; i < threads; i++ { + select { + case throttle <- struct{}{}: + case <-ctx.Done(): + return ctx.Err() + } + + } + + printLk.Lock() + fmt.Fprintf(os.Stderr, "H: %s (100%%)\n", head.Height()) + printLk.Unlock() + + return nil }, } diff --git a/cmd/lotus-shed/cron-count.go b/cmd/lotus-shed/cron-count.go new file mode 100644 index 000000000..622f38791 
--- /dev/null +++ b/cmd/lotus-shed/cron-count.go @@ -0,0 +1,99 @@ +package main + +import ( + "fmt" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/lotus/build" + lcli "github.com/filecoin-project/lotus/cli" + "github.com/urfave/cli/v2" + "golang.org/x/xerrors" +) + +var cronWcCmd = &cli.Command{ + Name: "cron-wc", + Description: "cron stats", + Subcommands: []*cli.Command{ + minerDeadlineCronCountCmd, + }, +} + +var minerDeadlineCronCountCmd = &cli.Command{ + Name: "deadline", + Description: "list all addresses of miners with active deadline crons", + Action: func(c *cli.Context) error { + return countDeadlineCrons(c) + }, + Flags: []cli.Flag{ + &cli.StringFlag{ + Name: "tipset", + Usage: "specify tipset state to search on (pass comma separated array of cids)", + }, + }, +} + +func findDeadlineCrons(c *cli.Context) (map[address.Address]struct{}, error) { + api, acloser, err := lcli.GetFullNodeAPI(c) + if err != nil { + return nil, err + } + defer acloser() + ctx := lcli.ReqContext(c) + + ts, err := lcli.LoadTipSet(ctx, c, api) + if err != nil { + return nil, err + } + if ts == nil { + ts, err = api.ChainHead(ctx) + if err != nil { + return nil, err + } + } + + mAddrs, err := api.StateListMiners(ctx, ts.Key()) + if err != nil { + return nil, err + } + activeMiners := make(map[address.Address]struct{}) + for _, mAddr := range mAddrs { + // All miners have active cron before v4. 
+ // v4 upgrade epoch is last epoch running v3 epoch and api.StateReadState reads + // parent state, so v4 state isn't read until upgrade epoch + 2 + if ts.Height() <= build.UpgradeTurboHeight+1 { + activeMiners[mAddr] = struct{}{} + continue + } + st, err := api.StateReadState(ctx, mAddr, ts.Key()) + if err != nil { + return nil, err + } + minerState, ok := st.State.(map[string]interface{}) + if !ok { + return nil, xerrors.Errorf("internal error: failed to cast miner state to expected map type") + } + + activeDlineIface, ok := minerState["DeadlineCronActive"] + if !ok { + return nil, xerrors.Errorf("miner %s had no deadline state, is this a v3 state root?", mAddr) + } + active := activeDlineIface.(bool) + if active { + activeMiners[mAddr] = struct{}{} + } + } + + return activeMiners, nil +} + +func countDeadlineCrons(c *cli.Context) error { + activeMiners, err := findDeadlineCrons(c) + if err != nil { + return err + } + for addr := range activeMiners { + fmt.Printf("%s\n", addr) + } + + return nil +} diff --git a/cmd/lotus-shed/election.go b/cmd/lotus-shed/election.go index c844203d6..d49d5c04f 100644 --- a/cmd/lotus-shed/election.go +++ b/cmd/lotus-shed/election.go @@ -1,10 +1,16 @@ package main import ( + "context" "encoding/binary" "fmt" "math/rand" + "github.com/filecoin-project/lotus/api/v0api" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/lotus/chain/gen" "github.com/filecoin-project/lotus/chain/types" lcli "github.com/filecoin-project/lotus/cli" builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin" @@ -18,6 +24,7 @@ var electionCmd = &cli.Command{ Subcommands: []*cli.Command{ electionRunDummy, electionEstimate, + electionBacktest, }, } @@ -124,3 +131,97 @@ var electionEstimate = &cli.Command{ return nil }, } + +var electionBacktest = &cli.Command{ + Name: "backtest", + Usage: "Backtest elections with given miner", + ArgsUsage: "[minerAddress]", + Flags: 
[]cli.Flag{ + &cli.Uint64Flag{ + Name: "height", + Usage: "blockchain head height", + }, + &cli.IntFlag{ + Name: "count", + Usage: "number of won elections to look for", + Value: 120, + }, + }, + Action: func(cctx *cli.Context) error { + api, closer, err := lcli.GetFullNodeAPI(cctx) + if err != nil { + return xerrors.Errorf("GetFullNodeAPI: %w", err) + } + + defer closer() + ctx := lcli.ReqContext(cctx) + + var head *types.TipSet + if cctx.IsSet("height") { + head, err = api.ChainGetTipSetByHeight(ctx, abi.ChainEpoch(cctx.Uint64("height")), types.EmptyTSK) + if err != nil { + return xerrors.Errorf("ChainGetTipSetByHeight: %w", err) + } + } else { + head, err = api.ChainHead(ctx) + if err != nil { + return xerrors.Errorf("ChainHead: %w", err) + } + } + + miner, err := address.NewFromString(cctx.Args().First()) + if err != nil { + return xerrors.Errorf("miner address: %w", err) + } + + count := cctx.Int("count") + if count < 1 { + return xerrors.Errorf("count: %d", count) + } + + fmt.Println("height, winCount") + roundEnd := head.Height() + abi.ChainEpoch(1) + for i := 0; i < count; { + for round := head.Height() + abi.ChainEpoch(1); round <= roundEnd; round++ { + i++ + win, err := backTestWinner(ctx, miner, round, head, api) + if err == nil && win != nil { + fmt.Printf("%d, %d\n", round, win.WinCount) + } + } + + roundEnd = head.Height() + head, err = api.ChainGetTipSet(ctx, head.Parents()) + if err != nil { + break + } + } + return nil + }, +} + +func backTestWinner(ctx context.Context, miner address.Address, round abi.ChainEpoch, ts *types.TipSet, api v0api.FullNode) (*types.ElectionProof, error) { + mbi, err := api.MinerGetBaseInfo(ctx, miner, round, ts.Key()) + if err != nil { + return nil, xerrors.Errorf("failed to get mining base info: %w", err) + } + if mbi == nil { + return nil, nil + } + if !mbi.EligibleForMining { + return nil, nil + } + + brand := mbi.PrevBeaconEntry + bvals := mbi.BeaconEntries + if len(bvals) > 0 { + brand = bvals[len(bvals)-1] + } + + 
winner, err := gen.IsRoundWinner(ctx, ts, round, miner, brand, mbi, api) + if err != nil { + return nil, xerrors.Errorf("failed to check if we win next round: %w", err) + } + + return winner, nil +} diff --git a/cmd/lotus-shed/export-car.go b/cmd/lotus-shed/export-car.go new file mode 100644 index 000000000..97e4fb6c6 --- /dev/null +++ b/cmd/lotus-shed/export-car.go @@ -0,0 +1,103 @@ +package main + +import ( + "fmt" + "io" + "os" + + "github.com/ipfs/go-blockservice" + "github.com/ipfs/go-cid" + offline "github.com/ipfs/go-ipfs-exchange-offline" + format "github.com/ipfs/go-ipld-format" + "github.com/ipfs/go-merkledag" + "github.com/ipld/go-car" + "github.com/urfave/cli/v2" + "golang.org/x/xerrors" + + lcli "github.com/filecoin-project/lotus/cli" + "github.com/filecoin-project/lotus/node/repo" +) + +func carWalkFunc(nd format.Node) (out []*format.Link, err error) { + for _, link := range nd.Links() { + if link.Cid.Prefix().Codec == cid.FilCommitmentSealed || link.Cid.Prefix().Codec == cid.FilCommitmentUnsealed { + continue + } + out = append(out, link) + } + return out, nil +} + +var exportCarCmd = &cli.Command{ + Name: "export-car", + Description: "Export a car from repo (requires node to be offline)", + Flags: []cli.Flag{ + &cli.StringFlag{ + Name: "repo", + Value: "~/.lotus", + }, + }, + Action: func(cctx *cli.Context) error { + if cctx.Args().Len() != 2 { + return lcli.ShowHelp(cctx, fmt.Errorf("must specify file name and object")) + } + + outfile := cctx.Args().First() + var roots []cid.Cid + for _, arg := range cctx.Args().Tail() { + c, err := cid.Decode(arg) + if err != nil { + return err + } + roots = append(roots, c) + } + + ctx := lcli.ReqContext(cctx) + + r, err := repo.NewFS(cctx.String("repo")) + if err != nil { + return xerrors.Errorf("opening fs repo: %w", err) + } + + exists, err := r.Exists() + if err != nil { + return err + } + if !exists { + return xerrors.Errorf("lotus repo doesn't exist") + } + + lr, err := r.Lock(repo.FullNode) + if err != 
nil { + return err + } + defer lr.Close() //nolint:errcheck + + fi, err := os.Create(outfile) + if err != nil { + return xerrors.Errorf("opening the output file: %w", err) + } + + defer fi.Close() //nolint:errcheck + + bs, err := lr.Blockstore(ctx, repo.UniversalBlockstore) + if err != nil { + return fmt.Errorf("failed to open blockstore: %w", err) + } + + defer func() { + if c, ok := bs.(io.Closer); ok { + if err := c.Close(); err != nil { + log.Warnf("failed to close blockstore: %s", err) + } + } + }() + + dag := merkledag.NewDAGService(blockservice.New(bs, offline.Exchange(bs))) + err = car.WriteCarWithWalker(ctx, dag, roots, fi, carWalkFunc) + if err != nil { + return err + } + return nil + }, +} diff --git a/cmd/lotus-shed/frozen-miners.go b/cmd/lotus-shed/frozen-miners.go index 6b843f0d6..ed09c00c5 100644 --- a/cmd/lotus-shed/frozen-miners.go +++ b/cmd/lotus-shed/frozen-miners.go @@ -35,12 +35,6 @@ var frozenMinersCmd = &cli.Command{ if err != nil { return err } - if ts == nil { - ts, err = api.ChainHead(ctx) - if err != nil { - return err - } - } queryEpoch := ts.Height() diff --git a/cmd/lotus-shed/main.go b/cmd/lotus-shed/main.go index ebe4f014a..e06b63080 100644 --- a/cmd/lotus-shed/main.go +++ b/cmd/lotus-shed/main.go @@ -20,6 +20,7 @@ func main() { base32Cmd, base16Cmd, bitFieldCmd, + cronWcCmd, frozenMinersCmd, keyinfoCmd, jwtCmd, @@ -34,6 +35,7 @@ func main() { postFindCmd, proofsCmd, verifRegCmd, + marketCmd, miscCmd, mpoolCmd, genesisVerifyCmd, @@ -41,6 +43,7 @@ func main() { minerCmd, mpoolStatsCmd, exportChainCmd, + exportCarCmd, consensusCmd, storageStatsCmd, syncCmd, @@ -54,6 +57,9 @@ func main() { cidCmd, blockmsgidCmd, signaturesCmd, + actorCmd, + minerTypesCmd, + minerMultisigsCmd, } app := &cli.App{ diff --git a/cmd/lotus-shed/market.go b/cmd/lotus-shed/market.go new file mode 100644 index 000000000..e2e322784 --- /dev/null +++ b/cmd/lotus-shed/market.go @@ -0,0 +1,102 @@ +package main + +import ( + "fmt" + + lcli 
"github.com/filecoin-project/lotus/cli" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + "github.com/urfave/cli/v2" + "golang.org/x/xerrors" +) + +var marketCmd = &cli.Command{ + Name: "market", + Usage: "Interact with the market actor", + Flags: []cli.Flag{}, + Subcommands: []*cli.Command{ + marketDealFeesCmd, + }, +} + +var marketDealFeesCmd = &cli.Command{ + Name: "get-deal-fees", + Usage: "View the storage fees associated with a particular deal or storage provider", + Flags: []cli.Flag{ + &cli.StringFlag{ + Name: "provider", + Usage: "provider whose outstanding fees you'd like to calculate", + }, + &cli.IntFlag{ + Name: "dealId", + Usage: "deal whose outstanding fees you'd like to calculate", + }, + }, + Action: func(cctx *cli.Context) error { + api, closer, err := lcli.GetFullNodeAPI(cctx) + if err != nil { + return err + } + defer closer() + + ctx := lcli.ReqContext(cctx) + + ts, err := lcli.LoadTipSet(ctx, cctx, api) + if err != nil { + return err + } + + ht := ts.Height() + + if cctx.IsSet("provider") { + p, err := address.NewFromString(cctx.String("provider")) + if err != nil { + return fmt.Errorf("failed to parse provider: %w", err) + } + + deals, err := api.StateMarketDeals(ctx, ts.Key()) + if err != nil { + return err + } + + ef := big.Zero() + pf := big.Zero() + count := 0 + + for _, deal := range deals { + if deal.Proposal.Provider == p { + e, p := deal.Proposal.GetDealFees(ht) + ef = big.Add(ef, e) + pf = big.Add(pf, p) + count++ + } + } + + fmt.Println("Total deals: ", count) + fmt.Println("Total earned fees: ", ef) + fmt.Println("Total pending fees: ", pf) + fmt.Println("Total fees: ", big.Add(ef, pf)) + + return nil + } + + if dealid := cctx.Int("dealId"); dealid != 0 { + deal, err := api.StateMarketStorageDeal(ctx, abi.DealID(dealid), ts.Key()) + if err != nil { + return err + } + + ef, pf := deal.Proposal.GetDealFees(ht) + + fmt.Println("Earned 
fees: ", ef) + fmt.Println("Pending fees: ", pf) + fmt.Println("Total fees: ", big.Add(ef, pf)) + + return nil + } + + return xerrors.New("must provide either --provider or --dealId flag") + }, +} diff --git a/cmd/lotus-shed/math.go b/cmd/lotus-shed/math.go index 434559f09..c6d4ed0c9 100644 --- a/cmd/lotus-shed/math.go +++ b/cmd/lotus-shed/math.go @@ -8,8 +8,10 @@ import ( "strings" "github.com/urfave/cli/v2" + "golang.org/x/xerrors" "github.com/filecoin-project/lotus/chain/types" + miner5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/miner" ) var mathCmd = &cli.Command{ @@ -17,6 +19,7 @@ var mathCmd = &cli.Command{ Usage: "utility commands around doing math on a list of numbers", Subcommands: []*cli.Command{ mathSumCmd, + mathAggFeesCmd, }, } @@ -101,3 +104,30 @@ var mathSumCmd = &cli.Command{ return nil }, } + +var mathAggFeesCmd = &cli.Command{ + Name: "agg-fees", + Flags: []cli.Flag{ + &cli.IntFlag{ + Name: "size", + Required: true, + }, + &cli.StringFlag{ + Name: "base-fee", + Usage: "baseFee aFIL", + Required: true, + }, + }, + Action: func(cctx *cli.Context) error { + as := cctx.Int("size") + + bf, err := types.BigFromString(cctx.String("base-fee")) + if err != nil { + return xerrors.Errorf("parsing basefee: %w", err) + } + + fmt.Println(types.FIL(miner5.AggregateNetworkFee(as, bf))) + + return nil + }, +} diff --git a/cmd/lotus-shed/miner-multisig.go b/cmd/lotus-shed/miner-multisig.go new file mode 100644 index 000000000..d9f158090 --- /dev/null +++ b/cmd/lotus-shed/miner-multisig.go @@ -0,0 +1,388 @@ +package main + +import ( + "bytes" + "fmt" + "strconv" + + "github.com/filecoin-project/go-state-types/abi" + miner5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/miner" + + msig5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/multisig" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/lotus/build" + 
"github.com/filecoin-project/lotus/chain/actors" + "github.com/filecoin-project/lotus/chain/actors/builtin/miner" + "github.com/filecoin-project/lotus/chain/types" + lcli "github.com/filecoin-project/lotus/cli" + "github.com/urfave/cli/v2" + "golang.org/x/xerrors" +) + +var minerMultisigsCmd = &cli.Command{ + Name: "miner-multisig", + Description: "a collection of utilities for using multisigs as owner addresses of miners", + Subcommands: []*cli.Command{ + mmProposeWithdrawBalance, + mmApproveWithdrawBalance, + mmProposeChangeOwner, + mmApproveChangeOwner, + }, + Flags: []cli.Flag{ + &cli.StringFlag{ + Name: "from", + Usage: "specify address to send message from", + Required: true, + }, + &cli.StringFlag{ + Name: "multisig", + Usage: "specify multisig that will receive the message", + Required: true, + }, + &cli.StringFlag{ + Name: "miner", + Usage: "specify miner being acted upon", + Required: true, + }, + }, +} + +var mmProposeWithdrawBalance = &cli.Command{ + Name: "propose-withdraw", + Usage: "Propose to withdraw FIL from the miner", + ArgsUsage: "[amount]", + Action: func(cctx *cli.Context) error { + if !cctx.Args().Present() { + return fmt.Errorf("must pass amount to withdraw") + } + + api, closer, err := lcli.GetFullNodeAPI(cctx) + if err != nil { + return err + } + defer closer() + + ctx := lcli.ReqContext(cctx) + + multisigAddr, sender, minerAddr, err := getInputs(cctx) + if err != nil { + return err + } + + val, err := types.ParseFIL(cctx.Args().First()) + if err != nil { + return err + } + + sp, err := actors.SerializeParams(&miner5.WithdrawBalanceParams{ + AmountRequested: abi.TokenAmount(val), + }) + if err != nil { + return err + } + + pcid, err := api.MsigPropose(ctx, multisigAddr, minerAddr, big.Zero(), sender, uint64(miner.Methods.WithdrawBalance), sp) + if err != nil { + return xerrors.Errorf("proposing message: %w", err) + } + + fmt.Fprintln(cctx.App.Writer, "Propose Message CID:", pcid) + + // wait for it to get mined into a block + wait, err := 
api.StateWaitMsg(ctx, pcid, build.MessageConfidence) + if err != nil { + return err + } + + // check it executed successfully + if wait.Receipt.ExitCode != 0 { + fmt.Fprintln(cctx.App.Writer, "Propose owner change tx failed!") + return err + } + + var retval msig5.ProposeReturn + if err := retval.UnmarshalCBOR(bytes.NewReader(wait.Receipt.Return)); err != nil { + return fmt.Errorf("failed to unmarshal propose return value: %w", err) + } + + fmt.Printf("Transaction ID: %d\n", retval.TxnID) + if retval.Applied { + fmt.Printf("Transaction was executed during propose\n") + fmt.Printf("Exit Code: %d\n", retval.Code) + fmt.Printf("Return Value: %x\n", retval.Ret) + } + + return nil + }, +} + +var mmApproveWithdrawBalance = &cli.Command{ + Name: "approve-withdraw", + Usage: "Approve to withdraw FIL from the miner", + ArgsUsage: "[amount txnId proposer]", + Action: func(cctx *cli.Context) error { + if cctx.NArg() != 3 { + return fmt.Errorf("must pass amount, txn Id, and proposer address") + } + + api, closer, err := lcli.GetFullNodeAPI(cctx) + if err != nil { + return err + } + defer closer() + + ctx := lcli.ReqContext(cctx) + + multisigAddr, sender, minerAddr, err := getInputs(cctx) + if err != nil { + return err + } + + val, err := types.ParseFIL(cctx.Args().First()) + if err != nil { + return err + } + + sp, err := actors.SerializeParams(&miner5.WithdrawBalanceParams{ + AmountRequested: abi.TokenAmount(val), + }) + if err != nil { + return err + } + + txid, err := strconv.ParseUint(cctx.Args().Get(1), 10, 64) + if err != nil { + return err + } + + proposer, err := address.NewFromString(cctx.Args().Get(2)) + if err != nil { + return err + } + + acid, err := api.MsigApproveTxnHash(ctx, multisigAddr, txid, proposer, minerAddr, big.Zero(), sender, uint64(miner.Methods.WithdrawBalance), sp) + if err != nil { + return xerrors.Errorf("approving message: %w", err) + } + + fmt.Fprintln(cctx.App.Writer, "Approve Message CID:", acid) + + // wait for it to get mined into a block + 
wait, err := api.StateWaitMsg(ctx, acid, build.MessageConfidence) + if err != nil { + return err + } + + // check it executed successfully + if wait.Receipt.ExitCode != 0 { + fmt.Fprintln(cctx.App.Writer, "Approve owner change tx failed!") + return err + } + + var retval msig5.ApproveReturn + if err := retval.UnmarshalCBOR(bytes.NewReader(wait.Receipt.Return)); err != nil { + return fmt.Errorf("failed to unmarshal approve return value: %w", err) + } + + if retval.Applied { + fmt.Printf("Transaction was executed with the approve\n") + fmt.Printf("Exit Code: %d\n", retval.Code) + fmt.Printf("Return Value: %x\n", retval.Ret) + } else { + fmt.Println("Transaction was approved, but not executed") + } + return nil + }, +} + +var mmProposeChangeOwner = &cli.Command{ + Name: "propose-change-owner", + Usage: "Propose an owner address change", + ArgsUsage: "[newOwner]", + Action: func(cctx *cli.Context) error { + if !cctx.Args().Present() { + return fmt.Errorf("must pass new owner address") + } + + api, closer, err := lcli.GetFullNodeAPI(cctx) + if err != nil { + return err + } + defer closer() + + ctx := lcli.ReqContext(cctx) + + multisigAddr, sender, minerAddr, err := getInputs(cctx) + if err != nil { + return err + } + + na, err := address.NewFromString(cctx.Args().First()) + if err != nil { + return err + } + + newAddr, err := api.StateLookupID(ctx, na, types.EmptyTSK) + if err != nil { + return err + } + + mi, err := api.StateMinerInfo(ctx, minerAddr, types.EmptyTSK) + if err != nil { + return err + } + + if mi.Owner == newAddr { + return fmt.Errorf("owner address already set to %s", na) + } + + sp, err := actors.SerializeParams(&newAddr) + if err != nil { + return xerrors.Errorf("serializing params: %w", err) + } + + pcid, err := api.MsigPropose(ctx, multisigAddr, minerAddr, big.Zero(), sender, uint64(miner.Methods.ChangeOwnerAddress), sp) + if err != nil { + return xerrors.Errorf("proposing message: %w", err) + } + + fmt.Fprintln(cctx.App.Writer, "Propose Message 
CID:", pcid) + + // wait for it to get mined into a block + wait, err := api.StateWaitMsg(ctx, pcid, build.MessageConfidence) + if err != nil { + return err + } + + // check it executed successfully + if wait.Receipt.ExitCode != 0 { + fmt.Fprintln(cctx.App.Writer, "Propose owner change tx failed!") + return err + } + + var retval msig5.ProposeReturn + if err := retval.UnmarshalCBOR(bytes.NewReader(wait.Receipt.Return)); err != nil { + return fmt.Errorf("failed to unmarshal propose return value: %w", err) + } + + fmt.Printf("Transaction ID: %d\n", retval.TxnID) + if retval.Applied { + fmt.Printf("Transaction was executed during propose\n") + fmt.Printf("Exit Code: %d\n", retval.Code) + fmt.Printf("Return Value: %x\n", retval.Ret) + } + return nil + }, +} + +var mmApproveChangeOwner = &cli.Command{ + Name: "approve-change-owner", + Usage: "Approve an owner address change", + ArgsUsage: "[newOwner txnId proposer]", + Action: func(cctx *cli.Context) error { + if cctx.NArg() != 3 { + return fmt.Errorf("must pass new owner address, txn Id, and proposer address") + } + + api, closer, err := lcli.GetFullNodeAPI(cctx) + if err != nil { + return err + } + defer closer() + + ctx := lcli.ReqContext(cctx) + + multisigAddr, sender, minerAddr, err := getInputs(cctx) + if err != nil { + return err + } + + na, err := address.NewFromString(cctx.Args().First()) + if err != nil { + return err + } + + newAddr, err := api.StateLookupID(ctx, na, types.EmptyTSK) + if err != nil { + return err + } + + txid, err := strconv.ParseUint(cctx.Args().Get(1), 10, 64) + if err != nil { + return err + } + + proposer, err := address.NewFromString(cctx.Args().Get(2)) + if err != nil { + return err + } + + mi, err := api.StateMinerInfo(ctx, minerAddr, types.EmptyTSK) + if err != nil { + return err + } + + if mi.Owner == newAddr { + return fmt.Errorf("owner address already set to %s", na) + } + + sp, err := actors.SerializeParams(&newAddr) + if err != nil { + return xerrors.Errorf("serializing params: 
%w", err) + } + + acid, err := api.MsigApproveTxnHash(ctx, multisigAddr, txid, proposer, minerAddr, big.Zero(), sender, uint64(miner.Methods.ChangeOwnerAddress), sp) + if err != nil { + return xerrors.Errorf("approving message: %w", err) + } + + fmt.Fprintln(cctx.App.Writer, "Approve Message CID:", acid) + + // wait for it to get mined into a block + wait, err := api.StateWaitMsg(ctx, acid, build.MessageConfidence) + if err != nil { + return err + } + + // check it executed successfully + if wait.Receipt.ExitCode != 0 { + fmt.Fprintln(cctx.App.Writer, "Approve owner change tx failed!") + return err + } + + var retval msig5.ApproveReturn + if err := retval.UnmarshalCBOR(bytes.NewReader(wait.Receipt.Return)); err != nil { + return fmt.Errorf("failed to unmarshal approve return value: %w", err) + } + + if retval.Applied { + fmt.Printf("Transaction was executed with the approve\n") + fmt.Printf("Exit Code: %d\n", retval.Code) + fmt.Printf("Return Value: %x\n", retval.Ret) + } else { + fmt.Println("Transaction was approved, but not executed") + } + return nil + }, +} + +func getInputs(cctx *cli.Context) (address.Address, address.Address, address.Address, error) { + multisigAddr, err := address.NewFromString(cctx.String("multisig")) + if err != nil { + return address.Undef, address.Undef, address.Undef, err + } + + sender, err := address.NewFromString(cctx.String("from")) + if err != nil { + return address.Undef, address.Undef, address.Undef, err + } + + minerAddr, err := address.NewFromString(cctx.String("miner")) + if err != nil { + return address.Undef, address.Undef, address.Undef, err + } + + return multisigAddr, sender, minerAddr, nil +} diff --git a/cmd/lotus-shed/miner-types.go b/cmd/lotus-shed/miner-types.go new file mode 100644 index 000000000..19a30c4b9 --- /dev/null +++ b/cmd/lotus-shed/miner-types.go @@ -0,0 +1,154 @@ +package main + +import ( + "context" + "fmt" + "io" + "math/big" + + big2 "github.com/filecoin-project/go-state-types/big" + + 
"github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/lotus/chain/actors/builtin/miner" + "github.com/filecoin-project/lotus/chain/actors/builtin/power" + "github.com/filecoin-project/lotus/chain/state" + "github.com/filecoin-project/lotus/chain/store" + "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/chain/vm" + "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper" + "github.com/filecoin-project/lotus/node/repo" + builtin4 "github.com/filecoin-project/specs-actors/v4/actors/builtin" + "github.com/filecoin-project/specs-actors/v4/actors/util/adt" + "github.com/ipfs/go-cid" + cbor "github.com/ipfs/go-ipld-cbor" + "github.com/urfave/cli/v2" + "golang.org/x/xerrors" +) + +var minerTypesCmd = &cli.Command{ + Name: "miner-types", + Usage: "Scrape state to report on how many miners of each WindowPoStProofType exist", Flags: []cli.Flag{ + &cli.StringFlag{ + Name: "repo", + Value: "~/.lotus", + }, + }, + Action: func(cctx *cli.Context) error { + ctx := context.TODO() + + if !cctx.Args().Present() { + return fmt.Errorf("must pass state root") + } + + sroot, err := cid.Decode(cctx.Args().First()) + if err != nil { + return fmt.Errorf("failed to parse input: %w", err) + } + + fsrepo, err := repo.NewFS(cctx.String("repo")) + if err != nil { + return err + } + + lkrepo, err := fsrepo.Lock(repo.FullNode) + if err != nil { + return err + } + + defer lkrepo.Close() //nolint:errcheck + + bs, err := lkrepo.Blockstore(ctx, repo.UniversalBlockstore) + if err != nil { + return fmt.Errorf("failed to open blockstore: %w", err) + } + + defer func() { + if c, ok := bs.(io.Closer); ok { + if err := c.Close(); err != nil { + log.Warnf("failed to close blockstore: %s", err) + } + } + }() + + mds, err := lkrepo.Datastore(context.Background(), "/metadata") + if err != nil { + return err + } + + cs := store.NewChainStore(bs, bs, mds, vm.Syscalls(ffiwrapper.ProofVerifier), nil) 
+ defer cs.Close() //nolint:errcheck + + cst := cbor.NewCborStore(bs) + store := adt.WrapStore(ctx, cst) + + tree, err := state.LoadStateTree(cst, sroot) + if err != nil { + return err + } + + typeMap := make(map[abi.RegisteredPoStProof]int64) + pa, err := tree.GetActor(power.Address) + if err != nil { + return err + } + + ps, err := power.Load(store, pa) + if err != nil { + return err + } + + dc := 0 + dz := power.Claim{ + RawBytePower: abi.NewStoragePower(0), + QualityAdjPower: abi.NewStoragePower(0), + } + + err = tree.ForEach(func(addr address.Address, act *types.Actor) error { + if act.Code == builtin4.StorageMinerActorCodeID { + ms, err := miner.Load(store, act) + if err != nil { + return err + } + + mi, err := ms.Info() + if err != nil { + return err + } + + if mi.WindowPoStProofType == abi.RegisteredPoStProof_StackedDrgWindow64GiBV1 { + mp, f, err := ps.MinerPower(addr) + if err != nil { + return err + } + + if f && mp.RawBytePower.Cmp(big.NewInt(10<<40)) >= 0 && mp.RawBytePower.Cmp(big.NewInt(20<<40)) < 0 { + dc = dc + 1 + dz.RawBytePower = big2.Add(dz.RawBytePower, mp.RawBytePower) + dz.QualityAdjPower = big2.Add(dz.QualityAdjPower, mp.QualityAdjPower) + } + } + + c, f := typeMap[mi.WindowPoStProofType] + if !f { + typeMap[mi.WindowPoStProofType] = 1 + } else { + typeMap[mi.WindowPoStProofType] = c + 1 + } + } + return nil + }) + if err != nil { + return xerrors.Errorf("failed to loop over actors: %w", err) + } + + for k, v := range typeMap { + fmt.Println("Type:", k, " Count: ", v) + } + + fmt.Println("Mismatched power (raw, QA): ", dz.RawBytePower, " ", dz.QualityAdjPower) + fmt.Println("Mismatched 64 GiB miner count: ", dc) + + return nil + }, +} diff --git a/cmd/lotus-shed/postfind.go b/cmd/lotus-shed/postfind.go index 83006fd09..c8a4c9907 100644 --- a/cmd/lotus-shed/postfind.go +++ b/cmd/lotus-shed/postfind.go @@ -49,12 +49,6 @@ var postFindCmd = &cli.Command{ if err != nil { return err } - if startTs == nil { - startTs, err = api.ChainHead(ctx) - if 
err != nil { - return err - } - } stopEpoch := startTs.Height() - abi.ChainEpoch(c.Int("lookback")) if verbose { fmt.Printf("Collecting messages between %d and %d\n", startTs.Height(), stopEpoch) diff --git a/cmd/lotus-shed/sectors.go b/cmd/lotus-shed/sectors.go index 6cf6ee86e..cf40e1152 100644 --- a/cmd/lotus-shed/sectors.go +++ b/cmd/lotus-shed/sectors.go @@ -254,7 +254,7 @@ var terminateSectorPenaltyEstimationCmd = &cli.Command{ //TODO: 4667 add an option to give a more precise estimation with pending termination penalty excluded - invocResult, err := nodeApi.StateCall(ctx, msg, types.TipSetKey{}) + invocResult, err := nodeApi.StateCall(ctx, msg, types.EmptyTSK) if err != nil { return xerrors.Errorf("fail to state call: %w", err) } diff --git a/cmd/lotus-shed/stateroot-stats.go b/cmd/lotus-shed/stateroot-stats.go index 023f782bd..6d5d57708 100644 --- a/cmd/lotus-shed/stateroot-stats.go +++ b/cmd/lotus-shed/stateroot-stats.go @@ -56,13 +56,6 @@ var staterootDiffsCmd = &cli.Command{ return err } - if ts == nil { - ts, err = api.ChainHead(ctx) - if err != nil { - return err - } - } - fn := func(ts *types.TipSet) (cid.Cid, []cid.Cid) { blk := ts.Blocks()[0] strt := blk.ParentStateRoot @@ -134,13 +127,6 @@ var staterootStatCmd = &cli.Command{ return err } - if ts == nil { - ts, err = api.ChainHead(ctx) - if err != nil { - return err - } - } - var addrs []address.Address for _, inp := range cctx.Args().Slice() { diff --git a/cmd/lotus-shed/storage-stats.go b/cmd/lotus-shed/storage-stats.go index a40f082be..a9a5744a6 100644 --- a/cmd/lotus-shed/storage-stats.go +++ b/cmd/lotus-shed/storage-stats.go @@ -2,10 +2,12 @@ package main import ( "encoding/json" + corebig "math/big" "os" "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" + filbig "github.com/filecoin-project/go-state-types/big" lcli "github.com/filecoin-project/lotus/cli" "github.com/ipfs/go-cid" "github.com/urfave/cli/v2" @@ -21,13 +23,16 @@ type networkTotalsOutput 
struct { } type networkTotals struct { - UniqueCids int `json:"total_unique_cids"` - UniqueProviders int `json:"total_unique_providers"` - UniqueClients int `json:"total_unique_clients"` - TotalDeals int `json:"total_num_deals"` - TotalBytes int64 `json:"total_stored_data_size"` - FilplusTotalDeals int `json:"filplus_total_num_deals"` - FilplusTotalBytes int64 `json:"filplus_total_stored_data_size"` + QaNetworkPower filbig.Int `json:"total_qa_power"` + RawNetworkPower filbig.Int `json:"total_raw_capacity"` + CapacityCarryingData float64 `json:"capacity_fraction_carrying_data"` + UniqueCids int `json:"total_unique_cids"` + UniqueProviders int `json:"total_unique_providers"` + UniqueClients int `json:"total_unique_clients"` + TotalDeals int `json:"total_num_deals"` + TotalBytes int64 `json:"total_stored_data_size"` + FilplusTotalDeals int `json:"filplus_total_num_deals"` + FilplusTotalBytes int64 `json:"filplus_total_stored_data_size"` seenClient map[address.Address]bool seenProvider map[address.Address]bool @@ -66,10 +71,17 @@ var storageStatsCmd = &cli.Command{ return err } + power, err := api.StateMinerPower(ctx, address.Address{}, head.Key()) + if err != nil { + return err + } + netTotals := networkTotals{ - seenClient: make(map[address.Address]bool), - seenProvider: make(map[address.Address]bool), - seenPieceCid: make(map[cid.Cid]bool), + QaNetworkPower: power.TotalPower.QualityAdjPower, + RawNetworkPower: power.TotalPower.RawBytePower, + seenClient: make(map[address.Address]bool), + seenProvider: make(map[address.Address]bool), + seenPieceCid: make(map[cid.Cid]bool), } deals, err := api.StateMarketDeals(ctx, head.Key()) @@ -103,6 +115,11 @@ var storageStatsCmd = &cli.Command{ netTotals.UniqueClients = len(netTotals.seenClient) netTotals.UniqueProviders = len(netTotals.seenProvider) + netTotals.CapacityCarryingData, _ = new(corebig.Rat).SetFrac( + corebig.NewInt(netTotals.TotalBytes), + netTotals.RawNetworkPower.Int, + ).Float64() + return 
json.NewEncoder(os.Stdout).Encode( networkTotalsOutput{ Epoch: int64(head.Height()), diff --git a/cmd/lotus-shed/sync.go b/cmd/lotus-shed/sync.go index 65d2b6d6f..cab3bd29e 100644 --- a/cmd/lotus-shed/sync.go +++ b/cmd/lotus-shed/sync.go @@ -172,12 +172,13 @@ var syncScrapePowerCmd = &cli.Command{ return err } - qpercI := types.BigDiv(types.BigMul(totalWonPower.QualityAdjPower, types.NewInt(1000000)), totalPower.TotalPower.QualityAdjPower) - fmt.Println("Number of winning miners: ", len(miners)) fmt.Println("QAdjPower of winning miners: ", totalWonPower.QualityAdjPower) fmt.Println("QAdjPower of all miners: ", totalPower.TotalPower.QualityAdjPower) - fmt.Println("Percentage of winning QAdjPower: ", float64(qpercI.Int64())/10000) + fmt.Println("Percentage of winning QAdjPower: ", types.BigDivFloat( + types.BigMul(totalWonPower.QualityAdjPower, big.NewInt(100)), + totalPower.TotalPower.QualityAdjPower, + )) return nil }, diff --git a/cmd/lotus-shed/verifreg.go b/cmd/lotus-shed/verifreg.go index 988de5d53..7640e636a 100644 --- a/cmd/lotus-shed/verifreg.go +++ b/cmd/lotus-shed/verifreg.go @@ -67,11 +67,13 @@ var verifRegAddVerifierCmd = &cli.Command{ return err } - api, closer, err := lcli.GetFullNodeAPI(cctx) + srv, err := lcli.GetFullNodeServices(cctx) if err != nil { return err } - defer closer() + defer srv.Close() //nolint:errcheck + + api := srv.FullNodeAPI() ctx := lcli.ReqContext(cctx) vrk, err := api.StateVerifiedRegistryRootKey(ctx, types.EmptyTSK) @@ -79,14 +81,21 @@ var verifRegAddVerifierCmd = &cli.Command{ return err } - smsg, err := api.MsigPropose(ctx, vrk, verifreg.Address, big.Zero(), sender, uint64(verifreg.Methods.AddVerifier), params) + proto, err := api.MsigPropose(ctx, vrk, verifreg.Address, big.Zero(), sender, uint64(verifreg.Methods.AddVerifier), params) if err != nil { return err } - fmt.Printf("message sent, now waiting on cid: %s\n", smsg) + sm, _, err := srv.PublishMessage(ctx, proto, false) + if err != nil { + return err + } - mwait, err 
:= api.StateWaitMsg(ctx, smsg, build.MessageConfidence) + msgCid := sm.Cid() + + fmt.Printf("message sent, now waiting on cid: %s\n", msgCid) + + mwait, err := api.StateWaitMsg(ctx, msgCid, uint64(cctx.Int("confidence")), build.Finality, true) if err != nil { return err } diff --git a/cmd/lotus-sim/copy.go b/cmd/lotus-sim/copy.go new file mode 100644 index 000000000..5faba69f2 --- /dev/null +++ b/cmd/lotus-sim/copy.go @@ -0,0 +1,28 @@ +package main + +import ( + "fmt" + + "github.com/urfave/cli/v2" +) + +var copySimCommand = &cli.Command{ + Name: "copy", + ArgsUsage: "", + Action: func(cctx *cli.Context) (err error) { + node, err := open(cctx) + if err != nil { + return err + } + defer func() { + if cerr := node.Close(); err == nil { + err = cerr + } + }() + if cctx.NArg() != 1 { + return fmt.Errorf("expected 1 argument") + } + name := cctx.Args().First() + return node.CopySim(cctx.Context, cctx.String("simulation"), name) + }, +} diff --git a/cmd/lotus-sim/create.go b/cmd/lotus-sim/create.go new file mode 100644 index 000000000..4867a5da5 --- /dev/null +++ b/cmd/lotus-sim/create.go @@ -0,0 +1,49 @@ +package main + +import ( + "fmt" + + "github.com/urfave/cli/v2" + + "github.com/filecoin-project/lotus/chain/types" + lcli "github.com/filecoin-project/lotus/cli" +) + +var createSimCommand = &cli.Command{ + Name: "create", + ArgsUsage: "[tipset]", + Action: func(cctx *cli.Context) (err error) { + node, err := open(cctx) + if err != nil { + return err + } + defer func() { + if cerr := node.Close(); err == nil { + err = cerr + } + }() + + var ts *types.TipSet + switch cctx.NArg() { + case 0: + if err := node.Chainstore.Load(); err != nil { + return err + } + ts = node.Chainstore.GetHeaviestTipSet() + case 1: + cids, err := lcli.ParseTipSetString(cctx.Args().Get(1)) + if err != nil { + return err + } + tsk := types.NewTipSetKey(cids...) 
+ ts, err = node.Chainstore.LoadTipSet(tsk) + if err != nil { + return err + } + default: + return fmt.Errorf("expected 0 or 1 arguments") + } + _, err = node.CreateSim(cctx.Context, cctx.String("simulation"), ts) + return err + }, +} diff --git a/cmd/lotus-sim/delete.go b/cmd/lotus-sim/delete.go new file mode 100644 index 000000000..c19b3d27d --- /dev/null +++ b/cmd/lotus-sim/delete.go @@ -0,0 +1,22 @@ +package main + +import ( + "github.com/urfave/cli/v2" +) + +var deleteSimCommand = &cli.Command{ + Name: "delete", + Action: func(cctx *cli.Context) (err error) { + node, err := open(cctx) + if err != nil { + return err + } + defer func() { + if cerr := node.Close(); err == nil { + err = cerr + } + }() + + return node.DeleteSim(cctx.Context, cctx.String("simulation")) + }, +} diff --git a/cmd/lotus-sim/info.go b/cmd/lotus-sim/info.go new file mode 100644 index 000000000..864adb3bc --- /dev/null +++ b/cmd/lotus-sim/info.go @@ -0,0 +1,110 @@ +package main + +import ( + "context" + "fmt" + "io" + "text/tabwriter" + "time" + + "github.com/urfave/cli/v2" + + "github.com/filecoin-project/go-state-types/big" + + "github.com/filecoin-project/lotus/chain/actors/builtin" + "github.com/filecoin-project/lotus/chain/actors/builtin/power" + "github.com/filecoin-project/lotus/chain/stmgr" + "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/cmd/lotus-sim/simulation" +) + +func getTotalPower(ctx context.Context, sm *stmgr.StateManager, ts *types.TipSet) (power.Claim, error) { + actor, err := sm.LoadActor(ctx, power.Address, ts) + if err != nil { + return power.Claim{}, err + } + state, err := power.Load(sm.ChainStore().ActorStore(ctx), actor) + if err != nil { + return power.Claim{}, err + } + return state.TotalPower() +} + +func printInfo(ctx context.Context, sim *simulation.Simulation, out io.Writer) error { + head := sim.GetHead() + start := sim.GetStart() + + powerNow, err := getTotalPower(ctx, sim.StateManager, head) + if err != nil { + 
return err + } + powerLookbackEpoch := head.Height() - builtin.EpochsInDay*2 + if powerLookbackEpoch < start.Height() { + powerLookbackEpoch = start.Height() + } + lookbackTs, err := sim.Node.Chainstore.GetTipsetByHeight(ctx, powerLookbackEpoch, head, false) + if err != nil { + return err + } + powerLookback, err := getTotalPower(ctx, sim.StateManager, lookbackTs) + if err != nil { + return err + } + // growth rate in size/day + growthRate := big.Div( + big.Mul(big.Sub(powerNow.RawBytePower, powerLookback.RawBytePower), + big.NewInt(builtin.EpochsInDay)), + big.NewInt(int64(head.Height()-lookbackTs.Height())), + ) + + tw := tabwriter.NewWriter(out, 8, 8, 1, ' ', 0) + + headEpoch := head.Height() + firstEpoch := start.Height() + 1 + + headTime := time.Unix(int64(head.MinTimestamp()), 0) + startTime := time.Unix(int64(start.MinTimestamp()), 0) + duration := headTime.Sub(startTime) + + fmt.Fprintf(tw, "Name:\t%s\n", sim.Name()) + fmt.Fprintf(tw, "Head:\t%s\n", head) + fmt.Fprintf(tw, "Start Epoch:\t%d\n", firstEpoch) + fmt.Fprintf(tw, "End Epoch:\t%d\n", headEpoch) + fmt.Fprintf(tw, "Length:\t%d\n", headEpoch-firstEpoch) + fmt.Fprintf(tw, "Start Date:\t%s\n", startTime) + fmt.Fprintf(tw, "End Date:\t%s\n", headTime) + fmt.Fprintf(tw, "Duration:\t%.2f day(s)\n", duration.Hours()/24) + fmt.Fprintf(tw, "Capacity:\t%s\n", types.SizeStr(powerNow.RawBytePower)) + fmt.Fprintf(tw, "Daily Capacity Growth:\t%s/day\n", types.SizeStr(growthRate)) + fmt.Fprintf(tw, "Network Version:\t%d\n", sim.GetNetworkVersion()) + return tw.Flush() +} + +var infoSimCommand = &cli.Command{ + Name: "info", + Description: "Output information about the simulation.", + Subcommands: []*cli.Command{ + infoCommitGasSimCommand, + infoMessageSizeSimCommand, + infoWindowPostBandwidthSimCommand, + infoCapacityGrowthSimCommand, + infoStateGrowthSimCommand, + }, + Action: func(cctx *cli.Context) (err error) { + node, err := open(cctx) + if err != nil { + return err + } + defer func() { + if cerr := 
node.Close(); err == nil { + err = cerr + } + }() + + sim, err := node.LoadSim(cctx.Context, cctx.String("simulation")) + if err != nil { + return err + } + return printInfo(cctx.Context, sim, cctx.App.Writer) + }, +} diff --git a/cmd/lotus-sim/info_capacity.go b/cmd/lotus-sim/info_capacity.go new file mode 100644 index 000000000..4372ee34a --- /dev/null +++ b/cmd/lotus-sim/info_capacity.go @@ -0,0 +1,67 @@ +package main + +import ( + "fmt" + + "github.com/urfave/cli/v2" + + "github.com/filecoin-project/go-state-types/big" + + "github.com/filecoin-project/lotus/chain/actors/builtin" + "github.com/filecoin-project/lotus/chain/types" +) + +var infoCapacityGrowthSimCommand = &cli.Command{ + Name: "capacity-growth", + Description: "List daily capacity growth over the course of the simulation starting at the end.", + Action: func(cctx *cli.Context) (err error) { + node, err := open(cctx) + if err != nil { + return err + } + defer func() { + if cerr := node.Close(); err == nil { + err = cerr + } + }() + + sim, err := node.LoadSim(cctx.Context, cctx.String("simulation")) + if err != nil { + return err + } + + firstEpoch := sim.GetStart().Height() + ts := sim.GetHead() + lastPower, err := getTotalPower(cctx.Context, sim.StateManager, ts) + if err != nil { + return err + } + lastHeight := ts.Height() + + for ts.Height() > firstEpoch && cctx.Err() == nil { + ts, err = sim.Node.Chainstore.LoadTipSet(ts.Parents()) + if err != nil { + return err + } + newEpoch := ts.Height() + if newEpoch != firstEpoch && newEpoch+builtin.EpochsInDay > lastHeight { + continue + } + + newPower, err := getTotalPower(cctx.Context, sim.StateManager, ts) + if err != nil { + return err + } + + growthRate := big.Div( + big.Mul(big.Sub(lastPower.RawBytePower, newPower.RawBytePower), + big.NewInt(builtin.EpochsInDay)), + big.NewInt(int64(lastHeight-newEpoch)), + ) + lastPower = newPower + lastHeight = newEpoch + fmt.Fprintf(cctx.App.Writer, "%s/day\n", types.SizeStr(growthRate)) + } + return cctx.Err() 
+ }, +} diff --git a/cmd/lotus-sim/info_commit.go b/cmd/lotus-sim/info_commit.go new file mode 100644 index 000000000..738fcde95 --- /dev/null +++ b/cmd/lotus-sim/info_commit.go @@ -0,0 +1,148 @@ +package main + +import ( + "bytes" + "fmt" + "os" + "syscall" + + "github.com/streadway/quantile" + "github.com/urfave/cli/v2" + + "github.com/filecoin-project/go-state-types/exitcode" + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/lotus/chain/actors/builtin/miner" + "github.com/filecoin-project/lotus/chain/stmgr" + "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/cmd/lotus-sim/simulation" + "github.com/filecoin-project/lotus/lib/stati" +) + +var infoCommitGasSimCommand = &cli.Command{ + Name: "commit-gas", + Description: "Output information about the gas for commits", + Flags: []cli.Flag{ + &cli.Int64Flag{ + Name: "lookback", + Value: 0, + }, + }, + Action: func(cctx *cli.Context) (err error) { + log := func(f string, i ...interface{}) { + fmt.Fprintf(os.Stderr, f, i...) + } + node, err := open(cctx) + if err != nil { + return err + } + defer func() { + if cerr := node.Close(); err == nil { + err = cerr + } + }() + + go profileOnSignal(cctx, syscall.SIGUSR2) + + sim, err := node.LoadSim(cctx.Context, cctx.String("simulation")) + if err != nil { + return err + } + + var gasAgg, proofsAgg uint64 + var gasAggMax, proofsAggMax uint64 + var gasSingle, proofsSingle uint64 + + qpoints := []struct{ q, tol float64 }{ + {0.01, 0.0005}, + {0.05, 0.001}, + {0.20, 0.01}, + {0.25, 0.01}, + {0.30, 0.01}, + {0.40, 0.01}, + {0.45, 0.01}, + {0.50, 0.01}, + {0.60, 0.01}, + {0.80, 0.01}, + {0.95, 0.001}, + {0.99, 0.0005}, + } + estims := make([]quantile.Estimate, len(qpoints)) + for i, p := range qpoints { + estims[i] = quantile.Known(p.q, p.tol) + } + qua := quantile.New(estims...) 
+ hist, err := stati.NewHistogram([]float64{ + 1, 3, 5, 7, 15, 30, 50, 100, 200, 400, 600, 700, 819}) + if err != nil { + return err + } + + err = sim.Walk(cctx.Context, cctx.Int64("lookback"), func( + sm *stmgr.StateManager, ts *types.TipSet, stCid cid.Cid, + messages []*simulation.AppliedMessage, + ) error { + for _, m := range messages { + if m.ExitCode != exitcode.Ok { + continue + } + if m.Method == miner.Methods.ProveCommitAggregate { + param := miner.ProveCommitAggregateParams{} + err := param.UnmarshalCBOR(bytes.NewReader(m.Params)) + if err != nil { + log("failed to decode params: %+v", err) + return nil + } + c, err := param.SectorNumbers.Count() + if err != nil { + log("failed to count sectors") + return nil + } + gasAgg += uint64(m.GasUsed) + proofsAgg += c + if c == 819 { + gasAggMax += uint64(m.GasUsed) + proofsAggMax += c + } + for i := uint64(0); i < c; i++ { + qua.Add(float64(c)) + } + hist.Observe(float64(c)) + } + + if m.Method == miner.Methods.ProveCommitSector { + gasSingle += uint64(m.GasUsed) + proofsSingle++ + qua.Add(1) + hist.Observe(1) + } + } + + return nil + }) + if err != nil { + return err + } + idealGassUsed := float64(gasAggMax) / float64(proofsAggMax) * float64(proofsAgg+proofsSingle) + + fmt.Printf("Gas usage efficiency in comparison to all 819: %f%%\n", 100*idealGassUsed/float64(gasAgg+gasSingle)) + + fmt.Printf("Proofs in singles: %d\n", proofsSingle) + fmt.Printf("Proofs in Aggs: %d\n", proofsAgg) + fmt.Printf("Proofs in Aggs(819): %d\n", proofsAggMax) + + fmt.Println() + fmt.Println("Quantiles of proofs in given aggregate size:") + for _, p := range qpoints { + fmt.Printf("%.0f%%\t%.0f\n", p.q*100, qua.Get(p.q)) + } + fmt.Println() + fmt.Println("Histogram of messages:") + fmt.Printf("Total\t%d\n", hist.Total()) + for i, b := range hist.Buckets[1:] { + fmt.Printf("%.0f\t%d\n", b, hist.Get(i)) + } + + return nil + }, +} diff --git a/cmd/lotus-sim/info_message.go b/cmd/lotus-sim/info_message.go new file mode 100644 index 
000000000..33c45e728 --- /dev/null +++ b/cmd/lotus-sim/info_message.go @@ -0,0 +1,95 @@ +package main + +import ( + "fmt" + "syscall" + + "github.com/filecoin-project/lotus/chain/stmgr" + "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/cmd/lotus-sim/simulation" + "github.com/filecoin-project/lotus/lib/stati" + "github.com/ipfs/go-cid" + "github.com/streadway/quantile" + "github.com/urfave/cli/v2" +) + +var infoMessageSizeSimCommand = &cli.Command{ + Name: "message-size", + Description: "Output information about message size distribution", + Flags: []cli.Flag{ + &cli.Int64Flag{ + Name: "lookback", + Value: 0, + }, + }, + Action: func(cctx *cli.Context) (err error) { + node, err := open(cctx) + if err != nil { + return err + } + defer func() { + if cerr := node.Close(); err == nil { + err = cerr + } + }() + + go profileOnSignal(cctx, syscall.SIGUSR2) + + sim, err := node.LoadSim(cctx.Context, cctx.String("simulation")) + if err != nil { + return err + } + + qpoints := []struct{ q, tol float64 }{ + {0.30, 0.01}, + {0.40, 0.01}, + {0.60, 0.01}, + {0.70, 0.01}, + {0.80, 0.01}, + {0.85, 0.01}, + {0.90, 0.01}, + {0.95, 0.001}, + {0.99, 0.0005}, + {0.999, 0.0001}, + } + estims := make([]quantile.Estimate, len(qpoints)) + for i, p := range qpoints { + estims[i] = quantile.Known(p.q, p.tol) + } + qua := quantile.New(estims...) 
+ hist, err := stati.NewHistogram([]float64{ + 1 << 8, 1 << 10, 1 << 11, 1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16, + }) + if err != nil { + return err + } + + err = sim.Walk(cctx.Context, cctx.Int64("lookback"), func( + sm *stmgr.StateManager, ts *types.TipSet, stCid cid.Cid, + messages []*simulation.AppliedMessage, + ) error { + for _, m := range messages { + msgSize := float64(m.ChainLength()) + qua.Add(msgSize) + hist.Observe(msgSize) + } + + return nil + }) + if err != nil { + return err + } + fmt.Println("Quantiles of message sizes:") + for _, p := range qpoints { + fmt.Printf("%.1f%%\t%.0f\n", p.q*100, qua.Get(p.q)) + } + fmt.Println() + fmt.Println("Histogram of message sizes:") + fmt.Printf("Total\t%d\n", hist.Total()) + for i, b := range hist.Buckets[1:] { + fmt.Printf("%.0f\t%d\t%.1f%%\n", b, hist.Get(i), 100*hist.GetRatio(i)) + } + + return nil + }, +} diff --git a/cmd/lotus-sim/info_state.go b/cmd/lotus-sim/info_state.go new file mode 100644 index 000000000..5c9541513 --- /dev/null +++ b/cmd/lotus-sim/info_state.go @@ -0,0 +1,141 @@ +package main + +import ( + "bytes" + "context" + "fmt" + "math" + "sync" + "sync/atomic" + + "github.com/ipfs/go-cid" + "github.com/urfave/cli/v2" + cbg "github.com/whyrusleeping/cbor-gen" + + "github.com/filecoin-project/go-state-types/abi" + + "github.com/filecoin-project/lotus/chain/actors/builtin" + "github.com/filecoin-project/lotus/chain/types" +) + +var infoStateGrowthSimCommand = &cli.Command{ + Name: "state-size", + Description: "List daily state size over the course of the simulation starting at the end.", + Action: func(cctx *cli.Context) (err error) { + node, err := open(cctx) + if err != nil { + return err + } + defer func() { + if cerr := node.Close(); err == nil { + err = cerr + } + }() + + sim, err := node.LoadSim(cctx.Context, cctx.String("simulation")) + if err != nil { + return err + } + + // NOTE: This code is entirely read-bound. 
+ store := node.Chainstore.StateBlockstore() + stateSize := func(ctx context.Context, c cid.Cid) (uint64, error) { + seen := cid.NewSet() + sema := make(chan struct{}, 40) + var lock sync.Mutex + var recSize func(cid.Cid) (uint64, error) + recSize = func(c cid.Cid) (uint64, error) { + // Not a part of the chain state. + if err := ctx.Err(); err != nil { + return 0, err + } + + lock.Lock() + visit := seen.Visit(c) + lock.Unlock() + // Already seen? + if !visit { + return 0, nil + } + + var links []cid.Cid + var totalSize uint64 + if err := store.View(c, func(data []byte) error { + totalSize += uint64(len(data)) + return cbg.ScanForLinks(bytes.NewReader(data), func(c cid.Cid) { + if c.Prefix().Codec != cid.DagCBOR { + return + } + + links = append(links, c) + }) + }); err != nil { + return 0, err + } + + var wg sync.WaitGroup + errCh := make(chan error, 1) + cb := func(c cid.Cid) { + size, err := recSize(c) + if err != nil { + select { + case errCh <- err: + default: + } + return + } + atomic.AddUint64(&totalSize, size) + } + asyncCb := func(c cid.Cid) { + wg.Add(1) + go func() { + defer wg.Done() + defer func() { <-sema }() + cb(c) + }() + } + for _, link := range links { + select { + case sema <- struct{}{}: + asyncCb(link) + default: + cb(link) + } + + } + wg.Wait() + + select { + case err := <-errCh: + return 0, err + default: + } + + return totalSize, nil + } + return recSize(c) + } + + firstEpoch := sim.GetStart().Height() + ts := sim.GetHead() + lastHeight := abi.ChainEpoch(math.MaxInt64) + for ts.Height() > firstEpoch && cctx.Err() == nil { + if ts.Height()+builtin.EpochsInDay <= lastHeight { + lastHeight = ts.Height() + + parentStateSize, err := stateSize(cctx.Context, ts.ParentState()) + if err != nil { + return err + } + + fmt.Fprintf(cctx.App.Writer, "%d: %s\n", ts.Height(), types.SizeStr(types.NewInt(parentStateSize))) + } + + ts, err = sim.Node.Chainstore.LoadTipSet(ts.Parents()) + if err != nil { + return err + } + } + return cctx.Err() + }, +} diff 
--git a/cmd/lotus-sim/info_wdpost.go b/cmd/lotus-sim/info_wdpost.go new file mode 100644 index 000000000..719a133b1 --- /dev/null +++ b/cmd/lotus-sim/info_wdpost.go @@ -0,0 +1,69 @@ +package main + +import ( + "fmt" + + "github.com/ipfs/go-cid" + "github.com/urfave/cli/v2" + + "github.com/filecoin-project/go-state-types/exitcode" + + "github.com/filecoin-project/lotus/chain/actors/builtin" + "github.com/filecoin-project/lotus/chain/actors/builtin/miner" + "github.com/filecoin-project/lotus/chain/stmgr" + "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/cmd/lotus-sim/simulation" +) + +var infoWindowPostBandwidthSimCommand = &cli.Command{ + Name: "post-bandwidth", + Description: "List average chain bandwidth used by window posts for each day of the simulation.", + Action: func(cctx *cli.Context) (err error) { + node, err := open(cctx) + if err != nil { + return err + } + defer func() { + if cerr := node.Close(); err == nil { + err = cerr + } + }() + + sim, err := node.LoadSim(cctx.Context, cctx.String("simulation")) + if err != nil { + return err + } + + var postGas, totalGas int64 + printStats := func() { + fmt.Fprintf(cctx.App.Writer, "%.4f%%\n", float64(100*postGas)/float64(totalGas)) + } + idx := 0 + err = sim.Walk(cctx.Context, 0, func( + sm *stmgr.StateManager, ts *types.TipSet, stCid cid.Cid, + messages []*simulation.AppliedMessage, + ) error { + for _, m := range messages { + totalGas += m.GasUsed + if m.ExitCode != exitcode.Ok { + continue + } + if m.Method == miner.Methods.SubmitWindowedPoSt { + postGas += m.GasUsed + } + } + idx++ + idx %= builtin.EpochsInDay + if idx == 0 { + printStats() + postGas = 0 + totalGas = 0 + } + return nil + }) + if idx > 0 { + printStats() + } + return err + }, +} diff --git a/cmd/lotus-sim/list.go b/cmd/lotus-sim/list.go new file mode 100644 index 000000000..37e767b9a --- /dev/null +++ b/cmd/lotus-sim/list.go @@ -0,0 +1,38 @@ +package main + +import ( + "fmt" + "text/tabwriter" + + 
"github.com/urfave/cli/v2" +) + +var listSimCommand = &cli.Command{ + Name: "list", + Action: func(cctx *cli.Context) (err error) { + node, err := open(cctx) + if err != nil { + return err + } + defer func() { + if cerr := node.Close(); err == nil { + err = cerr + } + }() + + list, err := node.ListSims(cctx.Context) + if err != nil { + return err + } + tw := tabwriter.NewWriter(cctx.App.Writer, 8, 8, 0, ' ', 0) + for _, name := range list { + sim, err := node.LoadSim(cctx.Context, name) + if err != nil { + return err + } + head := sim.GetHead() + fmt.Fprintf(tw, "%s\t%s\t%s\n", name, head.Height(), head.Key()) + } + return tw.Flush() + }, +} diff --git a/cmd/lotus-sim/main.go b/cmd/lotus-sim/main.go new file mode 100644 index 000000000..e6cd5d993 --- /dev/null +++ b/cmd/lotus-sim/main.go @@ -0,0 +1,63 @@ +package main + +import ( + "context" + "fmt" + "os" + "os/signal" + "syscall" + + "github.com/urfave/cli/v2" + + logging "github.com/ipfs/go-log/v2" +) + +var root []*cli.Command = []*cli.Command{ + createSimCommand, + deleteSimCommand, + copySimCommand, + renameSimCommand, + listSimCommand, + + runSimCommand, + infoSimCommand, + upgradeCommand, +} + +func main() { + if _, set := os.LookupEnv("GOLOG_LOG_LEVEL"); !set { + _ = logging.SetLogLevel("simulation", "DEBUG") + _ = logging.SetLogLevel("simulation-mock", "DEBUG") + } + app := &cli.App{ + Name: "lotus-sim", + Usage: "A tool to simulate a network.", + Commands: root, + Writer: os.Stdout, + ErrWriter: os.Stderr, + Flags: []cli.Flag{ + &cli.StringFlag{ + Name: "repo", + EnvVars: []string{"LOTUS_PATH"}, + Hidden: true, + Value: "~/.lotus", + }, + &cli.StringFlag{ + Name: "simulation", + Aliases: []string{"sim"}, + EnvVars: []string{"LOTUS_SIMULATION"}, + Value: "default", + }, + }, + } + + ctx, cancel := signal.NotifyContext(context.Background(), + syscall.SIGTERM, syscall.SIGINT, syscall.SIGHUP) + defer cancel() + + if err := app.RunContext(ctx, os.Args); err != nil { + fmt.Fprintf(os.Stderr, "Error: %s\n", 
err) + os.Exit(1) + return + } +} diff --git a/cmd/lotus-sim/profile.go b/cmd/lotus-sim/profile.go new file mode 100644 index 000000000..63e0ef3bd --- /dev/null +++ b/cmd/lotus-sim/profile.go @@ -0,0 +1,94 @@ +package main + +import ( + "context" + "fmt" + "os" + "os/signal" + "path/filepath" + "runtime/pprof" + "time" + + "github.com/urfave/cli/v2" +) + +func takeProfiles(ctx context.Context) (fname string, _err error) { + dir, err := os.MkdirTemp(".", ".profiles-temp*") + if err != nil { + return "", err + } + + if err := writeProfiles(ctx, dir); err != nil { + _ = os.RemoveAll(dir) + return "", err + } + + fname = fmt.Sprintf("pprof-simulation-%s", time.Now().Format(time.RFC3339)) + if err := os.Rename(dir, fname); err != nil { + _ = os.RemoveAll(dir) + return "", err + } + return fname, nil +} + +func writeProfiles(ctx context.Context, dir string) error { + for _, profile := range pprof.Profiles() { + file, err := os.Create(filepath.Join(dir, profile.Name()+".pprof.gz")) + if err != nil { + return err + } + if err := profile.WriteTo(file, 0); err != nil { + _ = file.Close() + return err + } + if err := file.Close(); err != nil { + return err + } + if err := ctx.Err(); err != nil { + return err + } + } + + file, err := os.Create(filepath.Join(dir, "cpu.pprof.gz")) + if err != nil { + return err + } + + if err := pprof.StartCPUProfile(file); err != nil { + _ = file.Close() + return err + } + select { + case <-time.After(30 * time.Second): + case <-ctx.Done(): + } + pprof.StopCPUProfile() + err = file.Close() + if err := ctx.Err(); err != nil { + return err + } + return err +} + +func profileOnSignal(cctx *cli.Context, signals ...os.Signal) { + ch := make(chan os.Signal, 1) + signal.Notify(ch, signals...) 
+ defer signal.Stop(ch) + + for { + select { + case <-ch: + fname, err := takeProfiles(cctx.Context) + switch err { + case context.Canceled: + return + case nil: + fmt.Fprintf(cctx.App.ErrWriter, "Wrote profile to %q\n", fname) + default: + fmt.Fprintf(cctx.App.ErrWriter, "ERROR: failed to write profile: %s\n", err) + } + case <-cctx.Done(): + return + } + } +} diff --git a/cmd/lotus-sim/rename.go b/cmd/lotus-sim/rename.go new file mode 100644 index 000000000..c336717c7 --- /dev/null +++ b/cmd/lotus-sim/rename.go @@ -0,0 +1,29 @@ +package main + +import ( + "fmt" + + "github.com/urfave/cli/v2" +) + +var renameSimCommand = &cli.Command{ + Name: "rename", + ArgsUsage: "", + Action: func(cctx *cli.Context) (err error) { + node, err := open(cctx) + if err != nil { + return err + } + defer func() { + if cerr := node.Close(); err == nil { + err = cerr + } + }() + + if cctx.NArg() != 1 { + return fmt.Errorf("expected 1 argument") + } + name := cctx.Args().First() + return node.RenameSim(cctx.Context, cctx.String("simulation"), name) + }, +} diff --git a/cmd/lotus-sim/run.go b/cmd/lotus-sim/run.go new file mode 100644 index 000000000..a985fdf9e --- /dev/null +++ b/cmd/lotus-sim/run.go @@ -0,0 +1,72 @@ +package main + +import ( + "fmt" + "os" + "os/signal" + "syscall" + + "github.com/urfave/cli/v2" +) + +var runSimCommand = &cli.Command{ + Name: "run", + Description: `Run the simulation. + +Signals: +- SIGUSR1: Print information about the current simulation (equivalent to 'lotus-sim info'). 
+- SIGUSR2: Write pprof profiles to ./pprof-simulation-$DATE/`, + Flags: []cli.Flag{ + &cli.IntFlag{ + Name: "epochs", + Usage: "Advance the given number of epochs then stop.", + }, + }, + Action: func(cctx *cli.Context) (err error) { + node, err := open(cctx) + if err != nil { + return err + } + defer func() { + if cerr := node.Close(); err == nil { + err = cerr + } + }() + + go profileOnSignal(cctx, syscall.SIGUSR2) + + sim, err := node.LoadSim(cctx.Context, cctx.String("simulation")) + if err != nil { + return err + } + targetEpochs := cctx.Int("epochs") + + ch := make(chan os.Signal, 1) + signal.Notify(ch, syscall.SIGUSR1) + defer signal.Stop(ch) + + for i := 0; targetEpochs == 0 || i < targetEpochs; i++ { + ts, err := sim.Step(cctx.Context) + if err != nil { + return err + } + + fmt.Fprintf(cctx.App.Writer, "advanced to %d %s\n", ts.Height(), ts.Key()) + + // Print + select { + case <-ch: + fmt.Fprintln(cctx.App.Writer, "---------------------") + if err := printInfo(cctx.Context, sim, cctx.App.Writer); err != nil { + fmt.Fprintf(cctx.App.ErrWriter, "ERROR: failed to print info: %s\n", err) + } + fmt.Fprintln(cctx.App.Writer, "---------------------") + case <-cctx.Context.Done(): + return cctx.Err() + default: + } + } + fmt.Fprintln(cctx.App.Writer, "simulation done") + return err + }, +} diff --git a/cmd/lotus-sim/simulation/block.go b/cmd/lotus-sim/simulation/block.go new file mode 100644 index 000000000..93e6a3191 --- /dev/null +++ b/cmd/lotus-sim/simulation/block.go @@ -0,0 +1,93 @@ +package simulation + +import ( + "context" + "crypto/sha256" + "encoding/binary" + "time" + + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/chain/types" +) + +const beaconPrefix = "mockbeacon:" + +// nextBeaconEntries returns a fake beacon entries for the next block. 
+func (sim *Simulation) nextBeaconEntries() []types.BeaconEntry { + parentBeacons := sim.head.Blocks()[0].BeaconEntries + lastBeacon := parentBeacons[len(parentBeacons)-1] + beaconRound := lastBeacon.Round + 1 + + buf := make([]byte, len(beaconPrefix)+8) + copy(buf, beaconPrefix) + binary.BigEndian.PutUint64(buf[len(beaconPrefix):], beaconRound) + beaconRand := sha256.Sum256(buf) + return []types.BeaconEntry{{ + Round: beaconRound, + Data: beaconRand[:], + }} +} + +// nextTicket returns a fake ticket for the next block. +func (sim *Simulation) nextTicket() *types.Ticket { + newProof := sha256.Sum256(sim.head.MinTicket().VRFProof) + return &types.Ticket{ + VRFProof: newProof[:], + } +} + +// makeTipSet generates and executes the next tipset from the given messages. This method: +// +// 1. Stores the given messages in the Chainstore. +// 2. Creates and persists a single block mined by the same miner as the parent. +// 3. Creates a tipset from this block and executes it. +// 4. Returns the resulting tipset. +// +// This method does _not_ mutate local state (although it does add blocks to the datastore). +func (sim *Simulation) makeTipSet(ctx context.Context, messages []*types.Message) (*types.TipSet, error) { + parentTs := sim.head + parentState, parentRec, err := sim.StateManager.TipSetState(ctx, parentTs) + if err != nil { + return nil, xerrors.Errorf("failed to compute parent tipset: %w", err) + } + msgsCid, err := sim.storeMessages(ctx, messages) + if err != nil { + return nil, xerrors.Errorf("failed to store block messages: %w", err) + } + + uts := parentTs.MinTimestamp() + build.BlockDelaySecs + + blks := []*types.BlockHeader{{ + Miner: parentTs.MinTicketBlock().Miner, // keep reusing the same miner. 
+ Ticket: sim.nextTicket(), + BeaconEntries: sim.nextBeaconEntries(), + Parents: parentTs.Cids(), + Height: parentTs.Height() + 1, + ParentStateRoot: parentState, + ParentMessageReceipts: parentRec, + Messages: msgsCid, + ParentBaseFee: abi.NewTokenAmount(0), + Timestamp: uts, + ElectionProof: &types.ElectionProof{WinCount: 1}, + }} + err = sim.Node.Chainstore.PersistBlockHeaders(blks...) + if err != nil { + return nil, xerrors.Errorf("failed to persist block headers: %w", err) + } + newTipSet, err := types.NewTipSet(blks) + if err != nil { + return nil, xerrors.Errorf("failed to create new tipset: %w", err) + } + now := time.Now() + _, _, err = sim.StateManager.TipSetState(ctx, newTipSet) + if err != nil { + return nil, xerrors.Errorf("failed to compute new tipset: %w", err) + } + duration := time.Since(now) + log.Infow("computed tipset", "duration", duration, "height", newTipSet.Height()) + + return newTipSet, nil +} diff --git a/cmd/lotus-sim/simulation/blockbuilder/blockbuilder.go b/cmd/lotus-sim/simulation/blockbuilder/blockbuilder.go new file mode 100644 index 000000000..2ffc0bf14 --- /dev/null +++ b/cmd/lotus-sim/simulation/blockbuilder/blockbuilder.go @@ -0,0 +1,280 @@ +package blockbuilder + +import ( + "context" + "math" + + "go.uber.org/zap" + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/network" + + "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/chain/actors" + "github.com/filecoin-project/lotus/chain/actors/adt" + "github.com/filecoin-project/lotus/chain/actors/builtin" + "github.com/filecoin-project/lotus/chain/actors/builtin/account" + "github.com/filecoin-project/lotus/chain/state" + "github.com/filecoin-project/lotus/chain/stmgr" + "github.com/filecoin-project/lotus/chain/store" + "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/chain/vm" +) + +const ( + // 0.25 
is the default, but the number below is from the network. + gasOverestimation = 1.0 / 0.808 + // The number of expected blocks in a tipset. We use this to determine how much gas a tipset + // has. + // 5 per tipset, but we effectively get 4 blocks worth of messages. + expectedBlocks = 4 + // TODO: This will produce invalid blocks but it will accurately model the amount of gas + // we're willing to use per-tipset. + // A more correct approach would be to produce 5 blocks. We can do that later. + targetGas = build.BlockGasTarget * expectedBlocks +) + +type BlockBuilder struct { + ctx context.Context + logger *zap.SugaredLogger + + parentTs *types.TipSet + parentSt *state.StateTree + vm *vm.VM + sm *stmgr.StateManager + + gasTotal int64 + messages []*types.Message +} + +// NewBlockBuilder constructs a new block builder from the parent state. Use this to pack a block +// with messages. +// +// NOTE: The context applies to the life of the block builder itself (but does not need to be canceled). +func NewBlockBuilder(ctx context.Context, logger *zap.SugaredLogger, sm *stmgr.StateManager, parentTs *types.TipSet) (*BlockBuilder, error) { + parentState, _, err := sm.TipSetState(ctx, parentTs) + if err != nil { + return nil, err + } + parentSt, err := sm.StateTree(parentState) + if err != nil { + return nil, err + } + + bb := &BlockBuilder{ + ctx: ctx, + logger: logger.With("epoch", parentTs.Height()+1), + sm: sm, + parentTs: parentTs, + parentSt: parentSt, + } + + // Then we construct a VM to execute messages for gas estimation. + // + // Most parts of this VM are "real" except: + // 1. We don't charge a fee. + // 2. The runtime has "fake" proof logic. + // 3. We don't actually save any of the results. 
+ r := store.NewChainRand(sm.ChainStore(), parentTs.Cids()) + vmopt := &vm.VMOpts{ + StateBase: parentState, + Epoch: parentTs.Height() + 1, + Rand: r, + Bstore: sm.ChainStore().StateBlockstore(), + Syscalls: sm.ChainStore().VMSys(), + CircSupplyCalc: sm.GetVMCirculatingSupply, + NtwkVersion: sm.GetNtwkVersion, + BaseFee: abi.NewTokenAmount(0), + LookbackState: stmgr.LookbackStateGetterForTipset(sm, parentTs), + } + bb.vm, err = vm.NewVM(bb.ctx, vmopt) + if err != nil { + return nil, err + } + return bb, nil +} + +// PushMessages tries to push the specified message into the block. +// +// 1. All messages will be executed in-order. +// 2. Gas computation & nonce selection will be handled internally. +// 3. The base-fee is 0 so the sender does not need funds. +// 4. As usual, the sender must be an account (any account). +// 5. If the message fails to execute, this method will fail. +// +// Returns ErrOutOfGas when out of gas. Check BlockBuilder.GasRemaining and try pushing a cheaper +// message. +func (bb *BlockBuilder) PushMessage(msg *types.Message) (*types.MessageReceipt, error) { + if bb.gasTotal >= targetGas { + return nil, new(ErrOutOfGas) + } + + st := bb.StateTree() + store := bb.ActorStore() + + // Copy the message before we start mutating it. + msgCpy := *msg + msg = &msgCpy + + actor, err := st.GetActor(msg.From) + if err != nil { + return nil, err + } + if !builtin.IsAccountActor(actor.Code) { + return nil, xerrors.Errorf( + "messags may only be sent from account actors, got message from %s (%s)", + msg.From, builtin.ActorNameByCode(actor.Code), + ) + } + msg.Nonce = actor.Nonce + if msg.From.Protocol() == address.ID { + state, err := account.Load(store, actor) + if err != nil { + return nil, err + } + msg.From, err = state.PubkeyAddress() + if err != nil { + return nil, err + } + } + + // TODO: Our gas estimation is broken for payment channels due to horrible hacks in + // gasEstimateGasLimit. 
+ if msg.Value == types.EmptyInt { + msg.Value = abi.NewTokenAmount(0) + } + msg.GasPremium = abi.NewTokenAmount(0) + msg.GasFeeCap = abi.NewTokenAmount(0) + msg.GasLimit = build.BlockGasTarget + + // We manually snapshot so we can revert nonce changes, etc. on failure. + err = st.Snapshot(bb.ctx) + if err != nil { + return nil, xerrors.Errorf("failed to take a snapshot while estimating message gas: %w", err) + } + defer st.ClearSnapshot() + + ret, err := bb.vm.ApplyMessage(bb.ctx, msg) + if err != nil { + _ = st.Revert() + return nil, err + } + if ret.ActorErr != nil { + _ = st.Revert() + return nil, ret.ActorErr + } + + // Sometimes there are bugs. Let's catch them. + if ret.GasUsed == 0 { + _ = st.Revert() + return nil, xerrors.Errorf("used no gas %v -> %v", msg, ret) + } + + // Update the gas limit taking overestimation into account. + msg.GasLimit = int64(math.Ceil(float64(ret.GasUsed) * gasOverestimation)) + + // Did we go over? Yes, revert. + newTotal := bb.gasTotal + msg.GasLimit + if newTotal > targetGas { + _ = st.Revert() + return nil, &ErrOutOfGas{Available: targetGas - bb.gasTotal, Required: msg.GasLimit} + } + bb.gasTotal = newTotal + + bb.messages = append(bb.messages, msg) + return &ret.MessageReceipt, nil +} + +// ActorStore returns the VM's current (pending) blockstore. +func (bb *BlockBuilder) ActorStore() adt.Store { + return bb.vm.ActorStore(bb.ctx) +} + +// StateTree returns the VM's current (pending) state-tree. This includes any changes made by +// successfully pushed messages. +// +// You probably want ParentStateTree +func (bb *BlockBuilder) StateTree() *state.StateTree { + return bb.vm.StateTree().(*state.StateTree) +} + +// ParentStateTree returns the parent state-tree (not the paren't tipset's parent state-tree). +func (bb *BlockBuilder) ParentStateTree() *state.StateTree { + return bb.parentSt +} + +// StateTreeByHeight will return a state-tree up through and including the current in-progress +// epoch. 
+// +// NOTE: This will return the state after the given epoch, not the parent state for the epoch. +func (bb *BlockBuilder) StateTreeByHeight(epoch abi.ChainEpoch) (*state.StateTree, error) { + now := bb.Height() + if epoch > now { + return nil, xerrors.Errorf( + "cannot load state-tree from future: %d > %d", epoch, bb.Height(), + ) + } else if epoch <= 0 { + return nil, xerrors.Errorf( + "cannot load state-tree: epoch %d <= 0", epoch, + ) + } + + // Manually handle "now" and "previous". + switch epoch { + case now: + return bb.StateTree(), nil + case now - 1: + return bb.ParentStateTree(), nil + } + + // Get the tipset of the block _after_ the target epoch so we can use its parent state. + targetTs, err := bb.sm.ChainStore().GetTipsetByHeight(bb.ctx, epoch+1, bb.parentTs, false) + if err != nil { + return nil, err + } + + return bb.sm.StateTree(targetTs.ParentState()) +} + +// Messages returns all messages currently packed into the next block. +// 1. DO NOT modify the slice, copy it. +// 2. DO NOT retain the slice, copy it. +func (bb *BlockBuilder) Messages() []*types.Message { + return bb.messages +} + +// GasRemaining returns the amount of remaining gas in the next block. +func (bb *BlockBuilder) GasRemaining() int64 { + return targetGas - bb.gasTotal +} + +// ParentTipSet returns the parent tipset. +func (bb *BlockBuilder) ParentTipSet() *types.TipSet { + return bb.parentTs +} + +// Height returns the epoch for the target block. +func (bb *BlockBuilder) Height() abi.ChainEpoch { + return bb.parentTs.Height() + 1 +} + +// NetworkVersion returns the network version for the target block. +func (bb *BlockBuilder) NetworkVersion() network.Version { + return bb.sm.GetNtwkVersion(bb.ctx, bb.Height()) +} + +// StateManager returns the stmgr.StateManager. +func (bb *BlockBuilder) StateManager() *stmgr.StateManager { + return bb.sm +} + +// ActorsVersion returns the actors version for the target block. 
+func (bb *BlockBuilder) ActorsVersion() actors.Version { + return actors.VersionForNetwork(bb.NetworkVersion()) +} + +func (bb *BlockBuilder) L() *zap.SugaredLogger { + return bb.logger +} diff --git a/cmd/lotus-sim/simulation/blockbuilder/errors.go b/cmd/lotus-sim/simulation/blockbuilder/errors.go new file mode 100644 index 000000000..ddf08ea18 --- /dev/null +++ b/cmd/lotus-sim/simulation/blockbuilder/errors.go @@ -0,0 +1,25 @@ +package blockbuilder + +import ( + "errors" + "fmt" +) + +// ErrOutOfGas is returned from BlockBuilder.PushMessage when the block does not have enough gas to +// fit the given message. +type ErrOutOfGas struct { + Available, Required int64 +} + +func (e *ErrOutOfGas) Error() string { + if e.Available == 0 { + return "out of gas: block full" + } + return fmt.Sprintf("out of gas: %d < %d", e.Required, e.Available) +} + +// IsOutOfGas returns true if the error is an "out of gas" error. +func IsOutOfGas(err error) bool { + var oog *ErrOutOfGas + return errors.As(err, &oog) +} diff --git a/cmd/lotus-sim/simulation/messages.go b/cmd/lotus-sim/simulation/messages.go new file mode 100644 index 000000000..5bed27436 --- /dev/null +++ b/cmd/lotus-sim/simulation/messages.go @@ -0,0 +1,58 @@ +package simulation + +import ( + "context" + + "github.com/ipfs/go-cid" + cbg "github.com/whyrusleeping/cbor-gen" + + blockadt "github.com/filecoin-project/specs-actors/actors/util/adt" + + "github.com/filecoin-project/lotus/chain/types" +) + +// toArray converts the given set of CIDs to an AMT. This is usually used to pack messages into blocks. +func toArray(store blockadt.Store, cids []cid.Cid) (cid.Cid, error) { + arr := blockadt.MakeEmptyArray(store) + for i, c := range cids { + oc := cbg.CborCid(c) + if err := arr.Set(uint64(i), &oc); err != nil { + return cid.Undef, err + } + } + return arr.Root() +} + +// storeMessages packs a set of messages into a types.MsgMeta and returns the resulting CID. 
The +// resulting CID is valid for the BlockHeader's Messages field. +func (sim *Simulation) storeMessages(ctx context.Context, messages []*types.Message) (cid.Cid, error) { + // We store all messages as "bls" messages so they're executed in-order. This ensures + // accurate gas accounting. It also ensures we don't, e.g., try to fund a miner after we + // fail a pre-commit... + var msgCids []cid.Cid + for _, msg := range messages { + c, err := sim.Node.Chainstore.PutMessage(msg) + if err != nil { + return cid.Undef, err + } + msgCids = append(msgCids, c) + } + adtStore := sim.Node.Chainstore.ActorStore(ctx) + blsMsgArr, err := toArray(adtStore, msgCids) + if err != nil { + return cid.Undef, err + } + sekpMsgArr, err := toArray(adtStore, nil) + if err != nil { + return cid.Undef, err + } + + msgsCid, err := adtStore.Put(adtStore.Context(), &types.MsgMeta{ + BlsMessages: blsMsgArr, + SecpkMessages: sekpMsgArr, + }) + if err != nil { + return cid.Undef, err + } + return msgsCid, nil +} diff --git a/cmd/lotus-sim/simulation/mock/mock.go b/cmd/lotus-sim/simulation/mock/mock.go new file mode 100644 index 000000000..38648f758 --- /dev/null +++ b/cmd/lotus-sim/simulation/mock/mock.go @@ -0,0 +1,179 @@ +package mock + +import ( + "bytes" + "context" + "encoding/binary" + "fmt" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/ipfs/go-cid" + logging "github.com/ipfs/go-log/v2" + + miner5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/miner" + proof5 "github.com/filecoin-project/specs-actors/v5/actors/runtime/proof" + tutils "github.com/filecoin-project/specs-actors/v5/support/testing" + + "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper" +) + +// Ideally, we'd use extern/sector-storage/mock. Unfortunately, those mocks are a bit _too_ accurate +// and would force us to load sector info for window post proofs. 
+ +const ( + mockSealProofPrefix = "valid seal proof:" + mockAggregateSealProofPrefix = "valid aggregate seal proof:" + mockPoStProofPrefix = "valid post proof:" +) + +var log = logging.Logger("simulation-mock") + +// mockVerifier is a simple mock for verifying "fake" proofs. +type mockVerifier struct{} + +var Verifier ffiwrapper.Verifier = mockVerifier{} + +func (mockVerifier) VerifySeal(proof proof5.SealVerifyInfo) (bool, error) { + addr, err := address.NewIDAddress(uint64(proof.Miner)) + if err != nil { + return false, err + } + mockProof, err := MockSealProof(proof.SealProof, addr) + if err != nil { + return false, err + } + if bytes.Equal(proof.Proof, mockProof) { + return true, nil + } + log.Debugw("invalid seal proof", "expected", mockProof, "actual", proof.Proof, "miner", addr) + return false, nil +} + +func (mockVerifier) VerifyAggregateSeals(aggregate proof5.AggregateSealVerifyProofAndInfos) (bool, error) { + addr, err := address.NewIDAddress(uint64(aggregate.Miner)) + if err != nil { + return false, err + } + mockProof, err := MockAggregateSealProof(aggregate.SealProof, addr, len(aggregate.Infos)) + if err != nil { + return false, err + } + if bytes.Equal(aggregate.Proof, mockProof) { + return true, nil + } + log.Debugw("invalid aggregate seal proof", + "expected", mockProof, + "actual", aggregate.Proof, + "count", len(aggregate.Infos), + "miner", addr, + ) + return false, nil +} +func (mockVerifier) VerifyWinningPoSt(ctx context.Context, info proof5.WinningPoStVerifyInfo) (bool, error) { + panic("should not be called") +} +func (mockVerifier) VerifyWindowPoSt(ctx context.Context, info proof5.WindowPoStVerifyInfo) (bool, error) { + if len(info.Proofs) != 1 { + return false, fmt.Errorf("expected exactly one proof") + } + proof := info.Proofs[0] + addr, err := address.NewIDAddress(uint64(info.Prover)) + if err != nil { + return false, err + } + mockProof, err := MockWindowPoStProof(proof.PoStProof, addr) + if err != nil { + return false, err + } + if 
bytes.Equal(proof.ProofBytes, mockProof) { + return true, nil + } + + log.Debugw("invalid window post proof", + "expected", mockProof, + "actual", info.Proofs[0], + "miner", addr, + ) + return false, nil +} + +func (mockVerifier) GenerateWinningPoStSectorChallenge(context.Context, abi.RegisteredPoStProof, abi.ActorID, abi.PoStRandomness, uint64) ([]uint64, error) { + panic("should not be called") +} + +// MockSealProof generates a mock "seal" proof tied to the specified proof type and the given miner. +func MockSealProof(proofType abi.RegisteredSealProof, minerAddr address.Address) ([]byte, error) { + plen, err := proofType.ProofSize() + if err != nil { + return nil, err + } + proof := make([]byte, plen) + i := copy(proof, mockSealProofPrefix) + binary.BigEndian.PutUint64(proof[i:], uint64(proofType)) + i += 8 + i += copy(proof[i:], minerAddr.Bytes()) + return proof, nil +} + +// MockAggregateSealProof generates a mock "seal" aggregate proof tied to the specified proof type, +// the given miner, and the number of proven sectors. +func MockAggregateSealProof(proofType abi.RegisteredSealProof, minerAddr address.Address, count int) ([]byte, error) { + proof := make([]byte, aggProofLen(count)) + i := copy(proof, mockAggregateSealProofPrefix) + binary.BigEndian.PutUint64(proof[i:], uint64(proofType)) + i += 8 + binary.BigEndian.PutUint64(proof[i:], uint64(count)) + i += 8 + i += copy(proof[i:], minerAddr.Bytes()) + + return proof, nil +} + +// MockWindowPoStProof generates a mock "window post" proof tied to the specified proof type, and the +// given miner. +func MockWindowPoStProof(proofType abi.RegisteredPoStProof, minerAddr address.Address) ([]byte, error) { + plen, err := proofType.ProofSize() + if err != nil { + return nil, err + } + proof := make([]byte, plen) + i := copy(proof, mockPoStProofPrefix) + i += copy(proof[i:], minerAddr.Bytes()) + return proof, nil +} + +// makeCommR generates a "fake" but valid CommR for a sector. 
It is unique for the given sector/miner. +func MockCommR(minerAddr address.Address, sno abi.SectorNumber) cid.Cid { + return tutils.MakeCID(fmt.Sprintf("%s:%d", minerAddr, sno), &miner5.SealedCIDPrefix) +} + +// TODO: dedup +func aggProofLen(nproofs int) int { + switch { + case nproofs <= 8: + return 11220 + case nproofs <= 16: + return 14196 + case nproofs <= 32: + return 17172 + case nproofs <= 64: + return 20148 + case nproofs <= 128: + return 23124 + case nproofs <= 256: + return 26100 + case nproofs <= 512: + return 29076 + case nproofs <= 1024: + return 32052 + case nproofs <= 2048: + return 35028 + case nproofs <= 4096: + return 38004 + case nproofs <= 8192: + return 40980 + default: + panic("too many proofs") + } +} diff --git a/cmd/lotus-sim/simulation/node.go b/cmd/lotus-sim/simulation/node.go new file mode 100644 index 000000000..5b8bf2bf9 --- /dev/null +++ b/cmd/lotus-sim/simulation/node.go @@ -0,0 +1,241 @@ +package simulation + +import ( + "context" + "strings" + + "go.uber.org/multierr" + "golang.org/x/xerrors" + + "github.com/ipfs/go-datastore" + "github.com/ipfs/go-datastore/query" + + "github.com/filecoin-project/lotus/blockstore" + "github.com/filecoin-project/lotus/chain/stmgr" + "github.com/filecoin-project/lotus/chain/store" + "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/chain/vm" + "github.com/filecoin-project/lotus/cmd/lotus-sim/simulation/mock" + "github.com/filecoin-project/lotus/cmd/lotus-sim/simulation/stages" + "github.com/filecoin-project/lotus/node/repo" +) + +// Node represents the local lotus node, or at least the part of it we care about. +type Node struct { + repo repo.LockedRepo + Blockstore blockstore.Blockstore + MetadataDS datastore.Batching + Chainstore *store.ChainStore +} + +// OpenNode opens the local lotus node for writing. This will fail if the node is online. 
+func OpenNode(ctx context.Context, path string) (*Node, error) { + r, err := repo.NewFS(path) + if err != nil { + return nil, err + } + + return NewNode(ctx, r) +} + +// NewNode constructs a new node from the given repo. +func NewNode(ctx context.Context, r repo.Repo) (nd *Node, _err error) { + lr, err := r.Lock(repo.FullNode) + if err != nil { + return nil, err + } + defer func() { + if _err != nil { + _ = lr.Close() + } + }() + + bs, err := lr.Blockstore(ctx, repo.UniversalBlockstore) + if err != nil { + return nil, err + } + + ds, err := lr.Datastore(ctx, "/metadata") + if err != nil { + return nil, err + } + return &Node{ + repo: lr, + Chainstore: store.NewChainStore(bs, bs, ds, vm.Syscalls(mock.Verifier), nil), + MetadataDS: ds, + Blockstore: bs, + }, err +} + +// Close cleanly close the repo. Please call this on shutdown to make sure everything is flushed. +func (nd *Node) Close() error { + if nd.repo != nil { + return nd.repo.Close() + } + return nil +} + +// LoadSim loads +func (nd *Node) LoadSim(ctx context.Context, name string) (*Simulation, error) { + stages, err := stages.DefaultPipeline() + if err != nil { + return nil, err + } + sim := &Simulation{ + Node: nd, + name: name, + stages: stages, + } + + sim.head, err = sim.loadNamedTipSet("head") + if err != nil { + return nil, err + } + sim.start, err = sim.loadNamedTipSet("start") + if err != nil { + return nil, err + } + + err = sim.loadConfig() + if err != nil { + return nil, xerrors.Errorf("failed to load config for simulation %s: %w", name, err) + } + + us, err := sim.config.upgradeSchedule() + if err != nil { + return nil, xerrors.Errorf("failed to create upgrade schedule for simulation %s: %w", name, err) + } + sim.StateManager, err = stmgr.NewStateManagerWithUpgradeSchedule(nd.Chainstore, us) + if err != nil { + return nil, xerrors.Errorf("failed to create state manager for simulation %s: %w", name, err) + } + return sim, nil +} + +// Create creates a new simulation. 
+// +// - This will fail if a simulation already exists with the given name. +// - Name must not contain a '/'. +func (nd *Node) CreateSim(ctx context.Context, name string, head *types.TipSet) (*Simulation, error) { + if strings.Contains(name, "/") { + return nil, xerrors.Errorf("simulation name %q cannot contain a '/'", name) + } + stages, err := stages.DefaultPipeline() + if err != nil { + return nil, err + } + sim := &Simulation{ + name: name, + Node: nd, + StateManager: stmgr.NewStateManager(nd.Chainstore), + stages: stages, + } + if has, err := nd.MetadataDS.Has(sim.key("head")); err != nil { + return nil, err + } else if has { + return nil, xerrors.Errorf("simulation named %s already exists", name) + } + + if err := sim.storeNamedTipSet("start", head); err != nil { + return nil, xerrors.Errorf("failed to set simulation start: %w", err) + } + + if err := sim.SetHead(head); err != nil { + return nil, err + } + + return sim, nil +} + +// ListSims lists all simulations. +func (nd *Node) ListSims(ctx context.Context) ([]string, error) { + prefix := simulationPrefix.ChildString("head").String() + items, err := nd.MetadataDS.Query(query.Query{ + Prefix: prefix, + KeysOnly: true, + Orders: []query.Order{query.OrderByKey{}}, + }) + if err != nil { + return nil, xerrors.Errorf("failed to list simulations: %w", err) + } + + defer func() { _ = items.Close() }() + + var names []string + for { + select { + case result, ok := <-items.Next(): + if !ok { + return names, nil + } + if result.Error != nil { + return nil, xerrors.Errorf("failed to retrieve next simulation: %w", result.Error) + } + names = append(names, strings.TrimPrefix(result.Key, prefix+"/")) + case <-ctx.Done(): + return nil, ctx.Err() + } + } +} + +var simFields = []string{"head", "start", "config"} + +// DeleteSim deletes a simulation and all related metadata. +// +// NOTE: This function does not delete associated messages, blocks, or chain state. 
+func (nd *Node) DeleteSim(ctx context.Context, name string) error { + var err error + for _, field := range simFields { + key := simulationPrefix.ChildString(field).ChildString(name) + err = multierr.Append(err, nd.MetadataDS.Delete(key)) + } + return err +} + +// CopySim copies a simulation. +func (nd *Node) CopySim(ctx context.Context, oldName, newName string) error { + if strings.Contains(newName, "/") { + return xerrors.Errorf("simulation name %q cannot contain a '/'", newName) + } + if strings.Contains(oldName, "/") { + return xerrors.Errorf("simulation name %q cannot contain a '/'", oldName) + } + + values := make(map[string][]byte) + for _, field := range simFields { + key := simulationPrefix.ChildString(field).ChildString(oldName) + value, err := nd.MetadataDS.Get(key) + if err == datastore.ErrNotFound { + continue + } else if err != nil { + return err + } + values[field] = value + } + + if _, ok := values["head"]; !ok { + return xerrors.Errorf("simulation named %s not found", oldName) + } + + for _, field := range simFields { + key := simulationPrefix.ChildString(field).ChildString(newName) + var err error + if value, ok := values[field]; ok { + err = nd.MetadataDS.Put(key, value) + } else { + err = nd.MetadataDS.Delete(key) + } + if err != nil { + return err + } + } + return nil +} + +// RenameSim renames a simulation. 
+func (nd *Node) RenameSim(ctx context.Context, oldName, newName string) error { + if err := nd.CopySim(ctx, oldName, newName); err != nil { + return err + } + return nd.DeleteSim(ctx, oldName) +} diff --git a/cmd/lotus-sim/simulation/simulation.go b/cmd/lotus-sim/simulation/simulation.go new file mode 100644 index 000000000..d91d30eda --- /dev/null +++ b/cmd/lotus-sim/simulation/simulation.go @@ -0,0 +1,408 @@ +package simulation + +import ( + "context" + "encoding/json" + "runtime" + + "golang.org/x/sync/errgroup" + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/network" + "github.com/ipfs/go-cid" + "github.com/ipfs/go-datastore" + logging "github.com/ipfs/go-log/v2" + + blockadt "github.com/filecoin-project/specs-actors/actors/util/adt" + + "github.com/filecoin-project/lotus/chain/stmgr" + "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/cmd/lotus-sim/simulation/stages" +) + +var log = logging.Logger("simulation") + +// config is the simulation's config, persisted to the local metadata store and loaded on start. +// +// See Simulation.loadConfig and Simulation.saveConfig. +type config struct { + Upgrades map[network.Version]abi.ChainEpoch +} + +// upgradeSchedule constructs an stmgr.StateManager upgrade schedule, overriding any network upgrade +// epochs as specified in the config. +func (c *config) upgradeSchedule() (stmgr.UpgradeSchedule, error) { + upgradeSchedule := stmgr.DefaultUpgradeSchedule() + expected := make(map[network.Version]struct{}, len(c.Upgrades)) + for nv := range c.Upgrades { + expected[nv] = struct{}{} + } + + // Update network upgrade epochs. 
+ newUpgradeSchedule := upgradeSchedule[:0] + for _, upgrade := range upgradeSchedule { + if height, ok := c.Upgrades[upgrade.Network]; ok { + delete(expected, upgrade.Network) + if height < 0 { + continue + } + upgrade.Height = height + } + newUpgradeSchedule = append(newUpgradeSchedule, upgrade) + } + + // Make sure we didn't try to configure an unknown network version. + if len(expected) > 0 { + missing := make([]network.Version, 0, len(expected)) + for nv := range expected { + missing = append(missing, nv) + } + return nil, xerrors.Errorf("unknown network versions %v in config", missing) + } + + // Finally, validate it. This ensures we don't change the order of the upgrade or anything + // like that. + if err := newUpgradeSchedule.Validate(); err != nil { + return nil, err + } + return newUpgradeSchedule, nil +} + +// Simulation specifies a lotus-sim simulation. +type Simulation struct { + Node *Node + StateManager *stmgr.StateManager + + name string + config config + start *types.TipSet + + // head + head *types.TipSet + + stages []stages.Stage +} + +// loadConfig loads a simulation's config from the datastore. This must be called on startup and may +// be called to restore the config from-disk. +func (sim *Simulation) loadConfig() error { + configBytes, err := sim.Node.MetadataDS.Get(sim.key("config")) + if err == nil { + err = json.Unmarshal(configBytes, &sim.config) + } + switch err { + case nil: + case datastore.ErrNotFound: + sim.config = config{} + default: + return xerrors.Errorf("failed to load config: %w", err) + } + return nil +} + +// saveConfig saves the current config to the datastore. This must be called whenever the config is +// changed. +func (sim *Simulation) saveConfig() error { + buf, err := json.Marshal(sim.config) + if err != nil { + return err + } + return sim.Node.MetadataDS.Put(sim.key("config"), buf) +} + +var simulationPrefix = datastore.NewKey("/simulation") + +// key returns the key in the form /simulation//. 
For example, +// /simulation/head/default. +func (sim *Simulation) key(subkey string) datastore.Key { + return simulationPrefix.ChildString(subkey).ChildString(sim.name) +} + +// loadNamedTipSet the tipset with the given name (for this simulation) +func (sim *Simulation) loadNamedTipSet(name string) (*types.TipSet, error) { + tskBytes, err := sim.Node.MetadataDS.Get(sim.key(name)) + if err != nil { + return nil, xerrors.Errorf("failed to load tipset %s/%s: %w", sim.name, name, err) + } + tsk, err := types.TipSetKeyFromBytes(tskBytes) + if err != nil { + return nil, xerrors.Errorf("failed to parse tipste %v (%s/%s): %w", tskBytes, sim.name, name, err) + } + ts, err := sim.Node.Chainstore.LoadTipSet(tsk) + if err != nil { + return nil, xerrors.Errorf("failed to load tipset %s (%s/%s): %w", tsk, sim.name, name, err) + } + return ts, nil +} + +// storeNamedTipSet stores the tipset at name (relative to the simulation). +func (sim *Simulation) storeNamedTipSet(name string, ts *types.TipSet) error { + if err := sim.Node.MetadataDS.Put(sim.key(name), ts.Key().Bytes()); err != nil { + return xerrors.Errorf("failed to store tipset (%s/%s): %w", sim.name, name, err) + } + return nil +} + +// GetHead returns the current simulation head. +func (sim *Simulation) GetHead() *types.TipSet { + return sim.head +} + +// GetStart returns simulation's parent tipset. +func (sim *Simulation) GetStart() *types.TipSet { + return sim.start +} + +// GetNetworkVersion returns the current network version for the simulation. +func (sim *Simulation) GetNetworkVersion() network.Version { + return sim.StateManager.GetNtwkVersion(context.TODO(), sim.head.Height()) +} + +// SetHead updates the current head of the simulation and stores it in the metadata store. This is +// called for every Simulation.Step. 
+func (sim *Simulation) SetHead(head *types.TipSet) error { + if err := sim.storeNamedTipSet("head", head); err != nil { + return err + } + sim.head = head + return nil +} + +// Name returns the simulation's name. +func (sim *Simulation) Name() string { + return sim.name +} + +// SetUpgradeHeight sets the height of the given network version change (and saves the config). +// +// This fails if the specified epoch has already passed or the new upgrade schedule is invalid. +func (sim *Simulation) SetUpgradeHeight(nv network.Version, epoch abi.ChainEpoch) (_err error) { + if epoch <= sim.head.Height() { + return xerrors.Errorf("cannot set upgrade height in the past (%d <= %d)", epoch, sim.head.Height()) + } + + if sim.config.Upgrades == nil { + sim.config.Upgrades = make(map[network.Version]abi.ChainEpoch, 1) + } + + sim.config.Upgrades[nv] = epoch + defer func() { + if _err != nil { + // try to restore the old config on error. + _ = sim.loadConfig() + } + }() + + newUpgradeSchedule, err := sim.config.upgradeSchedule() + if err != nil { + return err + } + sm, err := stmgr.NewStateManagerWithUpgradeSchedule(sim.Node.Chainstore, newUpgradeSchedule) + if err != nil { + return err + } + err = sim.saveConfig() + if err != nil { + return err + } + + sim.StateManager = sm + return nil +} + +// ListUpgrades returns any future network upgrades. +func (sim *Simulation) ListUpgrades() (stmgr.UpgradeSchedule, error) { + upgrades, err := sim.config.upgradeSchedule() + if err != nil { + return nil, err + } + var pending stmgr.UpgradeSchedule + for _, upgrade := range upgrades { + if upgrade.Height < sim.head.Height() { + continue + } + pending = append(pending, upgrade) + } + return pending, nil +} + +type AppliedMessage struct { + types.Message + types.MessageReceipt +} + +// Walk walks the simulation's chain from the current head back to the first tipset. 
+func (sim *Simulation) Walk( + ctx context.Context, + lookback int64, + cb func(sm *stmgr.StateManager, + ts *types.TipSet, + stCid cid.Cid, + messages []*AppliedMessage) error, +) error { + store := sim.Node.Chainstore.ActorStore(ctx) + minEpoch := sim.start.Height() + if lookback != 0 { + minEpoch = sim.head.Height() - abi.ChainEpoch(lookback) + } + + // Given that loading messages and receipts can be a little bit slow, we do this in parallel. + // + // 1. We spin up some number of workers. + // 2. We hand tipsets to workers in round-robin order. + // 3. We pull "resolved" tipsets in the same round-robin order. + // 4. We serially call the callback in reverse-chain order. + // + // We have a buffer of size 1 for both resolved tipsets and unresolved tipsets. This should + // ensure that we never block unnecessarily. + + type work struct { + ts *types.TipSet + stCid cid.Cid + recCid cid.Cid + } + type result struct { + ts *types.TipSet + stCid cid.Cid + messages []*AppliedMessage + } + + // This is more disk bound than CPU bound, but eh... + workerCount := runtime.NumCPU() * 2 + + workQs := make([]chan *work, workerCount) + resultQs := make([]chan *result, workerCount) + + for i := range workQs { + workQs[i] = make(chan *work, 1) + } + + for i := range resultQs { + resultQs[i] = make(chan *result, 1) + } + + grp, ctx := errgroup.WithContext(ctx) + + // Walk the chain and fire off work items. 
+ grp.Go(func() error { + ts := sim.head + stCid, recCid, err := sim.StateManager.TipSetState(ctx, ts) + if err != nil { + return err + } + i := 0 + for ts.Height() > minEpoch { + if err := ctx.Err(); err != nil { + return ctx.Err() + } + + select { + case workQs[i] <- &work{ts, stCid, recCid}: + case <-ctx.Done(): + return ctx.Err() + } + + stCid = ts.MinTicketBlock().ParentStateRoot + recCid = ts.MinTicketBlock().ParentMessageReceipts + ts, err = sim.Node.Chainstore.LoadTipSet(ts.Parents()) + if err != nil { + return xerrors.Errorf("loading parent: %w", err) + } + i = (i + 1) % workerCount + } + for _, q := range workQs { + close(q) + } + return nil + }) + + // Spin up one worker per queue pair. + for i := 0; i < workerCount; i++ { + workQ := workQs[i] + resultQ := resultQs[i] + grp.Go(func() error { + for { + if err := ctx.Err(); err != nil { + return ctx.Err() + } + + var job *work + var ok bool + select { + case job, ok = <-workQ: + case <-ctx.Done(): + return ctx.Err() + } + + if !ok { + break + } + + msgs, err := sim.Node.Chainstore.MessagesForTipset(job.ts) + if err != nil { + return err + } + + recs, err := blockadt.AsArray(store, job.recCid) + if err != nil { + return xerrors.Errorf("amt load: %w", err) + } + applied := make([]*AppliedMessage, len(msgs)) + var rec types.MessageReceipt + err = recs.ForEach(&rec, func(i int64) error { + applied[i] = &AppliedMessage{ + Message: *msgs[i].VMMessage(), + MessageReceipt: rec, + } + return nil + }) + if err != nil { + return err + } + select { + case resultQ <- &result{ + ts: job.ts, + stCid: job.stCid, + messages: applied, + }: + case <-ctx.Done(): + return ctx.Err() + } + } + close(resultQ) + return nil + }) + } + + // Process results in the same order we enqueued them. 
+ grp.Go(func() error { + qs := resultQs + for len(qs) > 0 { + newQs := qs[:0] + for _, q := range qs { + if err := ctx.Err(); err != nil { + return ctx.Err() + } + select { + case r, ok := <-q: + if !ok { + continue + } + err := cb(sim.StateManager, r.ts, r.stCid, r.messages) + if err != nil { + return err + } + case <-ctx.Done(): + return ctx.Err() + } + newQs = append(newQs, q) + } + qs = newQs + } + return nil + }) + + // Wait for everything to finish. + return grp.Wait() +} diff --git a/cmd/lotus-sim/simulation/stages/actor_iter.go b/cmd/lotus-sim/simulation/stages/actor_iter.go new file mode 100644 index 000000000..b2c14ebdb --- /dev/null +++ b/cmd/lotus-sim/simulation/stages/actor_iter.go @@ -0,0 +1,38 @@ +package stages + +import ( + "math/rand" + + "github.com/filecoin-project/go-address" +) + +// actorIter is a simple persistent iterator that loops over a set of actors. +type actorIter struct { + actors []address.Address + offset int +} + +// shuffle randomly permutes the set of actors. +func (p *actorIter) shuffle() { + rand.Shuffle(len(p.actors), func(i, j int) { + p.actors[i], p.actors[j] = p.actors[j], p.actors[i] + }) +} + +// next returns the next actor's address and advances the iterator. +func (p *actorIter) next() address.Address { + next := p.actors[p.offset] + p.offset++ + p.offset %= len(p.actors) + return next +} + +// add adds a new actor to the iterator. +func (p *actorIter) add(addr address.Address) { + p.actors = append(p.actors, addr) +} + +// len returns the number of actors in the iterator. 
+func (p *actorIter) len() int { + return len(p.actors) +} diff --git a/cmd/lotus-sim/simulation/stages/commit_queue.go b/cmd/lotus-sim/simulation/stages/commit_queue.go new file mode 100644 index 000000000..d625dedb6 --- /dev/null +++ b/cmd/lotus-sim/simulation/stages/commit_queue.go @@ -0,0 +1,200 @@ +package stages + +import ( + "sort" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + + "github.com/filecoin-project/lotus/chain/actors/builtin/miner" + "github.com/filecoin-project/lotus/chain/actors/policy" +) + +// pendingCommitTracker tracks pending commits per-miner for a single epoch. +type pendingCommitTracker map[address.Address]minerPendingCommits + +// minerPendingCommits tracks a miner's pending commits during a single epoch (grouped by seal proof type). +type minerPendingCommits map[abi.RegisteredSealProof][]abi.SectorNumber + +// finish marks count sectors of the given proof type as "prove-committed". +func (m minerPendingCommits) finish(proof abi.RegisteredSealProof, count int) { + snos := m[proof] + if len(snos) < count { + panic("not enough sector numbers to finish") + } else if len(snos) == count { + delete(m, proof) + } else { + m[proof] = snos[count:] + } +} + +// empty returns true if there are no pending commits. +func (m minerPendingCommits) empty() bool { + return len(m) == 0 +} + +// count returns the number of pending commits. +func (m minerPendingCommits) count() int { + count := 0 + for _, snos := range m { + count += len(snos) + } + return count +} + +// commitQueue is used to track pending prove-commits. +// +// Miners are processed in round-robin where _all_ commits from a given miner are finished before +// moving on to the next. This is designed to maximize batching. +type commitQueue struct { + minerQueue []address.Address + queue []pendingCommitTracker + offset abi.ChainEpoch +} + +// ready returns the number of prove-commits ready to be proven at the current epoch. Useful for logging. 
+func (q *commitQueue) ready() int { + if len(q.queue) == 0 { + return 0 + } + count := 0 + for _, pending := range q.queue[0] { + count += pending.count() + } + return count +} + +// nextMiner returns the next miner to be proved and the set of pending prove commits for that +// miner. When some number of sectors have successfully been proven, call "finish" so we don't try +// to prove them again. +func (q *commitQueue) nextMiner() (address.Address, minerPendingCommits, bool) { + if len(q.queue) == 0 { + return address.Undef, nil, false + } + next := q.queue[0] + + // Go through the queue and find the first non-empty batch. + for len(q.minerQueue) > 0 { + addr := q.minerQueue[0] + q.minerQueue = q.minerQueue[1:] + pending := next[addr] + if !pending.empty() { + return addr, pending, true + } + delete(next, addr) + } + + return address.Undef, nil, false +} + +// advanceEpoch will advance to the next epoch. If some sectors were left unproven in the current +// epoch, they will be "prepended" into the next epochs sector set. +func (q *commitQueue) advanceEpoch(epoch abi.ChainEpoch) { + if epoch < q.offset { + panic("cannot roll epoch backwards") + } + // Now we "roll forwards", merging each epoch we advance over with the next. + for len(q.queue) > 1 && q.offset < epoch { + curr := q.queue[0] + q.queue[0] = nil + q.queue = q.queue[1:] + q.offset++ + + next := q.queue[0] + + // Cleanup empty entries. + for addr, pending := range curr { + if pending.empty() { + delete(curr, addr) + } + } + + // If the entire level is actually empty, just skip to the next one. + if len(curr) == 0 { + continue + } + + // Otherwise, merge the next into the current. + for addr, nextPending := range next { + currPending := curr[addr] + if currPending.empty() { + curr[addr] = nextPending + continue + } + for ty, nextSnos := range nextPending { + currSnos := currPending[ty] + if len(currSnos) == 0 { + currPending[ty] = nextSnos + continue + } + currPending[ty] = append(currSnos, nextSnos...) 
+ } + } + // Now replace next with the merged curr. + q.queue[0] = curr + } + q.offset = epoch + if len(q.queue) == 0 { + return + } + + next := q.queue[0] + seenMiners := make(map[address.Address]struct{}, len(q.minerQueue)) + for _, addr := range q.minerQueue { + seenMiners[addr] = struct{}{} + } + + // Find the new miners not already in the queue. + offset := len(q.minerQueue) + for addr, pending := range next { + if pending.empty() { + delete(next, addr) + continue + } + if _, ok := seenMiners[addr]; ok { + continue + } + q.minerQueue = append(q.minerQueue, addr) + } + + // Sort the new miners only. + newMiners := q.minerQueue[offset:] + sort.Slice(newMiners, func(i, j int) bool { + // eh, escape analysis should be fine here... + return string(newMiners[i].Bytes()) < string(newMiners[j].Bytes()) + }) +} + +// enquueProveCommit enqueues prove-commit for the given pre-commit for the given miner. +func (q *commitQueue) enqueueProveCommit(addr address.Address, preCommitEpoch abi.ChainEpoch, info miner.SectorPreCommitInfo) error { + // Compute the epoch at which we can start trying to commit. + preCommitDelay := policy.GetPreCommitChallengeDelay() + minCommitEpoch := preCommitEpoch + preCommitDelay + 1 + + // Figure out the offset in the queue. + i := int(minCommitEpoch - q.offset) + if i < 0 { + i = 0 + } + + // Expand capacity and insert. 
+ if cap(q.queue) <= i { + pc := make([]pendingCommitTracker, i+1, preCommitDelay*2) + copy(pc, q.queue) + q.queue = pc + } else if len(q.queue) <= i { + q.queue = q.queue[:i+1] + } + tracker := q.queue[i] + if tracker == nil { + tracker = make(pendingCommitTracker) + q.queue[i] = tracker + } + minerPending := tracker[addr] + if minerPending == nil { + minerPending = make(minerPendingCommits) + tracker[addr] = minerPending + } + minerPending[info.SealProof] = append(minerPending[info.SealProof], info.SectorNumber) + return nil +} diff --git a/cmd/lotus-sim/simulation/stages/commit_queue_test.go b/cmd/lotus-sim/simulation/stages/commit_queue_test.go new file mode 100644 index 000000000..8ab05250e --- /dev/null +++ b/cmd/lotus-sim/simulation/stages/commit_queue_test.go @@ -0,0 +1,128 @@ +package stages + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + + "github.com/filecoin-project/lotus/chain/actors/builtin/miner" + "github.com/filecoin-project/lotus/chain/actors/policy" +) + +func TestCommitQueue(t *testing.T) { + var q commitQueue + addr1, err := address.NewIDAddress(1000) + require.NoError(t, err) + proofType := abi.RegisteredSealProof_StackedDrg64GiBV1_1 + require.NoError(t, q.enqueueProveCommit(addr1, 0, miner.SectorPreCommitInfo{ + SealProof: proofType, + SectorNumber: 0, + })) + require.NoError(t, q.enqueueProveCommit(addr1, 0, miner.SectorPreCommitInfo{ + SealProof: proofType, + SectorNumber: 1, + })) + require.NoError(t, q.enqueueProveCommit(addr1, 1, miner.SectorPreCommitInfo{ + SealProof: proofType, + SectorNumber: 2, + })) + require.NoError(t, q.enqueueProveCommit(addr1, 1, miner.SectorPreCommitInfo{ + SealProof: proofType, + SectorNumber: 3, + })) + require.NoError(t, q.enqueueProveCommit(addr1, 3, miner.SectorPreCommitInfo{ + SealProof: proofType, + SectorNumber: 4, + })) + require.NoError(t, q.enqueueProveCommit(addr1, 4, 
miner.SectorPreCommitInfo{ + SealProof: proofType, + SectorNumber: 5, + })) + require.NoError(t, q.enqueueProveCommit(addr1, 6, miner.SectorPreCommitInfo{ + SealProof: proofType, + SectorNumber: 6, + })) + + epoch := abi.ChainEpoch(0) + q.advanceEpoch(epoch) + _, _, ok := q.nextMiner() + require.False(t, ok) + + epoch += policy.GetPreCommitChallengeDelay() + q.advanceEpoch(epoch) + _, _, ok = q.nextMiner() + require.False(t, ok) + + // 0 : empty + non-empty + epoch++ + q.advanceEpoch(epoch) + addr, sectors, ok := q.nextMiner() + require.True(t, ok) + require.Equal(t, sectors.count(), 2) + require.Equal(t, addr, addr1) + sectors.finish(proofType, 1) + require.Equal(t, sectors.count(), 1) + require.EqualValues(t, []abi.SectorNumber{1}, sectors[proofType]) + + // 1 : non-empty + non-empty + epoch++ + q.advanceEpoch(epoch) + addr, sectors, ok = q.nextMiner() + require.True(t, ok) + require.Equal(t, addr, addr1) + require.Equal(t, sectors.count(), 3) + require.EqualValues(t, []abi.SectorNumber{1, 2, 3}, sectors[proofType]) + sectors.finish(proofType, 3) + require.Equal(t, sectors.count(), 0) + + // 2 : empty + empty + epoch++ + q.advanceEpoch(epoch) + _, _, ok = q.nextMiner() + require.False(t, ok) + + // 3 : empty + non-empty + epoch++ + q.advanceEpoch(epoch) + _, sectors, ok = q.nextMiner() + require.True(t, ok) + require.Equal(t, sectors.count(), 1) + require.EqualValues(t, []abi.SectorNumber{4}, sectors[proofType]) + + // 4 : non-empty + non-empty + epoch++ + q.advanceEpoch(epoch) + _, sectors, ok = q.nextMiner() + require.True(t, ok) + require.Equal(t, sectors.count(), 2) + require.EqualValues(t, []abi.SectorNumber{4, 5}, sectors[proofType]) + + // 5 : empty + non-empty + epoch++ + q.advanceEpoch(epoch) + _, sectors, ok = q.nextMiner() + require.True(t, ok) + require.Equal(t, sectors.count(), 2) + require.EqualValues(t, []abi.SectorNumber{4, 5}, sectors[proofType]) + sectors.finish(proofType, 1) + require.EqualValues(t, []abi.SectorNumber{5}, sectors[proofType]) + 
+ // 6 + epoch++ + q.advanceEpoch(epoch) + _, sectors, ok = q.nextMiner() + require.True(t, ok) + require.Equal(t, sectors.count(), 2) + require.EqualValues(t, []abi.SectorNumber{5, 6}, sectors[proofType]) + + // 8 + epoch += 2 + q.advanceEpoch(epoch) + _, sectors, ok = q.nextMiner() + require.True(t, ok) + require.Equal(t, sectors.count(), 2) + require.EqualValues(t, []abi.SectorNumber{5, 6}, sectors[proofType]) +} diff --git a/cmd/lotus-sim/simulation/stages/funding_stage.go b/cmd/lotus-sim/simulation/stages/funding_stage.go new file mode 100644 index 000000000..f57f85293 --- /dev/null +++ b/cmd/lotus-sim/simulation/stages/funding_stage.go @@ -0,0 +1,318 @@ +package stages + +import ( + "bytes" + "context" + "sort" + "time" + + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/go-state-types/exitcode" + + "github.com/filecoin-project/lotus/chain/actors/aerrors" + "github.com/filecoin-project/lotus/chain/actors/builtin" + "github.com/filecoin-project/lotus/chain/actors/builtin/multisig" + "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/cmd/lotus-sim/simulation/blockbuilder" +) + +var ( + TargetFunds = abi.TokenAmount(types.MustParseFIL("1000FIL")) + MinimumFunds = abi.TokenAmount(types.MustParseFIL("100FIL")) +) + +type FundingStage struct { + fundAccount address.Address + taxMin abi.TokenAmount + minFunds, maxFunds abi.TokenAmount +} + +func NewFundingStage() (*FundingStage, error) { + // TODO: make all this configurable. 
+ addr, err := address.NewIDAddress(100) + if err != nil { + return nil, err + } + return &FundingStage{ + fundAccount: addr, + taxMin: abi.TokenAmount(types.MustParseFIL("1000FIL")), + minFunds: abi.TokenAmount(types.MustParseFIL("1000000FIL")), + maxFunds: abi.TokenAmount(types.MustParseFIL("100000000FIL")), + }, nil +} + +func (*FundingStage) Name() string { + return "funding" +} + +func (fs *FundingStage) Fund(bb *blockbuilder.BlockBuilder, target address.Address) error { + return fs.fund(bb, target, 0) +} + +// sendAndFund "packs" the given message, funding the actor if necessary. It: +// +// 1. Tries to send the given message. +// 2. If that fails, it checks to see if the exit code was ErrInsufficientFunds. +// 3. If so, it sends 1K FIL from the "burnt funds actor" (because we need to send it from +// somewhere) and re-tries the message.0 +func (fs *FundingStage) SendAndFund(bb *blockbuilder.BlockBuilder, msg *types.Message) (res *types.MessageReceipt, err error) { + for i := 0; i < 10; i++ { + res, err = bb.PushMessage(msg) + if err == nil { + return res, nil + } + aerr, ok := err.(aerrors.ActorError) + if !ok || aerr.RetCode() != exitcode.ErrInsufficientFunds { + return nil, err + } + + // Ok, insufficient funds. Let's fund this miner and try again. + if err := fs.fund(bb, msg.To, i); err != nil { + if !blockbuilder.IsOutOfGas(err) { + err = xerrors.Errorf("failed to fund %s: %w", msg.To, err) + } + return nil, err + } + } + return res, err +} + +// fund funds the target actor with 'TargetFunds << shift' FIL. The "shift" parameter allows us to +// keep doubling the amount until the intended operation succeeds. 
+func (fs *FundingStage) fund(bb *blockbuilder.BlockBuilder, target address.Address, shift int) error { + amt := TargetFunds + if shift > 0 { + if shift >= 8 { + shift = 8 // cap + } + amt = big.Lsh(amt, uint(shift)) + } + _, err := bb.PushMessage(&types.Message{ + From: fs.fundAccount, + To: target, + Value: amt, + Method: builtin.MethodSend, + }) + return err +} + +func (fs *FundingStage) PackMessages(ctx context.Context, bb *blockbuilder.BlockBuilder) (_err error) { + st := bb.StateTree() + fundAccActor, err := st.GetActor(fs.fundAccount) + if err != nil { + return err + } + if fs.minFunds.LessThan(fundAccActor.Balance) { + return nil + } + + // Ok, we're going to go fund this thing. + start := time.Now() + + type actor struct { + types.Actor + Address address.Address + } + + var targets []*actor + err = st.ForEach(func(addr address.Address, act *types.Actor) error { + // Don't steal from ourselves! + if addr == fs.fundAccount { + return nil + } + if act.Balance.LessThan(fs.taxMin) { + return nil + } + if !(builtin.IsAccountActor(act.Code) || builtin.IsMultisigActor(act.Code)) { + return nil + } + targets = append(targets, &actor{*act, addr}) + return nil + }) + if err != nil { + return err + } + + balance := fundAccActor.Balance.Copy() + + sort.Slice(targets, func(i, j int) bool { + return targets[i].Balance.GreaterThan(targets[j].Balance) + }) + + store := bb.ActorStore() + epoch := bb.Height() + actorsVersion := bb.ActorsVersion() + + var accounts, multisigs int + defer func() { + if _err != nil { + return + } + bb.L().Infow("finished funding the simulation", + "duration", time.Since(start), + "targets", len(targets), + "epoch", epoch, + "new-balance", types.FIL(balance), + "old-balance", types.FIL(fundAccActor.Balance), + "multisigs", multisigs, + "accounts", accounts, + ) + }() + + for _, actor := range targets { + switch { + case builtin.IsAccountActor(actor.Code): + if _, err := bb.PushMessage(&types.Message{ + From: actor.Address, + To: fs.fundAccount, + 
Value: actor.Balance, + }); blockbuilder.IsOutOfGas(err) { + return nil + } else if err != nil { + return err + } + accounts++ + case builtin.IsMultisigActor(actor.Code): + msigState, err := multisig.Load(store, &actor.Actor) + if err != nil { + return err + } + + threshold, err := msigState.Threshold() + if err != nil { + return err + } + + if threshold > 16 { + bb.L().Debugw("ignoring multisig with high threshold", + "multisig", actor.Address, + "threshold", threshold, + "max", 16, + ) + continue + } + + locked, err := msigState.LockedBalance(epoch) + if err != nil { + return err + } + + if locked.LessThan(fs.taxMin) { + continue // not worth it. + } + + allSigners, err := msigState.Signers() + if err != nil { + return err + } + signers := make([]address.Address, 0, threshold) + for _, signer := range allSigners { + actor, err := st.GetActor(signer) + if err != nil { + return err + } + if !builtin.IsAccountActor(actor.Code) { + // I am so not dealing with this mess. + continue + } + if uint64(len(signers)) >= threshold { + break + } + } + // Ok, we're not dealing with this one. 
+ if uint64(len(signers)) < threshold { + continue + } + + available := big.Sub(actor.Balance, locked) + + var txnId uint64 + { + msg, err := multisig.Message(actorsVersion, signers[0]).Propose( + actor.Address, fs.fundAccount, available, + builtin.MethodSend, nil, + ) + if err != nil { + return err + } + res, err := bb.PushMessage(msg) + if err != nil { + if blockbuilder.IsOutOfGas(err) { + err = nil + } + return err + } + var ret multisig.ProposeReturn + err = ret.UnmarshalCBOR(bytes.NewReader(res.Return)) + if err != nil { + return err + } + if ret.Applied { + if !ret.Code.IsSuccess() { + bb.L().Errorw("failed to tax multisig", + "multisig", actor.Address, + "exitcode", ret.Code, + ) + } + break + } + txnId = uint64(ret.TxnID) + } + var ret multisig.ProposeReturn + for _, signer := range signers[1:] { + msg, err := multisig.Message(actorsVersion, signer).Approve(actor.Address, txnId, nil) + if err != nil { + return err + } + res, err := bb.PushMessage(msg) + if err != nil { + if blockbuilder.IsOutOfGas(err) { + err = nil + } + return err + } + var ret multisig.ProposeReturn + err = ret.UnmarshalCBOR(bytes.NewReader(res.Return)) + if err != nil { + return err + } + // A bit redundant, but nice. + if ret.Applied { + break + } + + } + if !ret.Applied { + bb.L().Errorw("failed to apply multisig transaction", + "multisig", actor.Address, + "txnid", txnId, + "signers", len(signers), + "threshold", threshold, + ) + continue + } + if !ret.Code.IsSuccess() { + bb.L().Errorw("failed to tax multisig", + "multisig", actor.Address, + "txnid", txnId, + "exitcode", ret.Code, + ) + } else { + multisigs++ + } + default: + panic("impossible case") + } + balance = big.Int{Int: balance.Add(balance.Int, actor.Balance.Int)} + if balance.GreaterThanEqual(fs.maxFunds) { + // There's no need to get greedy. + // Well, really, we're trying to avoid messing with state _too_ much. 
+ return nil + } + } + return nil +} diff --git a/cmd/lotus-sim/simulation/stages/interface.go b/cmd/lotus-sim/simulation/stages/interface.go new file mode 100644 index 000000000..0c40a9b23 --- /dev/null +++ b/cmd/lotus-sim/simulation/stages/interface.go @@ -0,0 +1,27 @@ +package stages + +import ( + "context" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + + "github.com/filecoin-project/lotus/chain/actors/builtin/miner" + "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/cmd/lotus-sim/simulation/blockbuilder" +) + +// Stage is a stage of the simulation. It's asked to pack messages for every block. +type Stage interface { + Name() string + PackMessages(ctx context.Context, bb *blockbuilder.BlockBuilder) error +} + +type Funding interface { + SendAndFund(*blockbuilder.BlockBuilder, *types.Message) (*types.MessageReceipt, error) + Fund(*blockbuilder.BlockBuilder, address.Address) error +} + +type Committer interface { + EnqueueProveCommit(addr address.Address, preCommitEpoch abi.ChainEpoch, info miner.SectorPreCommitInfo) error +} diff --git a/cmd/lotus-sim/simulation/stages/pipeline.go b/cmd/lotus-sim/simulation/stages/pipeline.go new file mode 100644 index 000000000..317e5b5a9 --- /dev/null +++ b/cmd/lotus-sim/simulation/stages/pipeline.go @@ -0,0 +1,31 @@ +package stages + +// DefaultPipeline returns the default stage pipeline. This pipeline. +// +// 1. Funds a "funding" actor, if necessary. +// 2. Submits any ready window posts. +// 3. Submits any ready prove commits. +// 4. Submits pre-commits with the remaining gas. +func DefaultPipeline() ([]Stage, error) { + // TODO: make this configurable. E.g., through DI? + // Ideally, we'd also be able to change priority, limit throughput (by limiting gas in the + // block builder, etc. 
+ funding, err := NewFundingStage() + if err != nil { + return nil, err + } + wdpost, err := NewWindowPoStStage() + if err != nil { + return nil, err + } + provecommit, err := NewProveCommitStage(funding) + if err != nil { + return nil, err + } + precommit, err := NewPreCommitStage(funding, provecommit) + if err != nil { + return nil, err + } + + return []Stage{funding, wdpost, provecommit, precommit}, nil +} diff --git a/cmd/lotus-sim/simulation/stages/precommit_stage.go b/cmd/lotus-sim/simulation/stages/precommit_stage.go new file mode 100644 index 000000000..5b9fed09e --- /dev/null +++ b/cmd/lotus-sim/simulation/stages/precommit_stage.go @@ -0,0 +1,347 @@ +package stages + +import ( + "context" + "sort" + "time" + + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/go-state-types/network" + + miner5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/miner" + + "github.com/filecoin-project/lotus/chain/actors" + "github.com/filecoin-project/lotus/chain/actors/aerrors" + "github.com/filecoin-project/lotus/chain/actors/builtin/miner" + "github.com/filecoin-project/lotus/chain/actors/builtin/power" + "github.com/filecoin-project/lotus/chain/actors/policy" + "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/cmd/lotus-sim/simulation/blockbuilder" + "github.com/filecoin-project/lotus/cmd/lotus-sim/simulation/mock" +) + +const ( + minPreCommitBatchSize = 1 + maxPreCommitBatchSize = miner5.PreCommitSectorBatchMaxSize +) + +type PreCommitStage struct { + funding Funding + committer Committer + + // The tiers represent the top 1%, top 10%, and everyone else. When sealing sectors, we seal + // a group of sectors for the top 1%, a group (half that size) for the top 10%, and one + // sector for everyone else. We determine these rates by looking at two power tables. 
+ // TODO Ideally we'd "learn" this distribution from the network. But this is good enough for + // now. + top1, top10, rest actorIter + initialized bool +} + +func NewPreCommitStage(funding Funding, committer Committer) (*PreCommitStage, error) { + return &PreCommitStage{ + funding: funding, + committer: committer, + }, nil +} + +func (*PreCommitStage) Name() string { + return "pre-commit" +} + +// packPreCommits packs pre-commit messages until the block is full. +func (stage *PreCommitStage) PackMessages(ctx context.Context, bb *blockbuilder.BlockBuilder) (_err error) { + if !stage.initialized { + if err := stage.load(ctx, bb); err != nil { + return err + } + } + + var ( + full bool + top1Count, top10Count, restCount int + ) + start := time.Now() + defer func() { + if _err != nil { + return + } + bb.L().Debugw("packed pre commits", + "done", top1Count+top10Count+restCount, + "top1", top1Count, + "top10", top10Count, + "rest", restCount, + "filled-block", full, + "duration", time.Since(start), + ) + }() + + var top1Miners, top10Miners, restMiners int + for i := 0; ; i++ { + var ( + minerAddr address.Address + count *int + ) + + // We pre-commit for the top 1%, 10%, and the of the network 1/3rd of the time each. + // This won't yield the most accurate distribution... but it'll give us a good + // enough distribution. + switch { + case (i%3) <= 0 && top1Miners < stage.top1.len(): + count = &top1Count + minerAddr = stage.top1.next() + top1Miners++ + case (i%3) <= 1 && top10Miners < stage.top10.len(): + count = &top10Count + minerAddr = stage.top10.next() + top10Miners++ + case (i%3) <= 2 && restMiners < stage.rest.len(): + count = &restCount + minerAddr = stage.rest.next() + restMiners++ + default: + // Well, we've run through all miners. 
+ return nil + } + + var ( + added int + err error + ) + added, full, err = stage.packMiner(ctx, bb, minerAddr, maxProveCommitBatchSize) + if err != nil { + return xerrors.Errorf("failed to pack precommits for miner %s: %w", minerAddr, err) + } + *count += added + if full { + return nil + } + } +} + +// packPreCommitsMiner packs count pre-commits for the given miner. +func (stage *PreCommitStage) packMiner( + ctx context.Context, bb *blockbuilder.BlockBuilder, + minerAddr address.Address, count int, +) (int, bool, error) { + log := bb.L().With("miner", minerAddr) + epoch := bb.Height() + nv := bb.NetworkVersion() + + minerActor, err := bb.StateTree().GetActor(minerAddr) + if err != nil { + return 0, false, err + } + minerState, err := miner.Load(bb.ActorStore(), minerActor) + if err != nil { + return 0, false, err + } + + minerInfo, err := minerState.Info() + if err != nil { + return 0, false, err + } + + // Make sure the miner is funded. + minerBalance, err := minerState.AvailableBalance(minerActor.Balance) + if err != nil { + return 0, false, err + } + + if big.Cmp(minerBalance, MinimumFunds) < 0 { + err := stage.funding.Fund(bb, minerAddr) + if err != nil { + if blockbuilder.IsOutOfGas(err) { + return 0, true, nil + } + return 0, false, err + } + } + + // Generate pre-commits. + sealType, err := miner.PreferredSealProofTypeFromWindowPoStType( + nv, minerInfo.WindowPoStProofType, + ) + if err != nil { + return 0, false, err + } + + sectorNos, err := minerState.UnallocatedSectorNumbers(count) + if err != nil { + return 0, false, err + } + + expiration := epoch + policy.GetMaxSectorExpirationExtension() + infos := make([]miner.SectorPreCommitInfo, len(sectorNos)) + for i, sno := range sectorNos { + infos[i] = miner.SectorPreCommitInfo{ + SealProof: sealType, + SectorNumber: sno, + SealedCID: mock.MockCommR(minerAddr, sno), + SealRandEpoch: epoch - 1, + Expiration: expiration, + } + } + + // Commit the pre-commits. 
+ added := 0 + if nv >= network.Version13 { + targetBatchSize := maxPreCommitBatchSize + for targetBatchSize >= minPreCommitBatchSize && len(infos) >= minPreCommitBatchSize { + batch := infos + if len(batch) > targetBatchSize { + batch = batch[:targetBatchSize] + } + params := miner5.PreCommitSectorBatchParams{ + Sectors: batch, + } + enc, err := actors.SerializeParams(¶ms) + if err != nil { + return added, false, err + } + // NOTE: just in-case, sendAndFund will "fund" and re-try for any message + // that fails due to "insufficient funds". + if _, err := stage.funding.SendAndFund(bb, &types.Message{ + To: minerAddr, + From: minerInfo.Worker, + Value: abi.NewTokenAmount(0), + Method: miner.Methods.PreCommitSectorBatch, + Params: enc, + }); blockbuilder.IsOutOfGas(err) { + // try again with a smaller batch. + targetBatchSize /= 2 + continue + } else if aerr, ok := err.(aerrors.ActorError); ok && !aerr.IsFatal() { + // Log the error and move on. No reason to stop. + log.Errorw("failed to pre-commit for unknown reasons", + "error", aerr, + "sectors", batch, + ) + return added, false, nil + } else if err != nil { + return added, false, err + } + + for _, info := range batch { + if err := stage.committer.EnqueueProveCommit(minerAddr, epoch, info); err != nil { + return added, false, err + } + added++ + } + infos = infos[len(batch):] + } + } + for _, info := range infos { + enc, err := actors.SerializeParams(&info) //nolint + if err != nil { + return 0, false, err + } + if _, err := stage.funding.SendAndFund(bb, &types.Message{ + To: minerAddr, + From: minerInfo.Worker, + Value: abi.NewTokenAmount(0), + Method: miner.Methods.PreCommitSector, + Params: enc, + }); blockbuilder.IsOutOfGas(err) { + return added, true, nil + } else if err != nil { + return added, false, err + } + + if err := stage.committer.EnqueueProveCommit(minerAddr, epoch, info); err != nil { + return added, false, err + } + added++ + } + return added, false, nil +} + +func (stage *PreCommitStage) 
load(ctx context.Context, bb *blockbuilder.BlockBuilder) (_err error) { + bb.L().Infow("loading miner power for pre-commits") + start := time.Now() + defer func() { + if _err != nil { + return + } + bb.L().Infow("loaded miner power for pre-commits", + "duration", time.Since(start), + "top1", stage.top1.len(), + "top10", stage.top10.len(), + "rest", stage.rest.len(), + ) + }() + + store := bb.ActorStore() + st := bb.ParentStateTree() + powerState, err := loadPower(store, st) + if err != nil { + return xerrors.Errorf("failed to power actor: %w", err) + } + + type onboardingInfo struct { + addr address.Address + sectorCount uint64 + } + var sealList []onboardingInfo + err = powerState.ForEachClaim(func(addr address.Address, claim power.Claim) error { + if claim.RawBytePower.IsZero() { + return nil + } + + minerState, err := loadMiner(store, st, addr) + if err != nil { + return err + } + info, err := minerState.Info() + if err != nil { + return err + } + + sectorCount := sectorsFromClaim(info.SectorSize, claim) + + if sectorCount > 0 { + sealList = append(sealList, onboardingInfo{addr, uint64(sectorCount)}) + } + return nil + }) + if err != nil { + return err + } + + if len(sealList) == 0 { + return xerrors.Errorf("simulation has no miners") + } + + // Now that we have a list of sealing miners, sort them into percentiles. + sort.Slice(sealList, func(i, j int) bool { + return sealList[i].sectorCount < sealList[j].sectorCount + }) + + // reset, just in case. 
+ stage.top1 = actorIter{} + stage.top10 = actorIter{} + stage.rest = actorIter{} + + for i, oi := range sealList { + var dist *actorIter + if i < len(sealList)/100 { + dist = &stage.top1 + } else if i < len(sealList)/10 { + dist = &stage.top10 + } else { + dist = &stage.rest + } + dist.add(oi.addr) + } + + stage.top1.shuffle() + stage.top10.shuffle() + stage.rest.shuffle() + + stage.initialized = true + return nil +} diff --git a/cmd/lotus-sim/simulation/stages/provecommit_stage.go b/cmd/lotus-sim/simulation/stages/provecommit_stage.go new file mode 100644 index 000000000..6cbca7de9 --- /dev/null +++ b/cmd/lotus-sim/simulation/stages/provecommit_stage.go @@ -0,0 +1,372 @@ +package stages + +import ( + "context" + "time" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-bitfield" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/exitcode" + "github.com/filecoin-project/go-state-types/network" + + miner5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/miner" + power5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/power" + + "github.com/filecoin-project/lotus/chain/actors" + "github.com/filecoin-project/lotus/chain/actors/aerrors" + "github.com/filecoin-project/lotus/chain/actors/builtin/miner" + "github.com/filecoin-project/lotus/chain/actors/builtin/power" + "github.com/filecoin-project/lotus/chain/actors/policy" + "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/cmd/lotus-sim/simulation/blockbuilder" + "github.com/filecoin-project/lotus/cmd/lotus-sim/simulation/mock" +) + +const ( + minProveCommitBatchSize = 4 + maxProveCommitBatchSize = miner5.MaxAggregatedSectors +) + +type ProveCommitStage struct { + funding Funding + // We track the set of pending commits. On simulation load, and when a new pre-commit is + // added to the chain, we put the commit in this queue. 
advanceEpoch(currentEpoch) should be + // called on this queue at every epoch before using it. + commitQueue commitQueue + initialized bool +} + +func NewProveCommitStage(funding Funding) (*ProveCommitStage, error) { + return &ProveCommitStage{ + funding: funding, + }, nil +} + +func (*ProveCommitStage) Name() string { + return "prove-commit" +} + +func (stage *ProveCommitStage) EnqueueProveCommit( + minerAddr address.Address, preCommitEpoch abi.ChainEpoch, info miner.SectorPreCommitInfo, +) error { + return stage.commitQueue.enqueueProveCommit(minerAddr, preCommitEpoch, info) +} + +// packProveCommits packs all prove-commits for all "ready to be proven" sectors until it fills the +// block or runs out. +func (stage *ProveCommitStage) PackMessages(ctx context.Context, bb *blockbuilder.BlockBuilder) (_err error) { + if !stage.initialized { + if err := stage.load(ctx, bb); err != nil { + return err + } + } + // Roll the commitQueue forward. + stage.commitQueue.advanceEpoch(bb.Height()) + + start := time.Now() + var failed, done, unbatched, count int + defer func() { + if _err != nil { + return + } + remaining := stage.commitQueue.ready() + bb.L().Debugw("packed prove commits", + "remaining", remaining, + "done", done, + "failed", failed, + "unbatched", unbatched, + "miners-processed", count, + "duration", time.Since(start), + ) + }() + + for { + addr, pending, ok := stage.commitQueue.nextMiner() + if !ok { + return nil + } + + res, err := stage.packProveCommitsMiner(ctx, bb, addr, pending) + if err != nil { + return err + } + failed += res.failed + done += res.done + unbatched += res.unbatched + count++ + if res.full { + return nil + } + } +} + +type proveCommitResult struct { + done, failed, unbatched int + full bool +} + +// packProveCommitsMiner enqueues a prove commits from the given miner until it runs out of +// available prove-commits, batching as much as possible. 
+// +// This function will fund as necessary from the "burnt funds actor" (look, it's convenient). +func (stage *ProveCommitStage) packProveCommitsMiner( + ctx context.Context, bb *blockbuilder.BlockBuilder, minerAddr address.Address, + pending minerPendingCommits, +) (res proveCommitResult, _err error) { + minerActor, err := bb.StateTree().GetActor(minerAddr) + if err != nil { + return res, err + } + minerState, err := miner.Load(bb.ActorStore(), minerActor) + if err != nil { + return res, err + } + info, err := minerState.Info() + if err != nil { + return res, err + } + + log := bb.L().With("miner", minerAddr) + + nv := bb.NetworkVersion() + for sealType, snos := range pending { + if nv >= network.Version13 { + for len(snos) > minProveCommitBatchSize { + batchSize := maxProveCommitBatchSize + if len(snos) < batchSize { + batchSize = len(snos) + } + batch := snos[:batchSize] + + proof, err := mock.MockAggregateSealProof(sealType, minerAddr, batchSize) + if err != nil { + return res, err + } + + params := miner5.ProveCommitAggregateParams{ + SectorNumbers: bitfield.New(), + AggregateProof: proof, + } + for _, sno := range batch { + params.SectorNumbers.Set(uint64(sno)) + } + + enc, err := actors.SerializeParams(¶ms) + if err != nil { + return res, err + } + + if _, err := stage.funding.SendAndFund(bb, &types.Message{ + From: info.Worker, + To: minerAddr, + Value: abi.NewTokenAmount(0), + Method: miner.Methods.ProveCommitAggregate, + Params: enc, + }); err == nil { + res.done += len(batch) + } else if blockbuilder.IsOutOfGas(err) { + res.full = true + return res, nil + } else if aerr, ok := err.(aerrors.ActorError); !ok || aerr.IsFatal() { + // If we get a random error, or a fatal actor error, bail. + return res, err + } else if aerr.RetCode() == exitcode.ErrNotFound || aerr.RetCode() == exitcode.ErrIllegalArgument { + // If we get a "not-found" or illegal argument error, try to + // remove any missing prove-commits and continue. 
This can + // happen either because: + // + // 1. The pre-commit failed on execution (but not when + // packing). This shouldn't happen, but we might as well + // gracefully handle it. + // 2. The pre-commit has expired. We'd have to be really + // backloged to hit this case, but we might as well handle + // it. + // First, split into "good" and "missing" + good, err := stage.filterProveCommits(ctx, bb, minerAddr, batch) + if err != nil { + log.Errorw("failed to filter prove commits", "error", err) + // fail with the original error. + return res, aerr + } + removed := len(batch) - len(good) + if removed == 0 { + log.Errorw("failed to prove-commit for unknown reasons", + "error", aerr, + "sectors", batch, + ) + res.failed += len(batch) + } else if len(good) == 0 { + log.Errorw("failed to prove commit missing pre-commits", + "error", aerr, + "discarded", removed, + ) + res.failed += len(batch) + } else { + // update the pending sector numbers in-place to remove the expired ones. + snos = snos[removed:] + copy(snos, good) + pending.finish(sealType, removed) + + log.Errorw("failed to prove commit expired/missing pre-commits", + "error", aerr, + "discarded", removed, + "kept", len(good), + ) + res.failed += removed + + // Then try again. 
+ continue + } + } else { + log.Errorw("failed to prove commit sector(s)", + "error", err, + "sectors", batch, + ) + res.failed += len(batch) + } + pending.finish(sealType, len(batch)) + snos = snos[len(batch):] + } + } + for len(snos) > 0 && res.unbatched < power5.MaxMinerProveCommitsPerEpoch { + sno := snos[0] + snos = snos[1:] + + proof, err := mock.MockSealProof(sealType, minerAddr) + if err != nil { + return res, err + } + params := miner.ProveCommitSectorParams{ + SectorNumber: sno, + Proof: proof, + } + enc, err := actors.SerializeParams(¶ms) + if err != nil { + return res, err + } + if _, err := stage.funding.SendAndFund(bb, &types.Message{ + From: info.Worker, + To: minerAddr, + Value: abi.NewTokenAmount(0), + Method: miner.Methods.ProveCommitSector, + Params: enc, + }); err == nil { + res.unbatched++ + res.done++ + } else if blockbuilder.IsOutOfGas(err) { + res.full = true + return res, nil + } else if aerr, ok := err.(aerrors.ActorError); !ok || aerr.IsFatal() { + return res, err + } else { + log.Errorw("failed to prove commit sector(s)", + "error", err, + "sectors", []abi.SectorNumber{sno}, + ) + res.failed++ + } + // mark it as "finished" regardless so we skip it. + pending.finish(sealType, 1) + } + // if we get here, we can't pre-commit anything more. + } + return res, nil +} + +// loadMiner enqueue all pending prove-commits for the given miner. This is called on load to +// populate the commitQueue and should not need to be called later. +// +// It will drop any pre-commits that have already expired. +func (stage *ProveCommitStage) loadMiner(ctx context.Context, bb *blockbuilder.BlockBuilder, addr address.Address) error { + epoch := bb.Height() + av := bb.ActorsVersion() + minerState, err := loadMiner(bb.ActorStore(), bb.ParentStateTree(), addr) + if err != nil { + return err + } + + // Find all pending prove commits and group by proof type. Really, there should never + // (except during upgrades be more than one type. 
+ var total, dropped int + err = minerState.ForEachPrecommittedSector(func(info miner.SectorPreCommitOnChainInfo) error { + total++ + msd := policy.GetMaxProveCommitDuration(av, info.Info.SealProof) + if epoch > info.PreCommitEpoch+msd { + dropped++ + return nil + } + return stage.commitQueue.enqueueProveCommit(addr, info.PreCommitEpoch, info.Info) + }) + if err != nil { + return err + } + if dropped > 0 { + bb.L().Warnw("dropped expired pre-commits on load", + "miner", addr, + "total", total, + "expired", dropped, + ) + } + return nil +} + +// filterProveCommits filters out expired and/or missing pre-commits. +func (stage *ProveCommitStage) filterProveCommits( + ctx context.Context, bb *blockbuilder.BlockBuilder, + minerAddr address.Address, snos []abi.SectorNumber, +) ([]abi.SectorNumber, error) { + act, err := bb.StateTree().GetActor(minerAddr) + if err != nil { + return nil, err + } + + minerState, err := miner.Load(bb.ActorStore(), act) + if err != nil { + return nil, err + } + + nextEpoch := bb.Height() + av := bb.ActorsVersion() + + good := make([]abi.SectorNumber, 0, len(snos)) + for _, sno := range snos { + info, err := minerState.GetPrecommittedSector(sno) + if err != nil { + return nil, err + } + if info == nil { + continue + } + msd := policy.GetMaxProveCommitDuration(av, info.Info.SealProof) + if nextEpoch > info.PreCommitEpoch+msd { + continue + } + good = append(good, sno) + } + return good, nil +} + +func (stage *ProveCommitStage) load(ctx context.Context, bb *blockbuilder.BlockBuilder) error { + stage.initialized = false // in case something failes while we're doing this. + stage.commitQueue = commitQueue{offset: bb.Height()} + powerState, err := loadPower(bb.ActorStore(), bb.ParentStateTree()) + if err != nil { + return err + } + + err = powerState.ForEachClaim(func(minerAddr address.Address, claim power.Claim) error { + // TODO: If we want to finish pre-commits for "new" miners, we'll need to change + // this. 
+ if claim.RawBytePower.IsZero() { + return nil + } + return stage.loadMiner(ctx, bb, minerAddr) + }) + if err != nil { + return err + } + + stage.initialized = true + return nil +} diff --git a/cmd/lotus-sim/simulation/stages/util.go b/cmd/lotus-sim/simulation/stages/util.go new file mode 100644 index 000000000..97c1e57af --- /dev/null +++ b/cmd/lotus-sim/simulation/stages/util.go @@ -0,0 +1,51 @@ +package stages + +import ( + "context" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/go-state-types/crypto" + + "github.com/filecoin-project/lotus/chain/actors/adt" + "github.com/filecoin-project/lotus/chain/actors/builtin/miner" + "github.com/filecoin-project/lotus/chain/actors/builtin/power" + "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/cmd/lotus-sim/simulation/blockbuilder" +) + +func loadMiner(store adt.Store, st types.StateTree, addr address.Address) (miner.State, error) { + minerActor, err := st.GetActor(addr) + if err != nil { + return nil, err + } + return miner.Load(store, minerActor) +} + +func loadPower(store adt.Store, st types.StateTree) (power.State, error) { + powerActor, err := st.GetActor(power.Address) + if err != nil { + return nil, err + } + return power.Load(store, powerActor) +} + +// Compute the number of sectors a miner has from their power claim. 
+func sectorsFromClaim(sectorSize abi.SectorSize, c power.Claim) int64 { + if c.RawBytePower.Int == nil { + return 0 + } + sectorCount := big.Div(c.RawBytePower, big.NewIntUnsigned(uint64(sectorSize))) + if !sectorCount.IsInt64() { + panic("impossible number of sectors") + } + return sectorCount.Int64() +} + +func postChainCommitInfo(ctx context.Context, bb *blockbuilder.BlockBuilder, epoch abi.ChainEpoch) (abi.Randomness, error) { + cs := bb.StateManager().ChainStore() + ts := bb.ParentTipSet() + commitRand, err := cs.GetChainRandomness(ctx, ts.Cids(), crypto.DomainSeparationTag_PoStChainCommit, epoch, nil, true) + return commitRand, err +} diff --git a/cmd/lotus-sim/simulation/stages/windowpost_stage.go b/cmd/lotus-sim/simulation/stages/windowpost_stage.go new file mode 100644 index 000000000..68f8ea179 --- /dev/null +++ b/cmd/lotus-sim/simulation/stages/windowpost_stage.go @@ -0,0 +1,317 @@ +package stages + +import ( + "context" + "math" + "time" + + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + + proof5 "github.com/filecoin-project/specs-actors/v5/actors/runtime/proof" + + "github.com/filecoin-project/lotus/chain/actors" + "github.com/filecoin-project/lotus/chain/actors/aerrors" + "github.com/filecoin-project/lotus/chain/actors/builtin/miner" + "github.com/filecoin-project/lotus/chain/actors/builtin/power" + "github.com/filecoin-project/lotus/chain/actors/policy" + "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/cmd/lotus-sim/simulation/blockbuilder" + "github.com/filecoin-project/lotus/cmd/lotus-sim/simulation/mock" +) + +type WindowPoStStage struct { + // We track the window post periods per miner and assume that no new miners are ever added. + + // We record all pending window post messages, and the epoch up through which we've + // generated window post messages. 
+ pendingWposts []*types.Message + wpostPeriods [][]address.Address // (epoch % (epochs in a deadline)) -> miner + nextWpostEpoch abi.ChainEpoch +} + +func NewWindowPoStStage() (*WindowPoStStage, error) { + return new(WindowPoStStage), nil +} + +func (*WindowPoStStage) Name() string { + return "window-post" +} + +// packWindowPoSts packs window posts until either the block is full or all healty sectors +// have been proven. It does not recover sectors. +func (stage *WindowPoStStage) PackMessages(ctx context.Context, bb *blockbuilder.BlockBuilder) (_err error) { + // Push any new window posts into the queue. + if err := stage.tick(ctx, bb); err != nil { + return err + } + done := 0 + failed := 0 + defer func() { + if _err != nil { + return + } + + bb.L().Debugw("packed window posts", + "done", done, + "failed", failed, + "remaining", len(stage.pendingWposts), + ) + }() + // Then pack as many as we can. + for len(stage.pendingWposts) > 0 { + next := stage.pendingWposts[0] + if _, err := bb.PushMessage(next); err != nil { + if blockbuilder.IsOutOfGas(err) { + return nil + } + if aerr, ok := err.(aerrors.ActorError); !ok || aerr.IsFatal() { + return err + } + bb.L().Errorw("failed to submit windowed post", + "error", err, + "miner", next.To, + ) + failed++ + } else { + done++ + } + + stage.pendingWposts = stage.pendingWposts[1:] + } + stage.pendingWposts = nil + return nil +} + +// stepWindowPoStsMiner enqueues all missing window posts for the current epoch for the given miner. 
+func (stage *WindowPoStStage) queueMiner( + ctx context.Context, bb *blockbuilder.BlockBuilder, + addr address.Address, minerState miner.State, + commitEpoch abi.ChainEpoch, commitRand abi.Randomness, +) error { + + if active, err := minerState.DeadlineCronActive(); err != nil { + return err + } else if !active { + return nil + } + + minerInfo, err := minerState.Info() + if err != nil { + return err + } + + di, err := minerState.DeadlineInfo(bb.Height()) + if err != nil { + return err + } + di = di.NextNotElapsed() + + dl, err := minerState.LoadDeadline(di.Index) + if err != nil { + return err + } + + provenBf, err := dl.PartitionsPoSted() + if err != nil { + return err + } + proven, err := provenBf.AllMap(math.MaxUint64) + if err != nil { + return err + } + + poStBatchSize, err := policy.GetMaxPoStPartitions(bb.NetworkVersion(), minerInfo.WindowPoStProofType) + if err != nil { + return err + } + + var ( + partitions []miner.PoStPartition + partitionGroups [][]miner.PoStPartition + ) + // Only prove partitions with live sectors. + err = dl.ForEachPartition(func(idx uint64, part miner.Partition) error { + if proven[idx] { + return nil + } + // NOTE: We're mimicing the behavior of wdpost_run.go here. 
+ if len(partitions) > 0 && idx%uint64(poStBatchSize) == 0 { + partitionGroups = append(partitionGroups, partitions) + partitions = nil + + } + live, err := part.LiveSectors() + if err != nil { + return err + } + liveCount, err := live.Count() + if err != nil { + return err + } + faulty, err := part.FaultySectors() + if err != nil { + return err + } + faultyCount, err := faulty.Count() + if err != nil { + return err + } + if liveCount-faultyCount > 0 { + partitions = append(partitions, miner.PoStPartition{Index: idx}) + } + return nil + }) + if err != nil { + return err + } + if len(partitions) > 0 { + partitionGroups = append(partitionGroups, partitions) + partitions = nil + } + + proof, err := mock.MockWindowPoStProof(minerInfo.WindowPoStProofType, addr) + if err != nil { + return err + } + for _, group := range partitionGroups { + params := miner.SubmitWindowedPoStParams{ + Deadline: di.Index, + Partitions: group, + Proofs: []proof5.PoStProof{{ + PoStProof: minerInfo.WindowPoStProofType, + ProofBytes: proof, + }}, + ChainCommitEpoch: commitEpoch, + ChainCommitRand: commitRand, + } + enc, aerr := actors.SerializeParams(¶ms) + if aerr != nil { + return xerrors.Errorf("could not serialize submit window post parameters: %w", aerr) + } + msg := &types.Message{ + To: addr, + From: minerInfo.Worker, + Method: miner.Methods.SubmitWindowedPoSt, + Params: enc, + Value: types.NewInt(0), + } + stage.pendingWposts = append(stage.pendingWposts, msg) + } + return nil +} + +func (stage *WindowPoStStage) load(ctx context.Context, bb *blockbuilder.BlockBuilder) (_err error) { + bb.L().Info("loading window post info") + + start := time.Now() + defer func() { + if _err != nil { + return + } + + bb.L().Infow("loaded window post info", "duration", time.Since(start)) + }() + + // reset + stage.wpostPeriods = make([][]address.Address, miner.WPoStChallengeWindow) + stage.pendingWposts = nil + stage.nextWpostEpoch = bb.Height() + 1 + + st := bb.ParentStateTree() + store := 
bb.ActorStore() + + powerState, err := loadPower(store, st) + if err != nil { + return err + } + + commitEpoch := bb.ParentTipSet().Height() + commitRand, err := postChainCommitInfo(ctx, bb, commitEpoch) + if err != nil { + return err + } + + return powerState.ForEachClaim(func(minerAddr address.Address, claim power.Claim) error { + // TODO: If we start recovering power, we'll need to change this. + if claim.RawBytePower.IsZero() { + return nil + } + + minerState, err := loadMiner(store, st, minerAddr) + if err != nil { + return err + } + + // Shouldn't be necessary if the miner has power, but we might as well be safe. + if active, err := minerState.DeadlineCronActive(); err != nil { + return err + } else if !active { + return nil + } + + // Record when we need to prove for this miner. + dinfo, err := minerState.DeadlineInfo(bb.Height()) + if err != nil { + return err + } + dinfo = dinfo.NextNotElapsed() + + ppOffset := int(dinfo.PeriodStart % miner.WPoStChallengeWindow) + stage.wpostPeriods[ppOffset] = append(stage.wpostPeriods[ppOffset], minerAddr) + + return stage.queueMiner(ctx, bb, minerAddr, minerState, commitEpoch, commitRand) + }) +} + +func (stage *WindowPoStStage) tick(ctx context.Context, bb *blockbuilder.BlockBuilder) error { + // If this is our first time, load from scratch. + if stage.wpostPeriods == nil { + return stage.load(ctx, bb) + } + + targetHeight := bb.Height() + now := time.Now() + was := len(stage.pendingWposts) + count := 0 + defer func() { + bb.L().Debugw("computed window posts", + "miners", count, + "count", len(stage.pendingWposts)-was, + "duration", time.Since(now), + ) + }() + + st := bb.ParentStateTree() + store := bb.ActorStore() + + // Perform a bit of catch up. This lets us do things like skip blocks at upgrades then catch + // up to make the simulation easier. 
+ for ; stage.nextWpostEpoch <= targetHeight; stage.nextWpostEpoch++ { + if stage.nextWpostEpoch+miner.WPoStChallengeWindow < targetHeight { + bb.L().Warnw("skipping old window post", "deadline-open", stage.nextWpostEpoch) + continue + } + commitEpoch := stage.nextWpostEpoch - 1 + commitRand, err := postChainCommitInfo(ctx, bb, commitEpoch) + if err != nil { + return err + } + + for _, addr := range stage.wpostPeriods[int(stage.nextWpostEpoch%miner.WPoStChallengeWindow)] { + minerState, err := loadMiner(store, st, addr) + if err != nil { + return err + } + + if err := stage.queueMiner(ctx, bb, addr, minerState, commitEpoch, commitRand); err != nil { + return err + } + count++ + } + + } + return nil +} diff --git a/cmd/lotus-sim/simulation/step.go b/cmd/lotus-sim/simulation/step.go new file mode 100644 index 000000000..902f2ad6c --- /dev/null +++ b/cmd/lotus-sim/simulation/step.go @@ -0,0 +1,71 @@ +package simulation + +import ( + "context" + + "golang.org/x/xerrors" + + "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/cmd/lotus-sim/simulation/blockbuilder" +) + +// Step steps the simulation forward one step. This may move forward by more than one epoch. +func (sim *Simulation) Step(ctx context.Context) (*types.TipSet, error) { + log.Infow("step", "epoch", sim.head.Height()+1) + messages, err := sim.popNextMessages(ctx) + if err != nil { + return nil, xerrors.Errorf("failed to select messages for block: %w", err) + } + head, err := sim.makeTipSet(ctx, messages) + if err != nil { + return nil, xerrors.Errorf("failed to make tipset: %w", err) + } + if err := sim.SetHead(head); err != nil { + return nil, xerrors.Errorf("failed to update head: %w", err) + } + return head, nil +} + +// popNextMessages generates/picks a set of messages to be included in the next block. +// +// - This function is destructive and should only be called once per epoch. +// - This function does not store anything in the repo. 
+// - This function handles all gas estimation. The returned messages should all fit in a single +// block. +func (sim *Simulation) popNextMessages(ctx context.Context) ([]*types.Message, error) { + parentTs := sim.head + + // First we make sure we don't have an upgrade at this epoch. If we do, we return no + // messages so we can just create an empty block at that epoch. + // + // This isn't what the network does, but it makes things easier. Otherwise, we'd need to run + // migrations before this epoch and I'd rather not deal with that. + nextHeight := parentTs.Height() + 1 + prevVer := sim.StateManager.GetNtwkVersion(ctx, nextHeight-1) + nextVer := sim.StateManager.GetNtwkVersion(ctx, nextHeight) + if nextVer != prevVer { + log.Warnw("packing no messages for version upgrade block", + "old", prevVer, + "new", nextVer, + "epoch", nextHeight, + ) + return nil, nil + } + + bb, err := blockbuilder.NewBlockBuilder( + ctx, log.With("simulation", sim.name), + sim.StateManager, parentTs, + ) + if err != nil { + return nil, err + } + + for _, stage := range sim.stages { + // We're intentionally ignoring the "full" signal so we can try to pack a few more + // messages. 
+ if err := stage.PackMessages(ctx, bb); err != nil && !blockbuilder.IsOutOfGas(err) { + return nil, xerrors.Errorf("when packing messages with %s: %w", stage.Name(), err) + } + } + return bb.Messages(), nil +} diff --git a/cmd/lotus-sim/upgrade.go b/cmd/lotus-sim/upgrade.go new file mode 100644 index 000000000..dfc726d6b --- /dev/null +++ b/cmd/lotus-sim/upgrade.go @@ -0,0 +1,109 @@ +package main + +import ( + "fmt" + "strconv" + "strings" + "text/tabwriter" + + "github.com/urfave/cli/v2" + + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/network" +) + +var upgradeCommand = &cli.Command{ + Name: "upgrade", + Description: "Modifies network upgrade heights.", + Subcommands: []*cli.Command{ + upgradeSetCommand, + upgradeList, + }, +} + +var upgradeList = &cli.Command{ + Name: "list", + Description: "Lists all pending upgrades.", + Subcommands: []*cli.Command{ + upgradeSetCommand, + }, + Action: func(cctx *cli.Context) (err error) { + node, err := open(cctx) + if err != nil { + return err + } + defer func() { + if cerr := node.Close(); err == nil { + err = cerr + } + }() + + sim, err := node.LoadSim(cctx.Context, cctx.String("simulation")) + if err != nil { + return err + } + upgrades, err := sim.ListUpgrades() + if err != nil { + return err + } + + tw := tabwriter.NewWriter(cctx.App.Writer, 8, 8, 0, ' ', 0) + fmt.Fprintf(tw, "version\theight\tepochs\tmigration\texpensive") + epoch := sim.GetHead().Height() + for _, upgrade := range upgrades { + fmt.Fprintf( + tw, "%d\t%d\t%+d\t%t\t%t", + upgrade.Network, upgrade.Height, upgrade.Height-epoch, + upgrade.Migration != nil, + upgrade.Expensive, + ) + } + return nil + }, +} + +var upgradeSetCommand = &cli.Command{ + Name: "set", + ArgsUsage: " [+]", + Description: "Set a network upgrade height. 
Prefix with '+' to set it relative to the last epoch.", + Action: func(cctx *cli.Context) (err error) { + args := cctx.Args() + if args.Len() != 2 { + return fmt.Errorf("expected 2 arguments") + } + nvString := args.Get(0) + networkVersion, err := strconv.ParseUint(nvString, 10, 32) + if err != nil { + return fmt.Errorf("failed to parse network version %q: %w", nvString, err) + } + heightString := args.Get(1) + relative := false + if strings.HasPrefix(heightString, "+") { + heightString = heightString[1:] + relative = true + } + height, err := strconv.ParseInt(heightString, 10, 64) + if err != nil { + return fmt.Errorf("failed to parse height version %q: %w", heightString, err) + } + + node, err := open(cctx) + if err != nil { + return err + } + defer func() { + if cerr := node.Close(); err == nil { + err = cerr + } + }() + + sim, err := node.LoadSim(cctx.Context, cctx.String("simulation")) + if err != nil { + return err + } + if relative { + height += int64(sim.GetHead().Height()) + } + return sim.SetUpgradeHeight(network.Version(networkVersion), abi.ChainEpoch(height)) + }, +} diff --git a/cmd/lotus-sim/util.go b/cmd/lotus-sim/util.go new file mode 100644 index 000000000..cd15cca0d --- /dev/null +++ b/cmd/lotus-sim/util.go @@ -0,0 +1,18 @@ +package main + +import ( + "fmt" + + "github.com/urfave/cli/v2" + + "github.com/filecoin-project/lotus/cmd/lotus-sim/simulation" + "github.com/filecoin-project/lotus/lib/ulimit" +) + +func open(cctx *cli.Context) (*simulation.Node, error) { + _, _, err := ulimit.ManageFdLimit() + if err != nil { + fmt.Fprintf(cctx.App.ErrWriter, "ERROR: failed to raise ulimit: %s\n", err) + } + return simulation.OpenNode(cctx.Context, cctx.String("repo")) +} diff --git a/cmd/lotus-storage-miner/actor.go b/cmd/lotus-storage-miner/actor.go index 7e428d0e4..57a6e4e51 100644 --- a/cmd/lotus-storage-miner/actor.go +++ b/cmd/lotus-storage-miner/actor.go @@ -388,12 +388,15 @@ var actorControlList = &cli.Command{ Name: "verbose", }, &cli.BoolFlag{ - 
Name: "color", - Value: true, + Name: "color", + Usage: "use color in display output", + DefaultText: "depends on output being a TTY", }, }, Action: func(cctx *cli.Context) error { - color.NoColor = !cctx.Bool("color") + if cctx.IsSet("color") { + color.NoColor = !cctx.Bool("color") + } nodeApi, closer, err := lcli.GetStorageMinerAPI(cctx) if err != nil { @@ -435,6 +438,7 @@ var actorControlList = &cli.Command{ commit := map[address.Address]struct{}{} precommit := map[address.Address]struct{}{} terminate := map[address.Address]struct{}{} + dealPublish := map[address.Address]struct{}{} post := map[address.Address]struct{}{} for _, ca := range mi.ControlAddresses { @@ -471,6 +475,16 @@ var actorControlList = &cli.Command{ terminate[ca] = struct{}{} } + for _, ca := range ac.DealPublishControl { + ca, err := api.StateLookupID(ctx, ca, types.EmptyTSK) + if err != nil { + return err + } + + delete(post, ca) + dealPublish[ca] = struct{}{} + } + printKey := func(name string, a address.Address) { b, err := api.WalletBalance(ctx, a) if err != nil { @@ -515,6 +529,9 @@ var actorControlList = &cli.Command{ if _, ok := terminate[a]; ok { uses = append(uses, color.YellowString("terminate")) } + if _, ok := dealPublish[a]; ok { + uses = append(uses, color.MagentaString("deals")) + } tw.Write(map[string]interface{}{ "name": name, diff --git a/cmd/lotus-storage-miner/actor_test.go b/cmd/lotus-storage-miner/actor_test.go index 5bc82d842..073a83059 100644 --- a/cmd/lotus-storage-miner/actor_test.go +++ b/cmd/lotus-storage-miner/actor_test.go @@ -7,24 +7,20 @@ import ( "fmt" "regexp" "strconv" - "sync/atomic" "testing" "time" - logging "github.com/ipfs/go-log/v2" + "github.com/filecoin-project/go-state-types/network" "github.com/stretchr/testify/require" "github.com/urfave/cli/v2" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/api/test" - "github.com/filecoin-project/lotus/build" 
"github.com/filecoin-project/lotus/chain/actors/policy" "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/lib/lotuslog" + "github.com/filecoin-project/lotus/itests/kit" "github.com/filecoin-project/lotus/node/repo" - builder "github.com/filecoin-project/lotus/node/test" ) func TestWorkerKeyChange(t *testing.T) { @@ -35,40 +31,21 @@ func TestWorkerKeyChange(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - _ = logging.SetLogLevel("*", "INFO") - - policy.SetConsensusMinerMinPower(abi.NewStoragePower(2048)) - policy.SetSupportedProofTypes(abi.RegisteredSealProof_StackedDrg2KiBV1) - policy.SetMinVerifiedDealSize(abi.NewStoragePower(256)) - - lotuslog.SetupLogLevels() - logging.SetLogLevel("miner", "ERROR") - logging.SetLogLevel("chainstore", "ERROR") - logging.SetLogLevel("chain", "ERROR") - logging.SetLogLevel("pubsub", "ERROR") - logging.SetLogLevel("sub", "ERROR") - logging.SetLogLevel("storageminer", "ERROR") + kit.QuietMiningLogs() blocktime := 1 * time.Millisecond - - n, sn := builder.MockSbBuilder(t, []test.FullNodeOpts{test.FullNodeWithLatestActorsAt(-1), test.FullNodeWithLatestActorsAt(-1)}, test.OneMiner) - - client1 := n[0] - client2 := n[1] - - // Connect the nodes. 
- addrinfo, err := client1.NetAddrsListen(ctx) - require.NoError(t, err) - err = client2.NetConnect(ctx, addrinfo) - require.NoError(t, err) + client1, client2, miner, ens := kit.EnsembleTwoOne(t, kit.MockProofs(), + kit.ConstructorOpts(kit.InstantaneousNetworkVersion(network.Version13)), + ) + ens.InterconnectAll().BeginMining(blocktime) output := bytes.NewBuffer(nil) run := func(cmd *cli.Command, args ...string) error { app := cli.NewApp() app.Metadata = map[string]interface{}{ "repoType": repo.StorageMiner, - "testnode-full": n[0], - "testnode-storage": sn[0], + "testnode-full": client1, + "testnode-storage": miner, } app.Writer = output api.RunningNodeType = api.NodeMiner @@ -85,29 +62,11 @@ func TestWorkerKeyChange(t *testing.T) { return cmd.Action(cctx) } - // setup miner - mine := int64(1) - done := make(chan struct{}) - go func() { - defer close(done) - for atomic.LoadInt64(&mine) == 1 { - time.Sleep(blocktime) - if err := sn[0].MineOne(ctx, test.MineNext); err != nil { - t.Error(err) - } - } - }() - defer func() { - atomic.AddInt64(&mine, -1) - fmt.Println("shutting down mining") - <-done - }() - newKey, err := client1.WalletNew(ctx, types.KTBLS) require.NoError(t, err) // Initialize wallet. 
- test.SendFunds(ctx, t, client1, newKey, abi.NewTokenAmount(0)) + kit.SendFunds(ctx, t, client1, newKey, abi.NewTokenAmount(0)) require.NoError(t, run(actorProposeChangeWorker, "--really-do-it", newKey.String())) @@ -127,14 +86,8 @@ func TestWorkerKeyChange(t *testing.T) { require.Error(t, run(actorConfirmChangeWorker, "--really-do-it", newKey.String())) output.Reset() - for { - head, err := client1.ChainHead(ctx) - require.NoError(t, err) - if head.Height() >= abi.ChainEpoch(targetEpoch) { - break - } - build.Clock.Sleep(10 * blocktime) - } + client1.WaitTillChain(ctx, kit.HeightAtLeast(abi.ChainEpoch(targetEpoch))) + require.NoError(t, run(actorConfirmChangeWorker, "--really-do-it", newKey.String())) output.Reset() @@ -143,23 +96,8 @@ func TestWorkerKeyChange(t *testing.T) { // Wait for finality (worker key switch). targetHeight := head.Height() + policy.ChainFinality - for { - head, err := client1.ChainHead(ctx) - require.NoError(t, err) - if head.Height() >= targetHeight { - break - } - build.Clock.Sleep(10 * blocktime) - } + client1.WaitTillChain(ctx, kit.HeightAtLeast(targetHeight)) // Make sure the other node can catch up. 
- for i := 0; i < 20; i++ { - head, err := client2.ChainHead(ctx) - require.NoError(t, err) - if head.Height() >= targetHeight { - return - } - build.Clock.Sleep(10 * blocktime) - } - t.Fatal("failed to reach target epoch on the second miner") + client2.WaitTillChain(ctx, kit.HeightAtLeast(targetHeight)) } diff --git a/cmd/lotus-storage-miner/allinfo_test.go b/cmd/lotus-storage-miner/allinfo_test.go index 6fa3136d3..5f30b4fec 100644 --- a/cmd/lotus-storage-miner/allinfo_test.go +++ b/cmd/lotus-storage-miner/allinfo_test.go @@ -1,22 +1,18 @@ package main import ( + "context" "flag" "testing" "time" - logging "github.com/ipfs/go-log/v2" + "github.com/filecoin-project/lotus/itests/kit" "github.com/stretchr/testify/require" "github.com/urfave/cli/v2" - "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/api/test" "github.com/filecoin-project/lotus/chain/actors/policy" - "github.com/filecoin-project/lotus/lib/lotuslog" "github.com/filecoin-project/lotus/node/repo" - builder "github.com/filecoin-project/lotus/node/test" ) func TestMinerAllInfo(t *testing.T) { @@ -24,20 +20,9 @@ func TestMinerAllInfo(t *testing.T) { t.Skip("skipping test in short mode") } - _ = logging.SetLogLevel("*", "INFO") - - policy.SetConsensusMinerMinPower(abi.NewStoragePower(2048)) - policy.SetSupportedProofTypes(abi.RegisteredSealProof_StackedDrg2KiBV1) - policy.SetMinVerifiedDealSize(abi.NewStoragePower(256)) - _test = true - lotuslog.SetupLogLevels() - logging.SetLogLevel("miner", "ERROR") - logging.SetLogLevel("chainstore", "ERROR") - logging.SetLogLevel("chain", "ERROR") - logging.SetLogLevel("sub", "ERROR") - logging.SetLogLevel("storageminer", "ERROR") + kit.QuietMiningLogs() oldDelay := policy.GetPreCommitChallengeDelay() policy.SetPreCommitChallengeDelay(5) @@ -45,15 +30,15 @@ func TestMinerAllInfo(t *testing.T) { policy.SetPreCommitChallengeDelay(oldDelay) }) - var n []test.TestNode - var sn 
[]test.TestStorageNode + client, miner, ens := kit.EnsembleMinimal(t) + ens.InterconnectAll().BeginMining(time.Second) run := func(t *testing.T) { app := cli.NewApp() app.Metadata = map[string]interface{}{ "repoType": repo.StorageMiner, - "testnode-full": n[0], - "testnode-storage": sn[0], + "testnode-full": client, + "testnode-storage": miner, } api.RunningNodeType = api.NodeMiner @@ -62,15 +47,12 @@ func TestMinerAllInfo(t *testing.T) { require.NoError(t, infoAllCmd.Action(cctx)) } - bp := func(t *testing.T, fullOpts []test.FullNodeOpts, storage []test.StorageMiner) ([]test.TestNode, []test.TestStorageNode) { - n, sn = builder.Builder(t, fullOpts, storage) + t.Run("pre-info-all", run) - t.Run("pre-info-all", run) - - return n, sn - } - - test.TestDealFlow(t, bp, time.Second, false, false, 0) + dh := kit.NewDealHarness(t, client, miner, miner) + deal, res, inPath := dh.MakeOnlineDeal(context.Background(), kit.MakeFullDealParams{Rseed: 6}) + outPath := dh.PerformRetrieval(context.Background(), deal, res.Root, false) + kit.AssertFilesEqual(t, inPath, outPath) t.Run("post-info-all", run) } diff --git a/cmd/lotus-storage-miner/info.go b/cmd/lotus-storage-miner/info.go index e7c5e7904..3941ce563 100644 --- a/cmd/lotus-storage-miner/info.go +++ b/cmd/lotus-storage-miner/info.go @@ -3,7 +3,12 @@ package main import ( "context" "fmt" + "math" + corebig "math/big" + "os" "sort" + "strings" + "text/tabwriter" "time" "github.com/fatih/color" @@ -12,6 +17,7 @@ import ( cbor "github.com/ipfs/go-ipld-cbor" + "github.com/filecoin-project/go-fil-markets/retrievalmarket" "github.com/filecoin-project/go-fil-markets/storagemarket" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/big" @@ -21,6 +27,7 @@ import ( "github.com/filecoin-project/lotus/blockstore" "github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/chain/actors/adt" + "github.com/filecoin-project/lotus/chain/actors/builtin" 
"github.com/filecoin-project/lotus/chain/actors/builtin/miner" "github.com/filecoin-project/lotus/chain/types" lcli "github.com/filecoin-project/lotus/cli" @@ -42,8 +49,6 @@ var infoCmd = &cli.Command{ } func infoCmdAct(cctx *cli.Context) error { - color.NoColor = !cctx.Bool("color") - nodeApi, closer, err := lcli.GetStorageMinerAPI(cctx) if err != nil { return err @@ -120,19 +125,23 @@ func infoCmdAct(cctx *cli.Context) error { return err } - rpercI := types.BigDiv(types.BigMul(pow.MinerPower.RawBytePower, types.NewInt(1000000)), pow.TotalPower.RawBytePower) - qpercI := types.BigDiv(types.BigMul(pow.MinerPower.QualityAdjPower, types.NewInt(1000000)), pow.TotalPower.QualityAdjPower) - fmt.Printf("Power: %s / %s (%0.4f%%)\n", color.GreenString(types.DeciStr(pow.MinerPower.QualityAdjPower)), types.DeciStr(pow.TotalPower.QualityAdjPower), - float64(qpercI.Int64())/10000) + types.BigDivFloat( + types.BigMul(pow.MinerPower.QualityAdjPower, big.NewInt(100)), + pow.TotalPower.QualityAdjPower, + ), + ) fmt.Printf("\tRaw: %s / %s (%0.4f%%)\n", color.BlueString(types.SizeStr(pow.MinerPower.RawBytePower)), types.SizeStr(pow.TotalPower.RawBytePower), - float64(rpercI.Int64())/10000) - + types.BigDivFloat( + types.BigMul(pow.MinerPower.RawBytePower, big.NewInt(100)), + pow.TotalPower.RawBytePower, + ), + ) secCounts, err := api.StateMinerSectorCount(ctx, maddr, types.EmptyTSK) if err != nil { return err @@ -146,7 +155,7 @@ func infoCmdAct(cctx *cli.Context) error { } else { var faultyPercentage float64 if secCounts.Live != 0 { - faultyPercentage = float64(10000*nfaults/secCounts.Live) / 100. 
+ faultyPercentage = float64(100*nfaults) / float64(secCounts.Live) } fmt.Printf("\tProving: %s (%s Faulty, %.2f%%)\n", types.SizeStr(types.BigMul(types.NewInt(proving), types.NewInt(uint64(mi.SectorSize)))), @@ -157,16 +166,54 @@ func infoCmdAct(cctx *cli.Context) error { if !pow.HasMinPower { fmt.Print("Below minimum power threshold, no blocks will be won") } else { - expWinChance := float64(types.BigMul(qpercI, types.NewInt(build.BlocksPerEpoch)).Int64()) / 1000000 - if expWinChance > 0 { - if expWinChance > 1 { - expWinChance = 1 - } - winRate := time.Duration(float64(time.Second*time.Duration(build.BlockDelaySecs)) / expWinChance) - winPerDay := float64(time.Hour*24) / float64(winRate) - fmt.Print("Expected block win rate: ") - color.Blue("%.4f/day (every %s)", winPerDay, winRate.Truncate(time.Second)) + winRatio := new(corebig.Rat).SetFrac( + types.BigMul(pow.MinerPower.QualityAdjPower, types.NewInt(build.BlocksPerEpoch)).Int, + pow.TotalPower.QualityAdjPower.Int, + ) + + if winRatioFloat, _ := winRatio.Float64(); winRatioFloat > 0 { + + // if the corresponding poisson distribution isn't infinitely small then + // throw it into the mix as well, accounting for multi-wins + winRationWithPoissonFloat := -math.Expm1(-winRatioFloat) + winRationWithPoisson := new(corebig.Rat).SetFloat64(winRationWithPoissonFloat) + if winRationWithPoisson != nil { + winRatio = winRationWithPoisson + winRatioFloat = winRationWithPoissonFloat + } + + weekly, _ := new(corebig.Rat).Mul( + winRatio, + new(corebig.Rat).SetInt64(7*builtin.EpochsInDay), + ).Float64() + + avgDuration, _ := new(corebig.Rat).Mul( + new(corebig.Rat).SetInt64(builtin.EpochDurationSeconds), + new(corebig.Rat).Inv(winRatio), + ).Float64() + + fmt.Print("Projected average block win rate: ") + color.Blue( + "%.02f/week (every %s)", + weekly, + (time.Second * time.Duration(avgDuration)).Truncate(time.Second).String(), + ) + + // Geometric distribution of P(Y < k) calculated as described in 
https://en.wikipedia.org/wiki/Geometric_distribution#Probability_Outcomes_Examples + // https://www.wolframalpha.com/input/?i=t+%3E+0%3B+p+%3E+0%3B+p+%3C+1%3B+c+%3E+0%3B+c+%3C1%3B+1-%281-p%29%5E%28t%29%3Dc%3B+solve+t + // t == how many dice-rolls (epochs) before win + // p == winRate == ( minerPower / netPower ) + // c == target probability of win ( 99.9% in this case ) + fmt.Print("Projected block win with ") + color.Green( + "99.9%% probability every %s", + (time.Second * time.Duration( + builtin.EpochDurationSeconds*math.Log(1-0.999)/ + math.Log(1-winRatioFloat), + )).Truncate(time.Second).String(), + ) + fmt.Println("(projections DO NOT account for future network and miner growth)") } } @@ -177,29 +224,89 @@ func infoCmdAct(cctx *cli.Context) error { return err } - var nactiveDeals, nVerifDeals, ndeals uint64 - var activeDealBytes, activeVerifDealBytes, dealBytes abi.PaddedPieceSize - for _, deal := range deals { - if deal.State == storagemarket.StorageDealError { - continue - } - - ndeals++ - dealBytes += deal.Proposal.PieceSize - - if deal.State == storagemarket.StorageDealActive { - nactiveDeals++ - activeDealBytes += deal.Proposal.PieceSize - - if deal.Proposal.VerifiedDeal { - nVerifDeals++ - activeVerifDealBytes += deal.Proposal.PieceSize - } + type dealStat struct { + count, verifCount int + bytes, verifBytes uint64 + } + dsAdd := func(ds *dealStat, deal storagemarket.MinerDeal) { + ds.count++ + ds.bytes += uint64(deal.Proposal.PieceSize) + if deal.Proposal.VerifiedDeal { + ds.verifCount++ + ds.verifBytes += uint64(deal.Proposal.PieceSize) } } - fmt.Printf("Deals: %d, %s\n", ndeals, types.SizeStr(types.NewInt(uint64(dealBytes)))) - fmt.Printf("\tActive: %d, %s (Verified: %d, %s)\n", nactiveDeals, types.SizeStr(types.NewInt(uint64(activeDealBytes))), nVerifDeals, types.SizeStr(types.NewInt(uint64(activeVerifDealBytes)))) + showDealStates := map[storagemarket.StorageDealStatus]struct{}{ + storagemarket.StorageDealActive: {}, + 
storagemarket.StorageDealTransferring: {}, + storagemarket.StorageDealStaged: {}, + storagemarket.StorageDealAwaitingPreCommit: {}, + storagemarket.StorageDealSealing: {}, + storagemarket.StorageDealPublish: {}, + storagemarket.StorageDealCheckForAcceptance: {}, + storagemarket.StorageDealPublishing: {}, + } + + var total dealStat + perState := map[storagemarket.StorageDealStatus]*dealStat{} + for _, deal := range deals { + if _, ok := showDealStates[deal.State]; !ok { + continue + } + if perState[deal.State] == nil { + perState[deal.State] = new(dealStat) + } + + dsAdd(&total, deal) + dsAdd(perState[deal.State], deal) + } + + type wstr struct { + str string + status storagemarket.StorageDealStatus + } + sorted := make([]wstr, 0, len(perState)) + for status, stat := range perState { + st := strings.TrimPrefix(storagemarket.DealStates[status], "StorageDeal") + sorted = append(sorted, wstr{ + str: fmt.Sprintf(" %s:\t%d\t\t%s\t(Verified: %d\t%s)\n", st, stat.count, types.SizeStr(types.NewInt(stat.bytes)), stat.verifCount, types.SizeStr(types.NewInt(stat.verifBytes))), + status: status, + }, + ) + } + sort.Slice(sorted, func(i, j int) bool { + if sorted[i].status == storagemarket.StorageDealActive || sorted[j].status == storagemarket.StorageDealActive { + return sorted[i].status == storagemarket.StorageDealActive + } + return sorted[i].status > sorted[j].status + }) + + fmt.Printf("Storage Deals: %d, %s\n", total.count, types.SizeStr(types.NewInt(total.bytes))) + + tw := tabwriter.NewWriter(os.Stdout, 1, 1, 1, ' ', 0) + for _, e := range sorted { + _, _ = tw.Write([]byte(e.str)) + } + + _ = tw.Flush() + fmt.Println() + + retrievals, err := nodeApi.MarketListRetrievalDeals(ctx) + if err != nil { + return xerrors.Errorf("getting retrieval deal list: %w", err) + } + + var retrComplete dealStat + for _, retrieval := range retrievals { + if retrieval.Status == retrievalmarket.DealStatusCompleted { + retrComplete.count++ + retrComplete.bytes += retrieval.TotalSent + } + } + 
+ fmt.Printf("Retrieval Deals (complete): %d, %s\n", retrComplete.count, types.SizeStr(types.NewInt(retrComplete.bytes))) + fmt.Println() spendable := big.Zero() diff --git a/cmd/lotus-storage-miner/init.go b/cmd/lotus-storage-miner/init.go index 76451f418..1cce52a41 100644 --- a/cmd/lotus-storage-miner/init.go +++ b/cmd/lotus-storage-miner/init.go @@ -8,6 +8,7 @@ import ( "encoding/json" "fmt" "io/ioutil" + "net/http" "os" "path/filepath" "strconv" @@ -120,7 +121,8 @@ var initCmd = &cli.Command{ }, }, Subcommands: []*cli.Command{ - initRestoreCmd, + restoreCmd, + serviceCmd, }, Action: func(cctx *cli.Context) error { log.Info("Initializing lotus miner") @@ -316,10 +318,10 @@ func migratePreSealMeta(ctx context.Context, api v1api.FullNode, metadata string Size: abi.PaddedPieceSize(meta.SectorSize), PieceCID: commD, }, - DealInfo: &sealing.DealInfo{ + DealInfo: &lapi.PieceDealInfo{ DealID: dealID, DealProposal: §or.Deal, - DealSchedule: sealing.DealSchedule{ + DealSchedule: lapi.DealSchedule{ StartEpoch: sector.Deal.StartEpoch, EndEpoch: sector.Deal.EndEpoch, }, @@ -453,14 +455,22 @@ func storageMinerInit(ctx context.Context, cctx *cli.Context, api v1api.FullNode wsts := statestore.New(namespace.Wrap(mds, modules.WorkerCallsPrefix)) smsts := statestore.New(namespace.Wrap(mds, modules.ManagerWorkPrefix)) - smgr, err := sectorstorage.New(ctx, lr, stores.NewIndex(), sectorstorage.SealerConfig{ + si := stores.NewIndex() + + lstor, err := stores.NewLocal(ctx, lr, si, nil) + if err != nil { + return err + } + stor := stores.NewRemote(lstor, si, http.Header(sa), 10, &stores.DefaultPartialFileHandler{}) + + smgr, err := sectorstorage.New(ctx, lstor, stor, lr, si, sectorstorage.SealerConfig{ ParallelFetchLimit: 10, AllowAddPiece: true, AllowPreCommit1: true, AllowPreCommit2: true, AllowCommit: true, AllowUnseal: true, - }, nil, sa, wsts, smsts) + }, wsts, smsts) if err != nil { return err } @@ -724,6 +734,8 @@ func createStorageMiner(ctx context.Context, api v1api.FullNode, 
peerid peer.ID, return retval.IDAddress, nil } +// checkV1ApiSupport uses v0 api version to signal support for v1 API +// trying to query the v1 api on older lotus versions would get a 404, which can happen for any number of other reasons func checkV1ApiSupport(ctx context.Context, cctx *cli.Context) error { // check v0 api version to make sure it supports v1 api api0, closer, err := lcli.GetFullNodeAPI(cctx) diff --git a/cmd/lotus-storage-miner/init_restore.go b/cmd/lotus-storage-miner/init_restore.go index b495e1cd9..3b4e2b26d 100644 --- a/cmd/lotus-storage-miner/init_restore.go +++ b/cmd/lotus-storage-miner/init_restore.go @@ -22,6 +22,7 @@ import ( lapi "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/chain/actors/builtin/miner" "github.com/filecoin-project/lotus/chain/types" lcli "github.com/filecoin-project/lotus/cli" "github.com/filecoin-project/lotus/extern/sector-storage/stores" @@ -30,7 +31,7 @@ import ( "github.com/filecoin-project/lotus/node/repo" ) -var initRestoreCmd = &cli.Command{ +var restoreCmd = &cli.Command{ Name: "restore", Usage: "Initialize a lotus miner repo from a backup", Flags: []cli.Flag{ @@ -49,129 +50,11 @@ var initRestoreCmd = &cli.Command{ }, ArgsUsage: "[backupFile]", Action: func(cctx *cli.Context) error { - log.Info("Initializing lotus miner using a backup") - if cctx.Args().Len() != 1 { - return xerrors.Errorf("expected 1 argument") - } - ctx := lcli.ReqContext(cctx) + log.Info("Initializing lotus miner using a backup") - log.Info("Trying to connect to full node RPC") - - if err := checkV1ApiSupport(ctx, cctx); err != nil { - return err - } - - api, closer, err := lcli.GetFullNodeAPIV1(cctx) // TODO: consider storing full node address in config - if err != nil { - return err - } - defer closer() - - log.Info("Checking full node version") - - v, err := api.Version(ctx) - if err != nil { - return err - } - - if !v.APIVersion.EqMajorMinor(lapi.FullAPIVersion1) { - 
return xerrors.Errorf("Remote API version didn't match (expected %s, remote %s)", lapi.FullAPIVersion1, v.APIVersion) - } - - if !cctx.Bool("nosync") { - if err := lcli.SyncWait(ctx, &v0api.WrapperV1Full{FullNode: api}, false); err != nil { - return xerrors.Errorf("sync wait: %w", err) - } - } - - bf, err := homedir.Expand(cctx.Args().First()) - if err != nil { - return xerrors.Errorf("expand backup file path: %w", err) - } - - st, err := os.Stat(bf) - if err != nil { - return xerrors.Errorf("stat backup file (%s): %w", bf, err) - } - - f, err := os.Open(bf) - if err != nil { - return xerrors.Errorf("opening backup file: %w", err) - } - defer f.Close() // nolint:errcheck - - log.Info("Checking if repo exists") - - repoPath := cctx.String(FlagMinerRepo) - r, err := repo.NewFS(repoPath) - if err != nil { - return err - } - - ok, err := r.Exists() - if err != nil { - return err - } - if ok { - return xerrors.Errorf("repo at '%s' is already initialized", cctx.String(FlagMinerRepo)) - } - - log.Info("Initializing repo") - - if err := r.Init(repo.StorageMiner); err != nil { - return err - } - - lr, err := r.Lock(repo.StorageMiner) - if err != nil { - return err - } - defer lr.Close() //nolint:errcheck - - if cctx.IsSet("config") { - log.Info("Restoring config") - - cf, err := homedir.Expand(cctx.String("config")) - if err != nil { - return xerrors.Errorf("expanding config path: %w", err) - } - - _, err = os.Stat(cf) - if err != nil { - return xerrors.Errorf("stat config file (%s): %w", cf, err) - } - - var cerr error - err = lr.SetConfig(func(raw interface{}) { - rcfg, ok := raw.(*config.StorageMiner) - if !ok { - cerr = xerrors.New("expected miner config") - return - } - - ff, err := config.FromFile(cf, rcfg) - if err != nil { - cerr = xerrors.Errorf("loading config: %w", err) - return - } - - *rcfg = *ff.(*config.StorageMiner) - }) - if cerr != nil { - return cerr - } - if err != nil { - return xerrors.Errorf("setting config: %w", err) - } - - } else { - 
log.Warn("--config NOT SET, WILL USE DEFAULT VALUES") - } - + var storageCfg *stores.StorageConfig if cctx.IsSet("storage-config") { - log.Info("Restoring storage path config") - cf, err := homedir.Expand(cctx.String("storage-config")) if err != nil { return xerrors.Errorf("expanding storage config path: %w", err) @@ -182,101 +65,233 @@ var initRestoreCmd = &cli.Command{ return xerrors.Errorf("reading storage config: %w", err) } - var cerr error - err = lr.SetStorage(func(scfg *stores.StorageConfig) { - cerr = json.Unmarshal(cfb, scfg) - }) - if cerr != nil { - return xerrors.Errorf("unmarshalling storage config: %w", cerr) - } + storageCfg = &stores.StorageConfig{} + err = json.Unmarshal(cfb, storageCfg) if err != nil { - return xerrors.Errorf("setting storage config: %w", err) + return xerrors.Errorf("cannot unmarshal json for storage config: %w", err) } - } else { - log.Warn("--storage-config NOT SET. NO SECTOR PATHS WILL BE CONFIGURED") } - log.Info("Restoring metadata backup") + if err := restore(ctx, cctx, storageCfg, nil, func(api lapi.FullNode, maddr address.Address, peerid peer.ID, mi miner.MinerInfo) error { + log.Info("Checking proof parameters") - mds, err := lr.Datastore(context.TODO(), "/metadata") - if err != nil { - return err - } + if err := paramfetch.GetParams(ctx, build.ParametersJSON(), build.SrsJSON(), uint64(mi.SectorSize)); err != nil { + return xerrors.Errorf("fetching proof parameters: %w", err) + } - bar := pb.New64(st.Size()) - br := bar.NewProxyReader(f) - bar.ShowTimeLeft = true - bar.ShowPercent = true - bar.ShowSpeed = true - bar.Units = pb.U_BYTES + log.Info("Configuring miner actor") - bar.Start() - err = backupds.RestoreInto(br, mds) - bar.Finish() + if err := configureStorageMiner(ctx, api, maddr, peerid, big.Zero()); err != nil { + return err + } - if err != nil { - return xerrors.Errorf("restoring metadata: %w", err) - } - - log.Info("Checking actor metadata") - - abytes, err := mds.Get(datastore.NewKey("miner-address")) - if 
err != nil { - return xerrors.Errorf("getting actor address from metadata datastore: %w", err) - } - - maddr, err := address.NewFromBytes(abytes) - if err != nil { - return xerrors.Errorf("parsing actor address: %w", err) - } - - log.Info("ACTOR ADDRESS: ", maddr.String()) - - mi, err := api.StateMinerInfo(ctx, maddr, types.EmptyTSK) - if err != nil { - return xerrors.Errorf("getting miner info: %w", err) - } - - log.Info("SECTOR SIZE: ", units.BytesSize(float64(mi.SectorSize))) - - wk, err := api.StateAccountKey(ctx, mi.Worker, types.EmptyTSK) - if err != nil { - return xerrors.Errorf("resolving worker key: %w", err) - } - - has, err := api.WalletHas(ctx, wk) - if err != nil { - return xerrors.Errorf("checking worker address: %w", err) - } - - if !has { - return xerrors.Errorf("worker address %s for miner actor %s not present in full node wallet", mi.Worker, maddr) - } - - log.Info("Checking proof parameters") - - if err := paramfetch.GetParams(ctx, build.ParametersJSON(), build.SrsJSON(), uint64(mi.SectorSize)); err != nil { - return xerrors.Errorf("fetching proof parameters: %w", err) - } - - log.Info("Initializing libp2p identity") - - p2pSk, err := makeHostKey(lr) - if err != nil { - return xerrors.Errorf("make host key: %w", err) - } - - peerid, err := peer.IDFromPrivateKey(p2pSk) - if err != nil { - return xerrors.Errorf("peer ID from private key: %w", err) - } - - log.Info("Configuring miner actor") - - if err := configureStorageMiner(ctx, api, maddr, peerid, big.Zero()); err != nil { + return nil + }); err != nil { return err } return nil }, } + +func restore(ctx context.Context, cctx *cli.Context, strConfig *stores.StorageConfig, manageConfig func(*config.StorageMiner) error, after func(api lapi.FullNode, addr address.Address, peerid peer.ID, mi miner.MinerInfo) error) error { + if cctx.Args().Len() != 1 { + return xerrors.Errorf("expected 1 argument") + } + + log.Info("Trying to connect to full node RPC") + + api, closer, err := 
lcli.GetFullNodeAPIV1(cctx) // TODO: consider storing full node address in config + if err != nil { + return err + } + defer closer() + + log.Info("Checking full node version") + + v, err := api.Version(ctx) + if err != nil { + return err + } + + if !v.APIVersion.EqMajorMinor(lapi.FullAPIVersion1) { + return xerrors.Errorf("Remote API version didn't match (expected %s, remote %s)", lapi.FullAPIVersion1, v.APIVersion) + } + + if !cctx.Bool("nosync") { + if err := lcli.SyncWait(ctx, &v0api.WrapperV1Full{FullNode: api}, false); err != nil { + return xerrors.Errorf("sync wait: %w", err) + } + } + + bf, err := homedir.Expand(cctx.Args().First()) + if err != nil { + return xerrors.Errorf("expand backup file path: %w", err) + } + + st, err := os.Stat(bf) + if err != nil { + return xerrors.Errorf("stat backup file (%s): %w", bf, err) + } + + f, err := os.Open(bf) + if err != nil { + return xerrors.Errorf("opening backup file: %w", err) + } + defer f.Close() // nolint:errcheck + + log.Info("Checking if repo exists") + + repoPath := cctx.String(FlagMinerRepo) + r, err := repo.NewFS(repoPath) + if err != nil { + return err + } + + ok, err := r.Exists() + if err != nil { + return err + } + if ok { + return xerrors.Errorf("repo at '%s' is already initialized", cctx.String(FlagMinerRepo)) + } + + log.Info("Initializing repo") + + if err := r.Init(repo.StorageMiner); err != nil { + return err + } + + lr, err := r.Lock(repo.StorageMiner) + if err != nil { + return err + } + defer lr.Close() //nolint:errcheck + + if cctx.IsSet("config") { + log.Info("Restoring config") + + cf, err := homedir.Expand(cctx.String("config")) + if err != nil { + return xerrors.Errorf("expanding config path: %w", err) + } + + _, err = os.Stat(cf) + if err != nil { + return xerrors.Errorf("stat config file (%s): %w", cf, err) + } + + var cerr error + err = lr.SetConfig(func(raw interface{}) { + rcfg, ok := raw.(*config.StorageMiner) + if !ok { + cerr = xerrors.New("expected miner config") + return + } + + 
ff, err := config.FromFile(cf, rcfg) + if err != nil { + cerr = xerrors.Errorf("loading config: %w", err) + return + } + + *rcfg = *ff.(*config.StorageMiner) + if manageConfig != nil { + cerr = manageConfig(rcfg) + } + }) + if cerr != nil { + return cerr + } + if err != nil { + return xerrors.Errorf("setting config: %w", err) + } + + } else { + log.Warn("--config NOT SET, WILL USE DEFAULT VALUES") + } + + if strConfig != nil { + log.Info("Restoring storage path config") + + err = lr.SetStorage(func(scfg *stores.StorageConfig) { + *scfg = *strConfig + }) + if err != nil { + return xerrors.Errorf("setting storage config: %w", err) + } + } else { + log.Warn("--storage-config NOT SET. NO SECTOR PATHS WILL BE CONFIGURED") + } + + log.Info("Restoring metadata backup") + + mds, err := lr.Datastore(context.TODO(), "/metadata") + if err != nil { + return err + } + + bar := pb.New64(st.Size()) + br := bar.NewProxyReader(f) + bar.ShowTimeLeft = true + bar.ShowPercent = true + bar.ShowSpeed = true + bar.Units = pb.U_BYTES + + bar.Start() + err = backupds.RestoreInto(br, mds) + bar.Finish() + + if err != nil { + return xerrors.Errorf("restoring metadata: %w", err) + } + + log.Info("Checking actor metadata") + + abytes, err := mds.Get(datastore.NewKey("miner-address")) + if err != nil { + return xerrors.Errorf("getting actor address from metadata datastore: %w", err) + } + + maddr, err := address.NewFromBytes(abytes) + if err != nil { + return xerrors.Errorf("parsing actor address: %w", err) + } + + log.Info("ACTOR ADDRESS: ", maddr.String()) + + mi, err := api.StateMinerInfo(ctx, maddr, types.EmptyTSK) + if err != nil { + return xerrors.Errorf("getting miner info: %w", err) + } + + log.Info("SECTOR SIZE: ", units.BytesSize(float64(mi.SectorSize))) + + wk, err := api.StateAccountKey(ctx, mi.Worker, types.EmptyTSK) + if err != nil { + return xerrors.Errorf("resolving worker key: %w", err) + } + + has, err := api.WalletHas(ctx, wk) + if err != nil { + return 
xerrors.Errorf("checking worker address: %w", err) + } + + if !has { + return xerrors.Errorf("worker address %s for miner actor %s not present in full node wallet", mi.Worker, maddr) + } + + log.Info("Initializing libp2p identity") + + p2pSk, err := makeHostKey(lr) + if err != nil { + return xerrors.Errorf("make host key: %w", err) + } + + peerid, err := peer.IDFromPrivateKey(p2pSk) + if err != nil { + return xerrors.Errorf("peer ID from private key: %w", err) + } + + return after(api, maddr, peerid, mi) +} diff --git a/cmd/lotus-storage-miner/init_service.go b/cmd/lotus-storage-miner/init_service.go new file mode 100644 index 000000000..ad803a830 --- /dev/null +++ b/cmd/lotus-storage-miner/init_service.go @@ -0,0 +1,152 @@ +package main + +import ( + "context" + "strings" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/big" + lapi "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/api/client" + "github.com/filecoin-project/lotus/chain/actors/builtin/miner" + lcli "github.com/filecoin-project/lotus/cli" + cliutil "github.com/filecoin-project/lotus/cli/util" + "github.com/filecoin-project/lotus/extern/sector-storage/stores" + "github.com/filecoin-project/lotus/node/config" + "github.com/libp2p/go-libp2p-core/peer" + "github.com/urfave/cli/v2" + "golang.org/x/xerrors" +) + +const ( + MarketsService = "markets" +) + +var serviceCmd = &cli.Command{ + Name: "service", + Usage: "Initialize a lotus miner sub-service", + Flags: []cli.Flag{ + &cli.StringFlag{ + Name: "config", + Usage: "config file (config.toml)", + Required: true, + }, + &cli.BoolFlag{ + Name: "nosync", + Usage: "don't check full-node sync status", + }, + &cli.StringSliceFlag{ + Name: "type", + Usage: "type of service to be enabled", + }, + &cli.StringFlag{ + Name: "api-sealer", + Usage: "sealer API info (lotus-miner auth api-info --perm=admin)", + }, + &cli.StringFlag{ + Name: "api-sector-index", + Usage: "sector Index API info 
(lotus-miner auth api-info --perm=admin)", + }, + }, + ArgsUsage: "[backupFile]", + Action: func(cctx *cli.Context) error { + ctx := lcli.ReqContext(cctx) + log.Info("Initializing lotus miner service") + + es := EnabledServices(cctx.StringSlice("type")) + + if len(es) == 0 { + return xerrors.Errorf("at least one module must be enabled") + } + + // we should remove this as soon as we have more service types and not just `markets` + if !es.Contains(MarketsService) { + return xerrors.Errorf("markets module must be enabled") + } + + if !cctx.IsSet("api-sealer") { + return xerrors.Errorf("--api-sealer is required without the sealer module enabled") + } + if !cctx.IsSet("api-sector-index") { + return xerrors.Errorf("--api-sector-index is required without the sector storage module enabled") + } + + if err := restore(ctx, cctx, &stores.StorageConfig{}, func(cfg *config.StorageMiner) error { + cfg.Subsystems.EnableMarkets = es.Contains(MarketsService) + cfg.Subsystems.EnableMining = false + cfg.Subsystems.EnableSealing = false + cfg.Subsystems.EnableSectorStorage = false + + if !cfg.Subsystems.EnableSealing { + ai, err := checkApiInfo(ctx, cctx.String("api-sealer")) + if err != nil { + return xerrors.Errorf("checking sealer API: %w", err) + } + cfg.Subsystems.SealerApiInfo = ai + } + + if !cfg.Subsystems.EnableSectorStorage { + ai, err := checkApiInfo(ctx, cctx.String("api-sector-index")) + if err != nil { + return xerrors.Errorf("checking sector index API: %w", err) + } + cfg.Subsystems.SectorIndexApiInfo = ai + } + + return nil + }, func(api lapi.FullNode, maddr address.Address, peerid peer.ID, mi miner.MinerInfo) error { + if es.Contains(MarketsService) { + log.Info("Configuring miner actor") + + if err := configureStorageMiner(ctx, api, maddr, peerid, big.Zero()); err != nil { + return err + } + } + + return nil + }); err != nil { + return err + } + + return nil + }, +} + +type EnabledServices []string + +func (es EnabledServices) Contains(name string) bool { + for _, s 
:= range es { + if s == name { + return true + } + } + return false +} + +func checkApiInfo(ctx context.Context, ai string) (string, error) { + ai = strings.TrimPrefix(strings.TrimSpace(ai), "MINER_API_INFO=") + info := cliutil.ParseApiInfo(ai) + addr, err := info.DialArgs("v0") + if err != nil { + return "", xerrors.Errorf("could not get DialArgs: %w", err) + } + + log.Infof("Checking api version of %s", addr) + + api, closer, err := client.NewStorageMinerRPCV0(ctx, addr, info.AuthHeader()) + if err != nil { + return "", err + } + defer closer() + + v, err := api.Version(ctx) + if err != nil { + return "", xerrors.Errorf("checking version: %w", err) + } + + if !v.APIVersion.EqMajorMinor(lapi.MinerAPIVersion0) { + return "", xerrors.Errorf("remote service API version didn't match (expected %s, remote %s)", lapi.MinerAPIVersion0, v.APIVersion) + } + + return ai, nil +} diff --git a/cmd/lotus-storage-miner/main.go b/cmd/lotus-storage-miner/main.go index f5ff25177..c555531d6 100644 --- a/cmd/lotus-storage-miner/main.go +++ b/cmd/lotus-storage-miner/main.go @@ -4,6 +4,7 @@ import ( "context" "fmt" + "github.com/fatih/color" logging "github.com/ipfs/go-log/v2" "github.com/urfave/cli/v2" "go.opencensus.io/trace" @@ -61,9 +62,14 @@ func main() { trace.UnregisterExporter(jaeger) jaeger = tracing.SetupJaegerTracing("lotus/" + cmd.Name) + if cctx.IsSet("color") { + color.NoColor = !cctx.Bool("color") + } + if originBefore != nil { return originBefore(cctx) } + return nil } } @@ -81,7 +87,10 @@ func main() { Aliases: []string{"a"}, }, &cli.BoolFlag{ - Name: "color", + // examined in the Before above + Name: "color", + Usage: "use color in display output", + DefaultText: "depends on output being a TTY", }, &cli.StringFlag{ Name: "repo", diff --git a/cmd/lotus-storage-miner/market.go b/cmd/lotus-storage-miner/market.go index f46ad32bf..b216d24fc 100644 --- a/cmd/lotus-storage-miner/market.go +++ b/cmd/lotus-storage-miner/market.go @@ -15,6 +15,7 @@ import ( tm 
"github.com/buger/goterm" "github.com/docker/go-units" + "github.com/fatih/color" "github.com/ipfs/go-cid" "github.com/ipfs/go-cidutil/cidenc" "github.com/libp2p/go-libp2p-core/peer" @@ -752,9 +753,9 @@ var transfersListCmd = &cli.Command{ Usage: "print verbose transfer details", }, &cli.BoolFlag{ - Name: "color", - Usage: "use color in display output", - Value: true, + Name: "color", + Usage: "use color in display output", + DefaultText: "depends on output being a TTY", }, &cli.BoolFlag{ Name: "completed", @@ -770,6 +771,10 @@ var transfersListCmd = &cli.Command{ }, }, Action: func(cctx *cli.Context) error { + if cctx.IsSet("color") { + color.NoColor = !cctx.Bool("color") + } + api, closer, err := lcli.GetStorageMinerAPI(cctx) if err != nil { return err @@ -784,7 +789,6 @@ var transfersListCmd = &cli.Command{ verbose := cctx.Bool("verbose") completed := cctx.Bool("completed") - color := cctx.Bool("color") watch := cctx.Bool("watch") showFailed := cctx.Bool("show-failed") if watch { @@ -798,7 +802,7 @@ var transfersListCmd = &cli.Command{ tm.MoveCursor(1, 1) - lcli.OutputDataTransferChannels(tm.Screen, channels, verbose, completed, color, showFailed) + lcli.OutputDataTransferChannels(tm.Screen, channels, verbose, completed, showFailed) tm.Flush() @@ -823,7 +827,7 @@ var transfersListCmd = &cli.Command{ } } } - lcli.OutputDataTransferChannels(os.Stdout, channels, verbose, completed, color, showFailed) + lcli.OutputDataTransferChannels(os.Stdout, channels, verbose, completed, showFailed) return nil }, } diff --git a/cmd/lotus-storage-miner/proving.go b/cmd/lotus-storage-miner/proving.go index 66007b63d..5dfe5d4ce 100644 --- a/cmd/lotus-storage-miner/proving.go +++ b/cmd/lotus-storage-miner/proving.go @@ -36,8 +36,6 @@ var provingFaultsCmd = &cli.Command{ Name: "faults", Usage: "View the currently known proving faulty sectors information", Action: func(cctx *cli.Context) error { - color.NoColor = !cctx.Bool("color") - api, acloser, err := lcli.GetFullNodeAPI(cctx) if 
err != nil { return err @@ -90,8 +88,6 @@ var provingInfoCmd = &cli.Command{ Name: "info", Usage: "View current state information", Action: func(cctx *cli.Context) error { - color.NoColor = !cctx.Bool("color") - api, acloser, err := lcli.GetFullNodeAPI(cctx) if err != nil { return err @@ -171,7 +167,7 @@ var provingInfoCmd = &cli.Command{ var faultPerc float64 if proving > 0 { - faultPerc = float64(faults*10000/proving) / 100 + faultPerc = float64(faults * 100 / proving) } fmt.Printf("Current Epoch: %d\n", cd.CurrentEpoch) @@ -197,8 +193,6 @@ var provingDeadlinesCmd = &cli.Command{ Name: "deadlines", Usage: "View the current proving period deadlines information", Action: func(cctx *cli.Context) error { - color.NoColor = !cctx.Bool("color") - api, acloser, err := lcli.GetFullNodeAPI(cctx) if err != nil { return err diff --git a/cmd/lotus-storage-miner/retrieval-deals.go b/cmd/lotus-storage-miner/retrieval-deals.go index 03d397852..0411f7f13 100644 --- a/cmd/lotus-storage-miner/retrieval-deals.go +++ b/cmd/lotus-storage-miner/retrieval-deals.go @@ -235,7 +235,7 @@ var retrievalSetAskCmd = &cli.Command{ var retrievalGetAskCmd = &cli.Command{ Name: "get-ask", - Usage: "Get the provider's current retrieval ask", + Usage: "Get the provider's current retrieval ask configured by the provider in the ask-store using the set-ask CLI command", Flags: []cli.Flag{}, Action: func(cctx *cli.Context) error { ctx := lcli.DaemonContext(cctx) diff --git a/cmd/lotus-storage-miner/run.go b/cmd/lotus-storage-miner/run.go index 5d67cf33d..f276f319c 100644 --- a/cmd/lotus-storage-miner/run.go +++ b/cmd/lotus-storage-miner/run.go @@ -1,37 +1,28 @@ package main import ( - "context" - "net" - "net/http" + "fmt" _ "net/http/pprof" "os" - "os/signal" - "syscall" "github.com/filecoin-project/lotus/api/v1api" "github.com/filecoin-project/lotus/api/v0api" - mux "github.com/gorilla/mux" "github.com/multiformats/go-multiaddr" - manet "github.com/multiformats/go-multiaddr/net" 
"github.com/urfave/cli/v2" "go.opencensus.io/stats" "go.opencensus.io/stats/view" "go.opencensus.io/tag" "golang.org/x/xerrors" - "github.com/filecoin-project/go-jsonrpc" - "github.com/filecoin-project/go-jsonrpc/auth" - "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/build" lcli "github.com/filecoin-project/lotus/cli" "github.com/filecoin-project/lotus/lib/ulimit" "github.com/filecoin-project/lotus/metrics" "github.com/filecoin-project/lotus/node" - "github.com/filecoin-project/lotus/node/impl" + "github.com/filecoin-project/lotus/node/config" "github.com/filecoin-project/lotus/node/modules/dtypes" "github.com/filecoin-project/lotus/node/repo" ) @@ -128,13 +119,33 @@ var runCmd = &cli.Command{ return xerrors.Errorf("repo at '%s' is not initialized, run 'lotus-miner init' to set it up", minerRepoPath) } + lr, err := r.Lock(repo.StorageMiner) + if err != nil { + return err + } + c, err := lr.Config() + if err != nil { + return err + } + cfg, ok := c.(*config.StorageMiner) + if !ok { + return xerrors.Errorf("invalid config for repo, got: %T", c) + } + + bootstrapLibP2P := cfg.Subsystems.EnableMarkets + + err = lr.Close() + if err != nil { + return err + } + shutdownChan := make(chan struct{}) var minerapi api.StorageMiner stop, err := node.New(ctx, - node.StorageMiner(&minerapi), + node.StorageMiner(&minerapi, cfg.Subsystems), node.Override(new(dtypes.ShutdownChan), shutdownChan), - node.Online(), + node.Base(), node.Repo(r), node.ApplyIf(func(s *node.Settings) bool { return cctx.IsSet("miner-api") }, @@ -152,66 +163,41 @@ var runCmd = &cli.Command{ return xerrors.Errorf("getting API endpoint: %w", err) } - // Bootstrap with full node - remoteAddrs, err := nodeApi.NetAddrsListen(ctx) - if err != nil { - return xerrors.Errorf("getting full node libp2p address: %w", err) - } + if bootstrapLibP2P { + log.Infof("Bootstrapping libp2p network with full node") - if err := minerapi.NetConnect(ctx, remoteAddrs); err != nil { - return 
xerrors.Errorf("connecting to full node (libp2p): %w", err) + // Bootstrap with full node + remoteAddrs, err := nodeApi.NetAddrsListen(ctx) + if err != nil { + return xerrors.Errorf("getting full node libp2p address: %w", err) + } + + if err := minerapi.NetConnect(ctx, remoteAddrs); err != nil { + return xerrors.Errorf("connecting to full node (libp2p): %w", err) + } } log.Infof("Remote version %s", v) - lst, err := manet.Listen(endpoint) + // Instantiate the miner node handler. + handler, err := node.MinerHandler(minerapi, true) if err != nil { - return xerrors.Errorf("could not listen: %w", err) + return xerrors.Errorf("failed to instantiate rpc handler: %w", err) } - mux := mux.NewRouter() - - rpcServer := jsonrpc.NewServer() - rpcServer.Register("Filecoin", api.PermissionedStorMinerAPI(metrics.MetricedStorMinerAPI(minerapi))) - - mux.Handle("/rpc/v0", rpcServer) - mux.PathPrefix("/remote").HandlerFunc(minerapi.(*impl.StorageMinerAPI).ServeRemote) - mux.Handle("/debug/metrics", metrics.Exporter()) - mux.PathPrefix("/").Handler(http.DefaultServeMux) // pprof - - ah := &auth.Handler{ - Verify: minerapi.AuthVerify, - Next: mux.ServeHTTP, + // Serve the RPC. + rpcStopper, err := node.ServeRPC(handler, "lotus-miner", endpoint) + if err != nil { + return fmt.Errorf("failed to start json-rpc endpoint: %s", err) } - srv := &http.Server{ - Handler: ah, - BaseContext: func(listener net.Listener) context.Context { - ctx, _ := tag.New(context.Background(), tag.Upsert(metrics.APIInterface, "lotus-miner")) - return ctx - }, - } + // Monitor for shutdown. 
+ finishCh := node.MonitorShutdown(shutdownChan, + node.ShutdownHandler{Component: "rpc server", StopFunc: rpcStopper}, + node.ShutdownHandler{Component: "miner", StopFunc: stop}, + ) - sigChan := make(chan os.Signal, 2) - go func() { - select { - case sig := <-sigChan: - log.Warnw("received shutdown", "signal", sig) - case <-shutdownChan: - log.Warn("received shutdown") - } - - log.Warn("Shutting down...") - if err := stop(context.TODO()); err != nil { - log.Errorf("graceful shutting down failed: %s", err) - } - if err := srv.Shutdown(context.TODO()); err != nil { - log.Errorf("shutting down RPC server failed: %s", err) - } - log.Warn("Graceful shutdown successful") - }() - signal.Notify(sigChan, syscall.SIGTERM, syscall.SIGINT) - - return srv.Serve(manet.NetListener(lst)) + <-finishCh + return nil }, } diff --git a/cmd/lotus-storage-miner/sealing.go b/cmd/lotus-storage-miner/sealing.go index ad890129d..3bf4c675f 100644 --- a/cmd/lotus-storage-miner/sealing.go +++ b/cmd/lotus-storage-miner/sealing.go @@ -36,10 +36,16 @@ var sealingWorkersCmd = &cli.Command{ Name: "workers", Usage: "list workers", Flags: []cli.Flag{ - &cli.BoolFlag{Name: "color"}, + &cli.BoolFlag{ + Name: "color", + Usage: "use color in display output", + DefaultText: "depends on output being a TTY", + }, }, Action: func(cctx *cli.Context) error { - color.NoColor = !cctx.Bool("color") + if cctx.IsSet("color") { + color.NoColor = !cctx.Bool("color") + } nodeApi, closer, err := lcli.GetStorageMinerAPI(cctx) if err != nil { @@ -127,14 +133,20 @@ var sealingJobsCmd = &cli.Command{ Name: "jobs", Usage: "list running jobs", Flags: []cli.Flag{ - &cli.BoolFlag{Name: "color"}, + &cli.BoolFlag{ + Name: "color", + Usage: "use color in display output", + DefaultText: "depends on output being a TTY", + }, &cli.BoolFlag{ Name: "show-ret-done", Usage: "show returned but not consumed calls", }, }, Action: func(cctx *cli.Context) error { - color.NoColor = !cctx.Bool("color") + if cctx.IsSet("color") { + 
color.NoColor = !cctx.Bool("color") + } nodeApi, closer, err := lcli.GetStorageMinerAPI(cctx) if err != nil { diff --git a/cmd/lotus-storage-miner/sectors.go b/cmd/lotus-storage-miner/sectors.go index 2476c16e8..5c4581bbc 100644 --- a/cmd/lotus-storage-miner/sectors.go +++ b/cmd/lotus-storage-miner/sectors.go @@ -161,9 +161,10 @@ var sectorsListCmd = &cli.Command{ Usage: "show removed sectors", }, &cli.BoolFlag{ - Name: "color", - Aliases: []string{"c"}, - Value: true, + Name: "color", + Usage: "use color in display output", + DefaultText: "depends on output being a TTY", + Aliases: []string{"c"}, }, &cli.BoolFlag{ Name: "fast", @@ -183,7 +184,9 @@ var sectorsListCmd = &cli.Command{ }, }, Action: func(cctx *cli.Context) error { - color.NoColor = !cctx.Bool("color") + if cctx.IsSet("color") { + color.NoColor = !cctx.Bool("color") + } nodeApi, closer, err := lcli.GetStorageMinerAPI(cctx) if err != nil { @@ -436,6 +439,12 @@ var sectorsExtendCmd = &cli.Command{ Usage: "when extending v1 sectors, don't try to extend sectors by fewer than this number of epochs", Required: false, }, + &cli.Int64Flag{ + Name: "expiration-ignore", + Value: 120, + Usage: "when extending v1 sectors, skip sectors whose current expiration is less than epochs from now", + Required: false, + }, &cli.Int64Flag{ Name: "expiration-cutoff", Usage: "when extending v1 sectors, skip sectors whose current expiration is more than epochs from now (infinity if unspecified)", @@ -494,6 +503,10 @@ var sectorsExtendCmd = &cli.Command{ continue } + if si.Expiration < (head.Height() + abi.ChainEpoch(cctx.Int64("expiration-ignore"))) { + continue + } + if cctx.IsSet("expiration-cutoff") { if si.Expiration > (head.Height() + abi.ChainEpoch(cctx.Int64("expiration-cutoff"))) { continue @@ -508,6 +521,10 @@ var sectorsExtendCmd = &cli.Command{ // Set the new expiration to 48 hours less than the theoretical maximum lifetime newExp := ml - (miner3.WPoStProvingPeriod * 2) + si.Activation + if 
withinTolerance(si.Expiration, newExp) || si.Expiration >= newExp { + continue + } + p, err := api.StateSectorPartition(ctx, maddr, si.SectorNumber, types.EmptyTSK) if err != nil { return xerrors.Errorf("getting sector location for sector %d: %w", si.SectorNumber, err) @@ -525,7 +542,7 @@ var sectorsExtendCmd = &cli.Command{ } else { added := false for exp := range es { - if withinTolerance(exp, newExp) { + if withinTolerance(exp, newExp) && newExp >= exp && exp > si.Expiration { es[exp] = append(es[exp], uint64(si.SectorNumber)) added = true break diff --git a/cmd/lotus-storage-miner/storage.go b/cmd/lotus-storage-miner/storage.go index f2068ea86..e7508eb29 100644 --- a/cmd/lotus-storage-miner/storage.go +++ b/cmd/lotus-storage-miner/storage.go @@ -166,13 +166,19 @@ var storageListCmd = &cli.Command{ Name: "list", Usage: "list local storage paths", Flags: []cli.Flag{ - &cli.BoolFlag{Name: "color"}, + &cli.BoolFlag{ + Name: "color", + Usage: "use color in display output", + DefaultText: "depends on output being a TTY", + }, }, Subcommands: []*cli.Command{ storageListSectorsCmd, }, Action: func(cctx *cli.Context) error { - color.NoColor = !cctx.Bool("color") + if cctx.IsSet("color") { + color.NoColor = !cctx.Bool("color") + } nodeApi, closer, err := lcli.GetStorageMinerAPI(cctx) if err != nil { @@ -478,12 +484,15 @@ var storageListSectorsCmd = &cli.Command{ Usage: "get list of all sector files", Flags: []cli.Flag{ &cli.BoolFlag{ - Name: "color", - Value: true, + Name: "color", + Usage: "use color in display output", + DefaultText: "depends on output being a TTY", }, }, Action: func(cctx *cli.Context) error { - color.NoColor = !cctx.Bool("color") + if cctx.IsSet("color") { + color.NoColor = !cctx.Bool("color") + } nodeApi, closer, err := lcli.GetStorageMinerAPI(cctx) if err != nil { diff --git a/cmd/lotus-wallet/main.go b/cmd/lotus-wallet/main.go index 2c86c6180..3e3aa1a58 100644 --- a/cmd/lotus-wallet/main.go +++ b/cmd/lotus-wallet/main.go @@ -2,27 +2,33 @@ package 
main import ( "context" + "fmt" "net" "net/http" "os" "github.com/filecoin-project/lotus/api/v0api" + "github.com/gbrlsnchs/jwt/v3" "github.com/gorilla/mux" logging "github.com/ipfs/go-log/v2" "github.com/urfave/cli/v2" "go.opencensus.io/stats/view" "go.opencensus.io/tag" + "golang.org/x/xerrors" "github.com/filecoin-project/go-jsonrpc" + "github.com/filecoin-project/go-jsonrpc/auth" "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/wallet" ledgerwallet "github.com/filecoin-project/lotus/chain/wallet/ledger" lcli "github.com/filecoin-project/lotus/cli" "github.com/filecoin-project/lotus/lib/lotuslog" "github.com/filecoin-project/lotus/metrics" + "github.com/filecoin-project/lotus/node/modules" "github.com/filecoin-project/lotus/node/repo" ) @@ -30,17 +36,33 @@ var log = logging.Logger("main") const FlagWalletRepo = "wallet-repo" +type jwtPayload struct { + Allow []auth.Permission +} + func main() { lotuslog.SetupLogLevels() local := []*cli.Command{ runCmd, + getApiKeyCmd, } app := &cli.App{ Name: "lotus-wallet", Usage: "Basic external wallet", Version: build.UserVersion(), + Description: ` +lotus-wallet provides a remote wallet service for lotus. 
+ +To configure your lotus node to use a remote wallet: +* Run 'lotus-wallet get-api-key' to generate API key +* Start lotus-wallet using 'lotus-wallet run' (see --help for additional flags) +* Edit lotus config (~/.lotus/config.toml) + * Find the '[Wallet]' section + * Set 'RemoteBackend' to '[api key]:http://[wallet ip]:[wallet port]' + (the default port is 1777) +* Start (or restart) the lotus daemon`, Flags: []cli.Flag{ &cli.StringFlag{ Name: FlagWalletRepo, @@ -65,6 +87,35 @@ func main() { } } +var getApiKeyCmd = &cli.Command{ + Name: "get-api-key", + Usage: "Generate API Key", + Action: func(cctx *cli.Context) error { + lr, ks, err := openRepo(cctx) + if err != nil { + return err + } + defer lr.Close() // nolint + + p := jwtPayload{ + Allow: []auth.Permission{api.PermAdmin}, + } + + authKey, err := modules.APISecret(ks, lr) + if err != nil { + return xerrors.Errorf("setting up api secret: %w", err) + } + + k, err := jwt.Sign(&p, (*jwt.HMACSHA)(authKey)) + if err != nil { + return xerrors.Errorf("jwt sign: %w", err) + } + + fmt.Println(string(k)) + return nil + }, +} + var runCmd = &cli.Command{ Name: "run", Usage: "Start lotus wallet", @@ -86,7 +137,13 @@ var runCmd = &cli.Command{ Name: "offline", Usage: "don't query chain state in interactive mode", }, + &cli.BoolFlag{ + Name: "disable-auth", + Usage: "(insecure) disable api auth", + Hidden: true, + }, }, + Description: "For setup instructions see 'lotus-wallet --help'", Action: func(cctx *cli.Context) error { log.Info("Starting lotus wallet") @@ -101,31 +158,11 @@ var runCmd = &cli.Command{ log.Fatalf("Cannot register the view: %v", err) } - repoPath := cctx.String(FlagWalletRepo) - r, err := repo.NewFS(repoPath) - if err != nil { - return err - } - - ok, err := r.Exists() - if err != nil { - return err - } - if !ok { - if err := r.Init(repo.Worker); err != nil { - return err - } - } - - lr, err := r.Lock(repo.Wallet) - if err != nil { - return err - } - - ks, err := lr.KeyStore() + lr, ks, err := 
openRepo(cctx) if err != nil { return err } + defer lr.Close() // nolint lw, err := wallet.NewWallet(ks) if err != nil { @@ -167,19 +204,43 @@ var runCmd = &cli.Command{ w = &LoggedWallet{under: w} } + rpcApi := metrics.MetricedWalletAPI(w) + if !cctx.Bool("disable-auth") { + rpcApi = api.PermissionedWalletAPI(rpcApi) + } + rpcServer := jsonrpc.NewServer() - rpcServer.Register("Filecoin", metrics.MetricedWalletAPI(w)) + rpcServer.Register("Filecoin", rpcApi) mux.Handle("/rpc/v0", rpcServer) mux.PathPrefix("/").Handler(http.DefaultServeMux) // pprof - /*ah := &auth.Handler{ - Verify: nodeApi.AuthVerify, - Next: mux.ServeHTTP, - }*/ + var handler http.Handler = mux + + if !cctx.Bool("disable-auth") { + authKey, err := modules.APISecret(ks, lr) + if err != nil { + return xerrors.Errorf("setting up api secret: %w", err) + } + + authVerify := func(ctx context.Context, token string) ([]auth.Permission, error) { + var payload jwtPayload + if _, err := jwt.Verify([]byte(token), (*jwt.HMACSHA)(authKey), &payload); err != nil { + return nil, xerrors.Errorf("JWT Verification failed: %w", err) + } + + return payload.Allow, nil + } + + log.Info("API auth enabled, use 'lotus-wallet get-api-key' to get API key") + handler = &auth.Handler{ + Verify: authVerify, + Next: mux.ServeHTTP, + } + } srv := &http.Server{ - Handler: mux, + Handler: handler, BaseContext: func(listener net.Listener) context.Context { ctx, _ := tag.New(context.Background(), tag.Upsert(metrics.APIInterface, "lotus-wallet")) return ctx @@ -203,3 +264,33 @@ var runCmd = &cli.Command{ return srv.Serve(nl) }, } + +func openRepo(cctx *cli.Context) (repo.LockedRepo, types.KeyStore, error) { + repoPath := cctx.String(FlagWalletRepo) + r, err := repo.NewFS(repoPath) + if err != nil { + return nil, nil, err + } + + ok, err := r.Exists() + if err != nil { + return nil, nil, err + } + if !ok { + if err := r.Init(repo.Worker); err != nil { + return nil, nil, err + } + } + + lr, err := r.Lock(repo.Wallet) + if err != nil { 
+ return nil, nil, err + } + + ks, err := lr.KeyStore() + if err != nil { + return nil, nil, err + } + + return lr, ks, nil +} diff --git a/cmd/lotus/daemon.go b/cmd/lotus/daemon.go index 644892ee2..0d5961aae 100644 --- a/cmd/lotus/daemon.go +++ b/cmd/lotus/daemon.go @@ -15,6 +15,7 @@ import ( "runtime/pprof" "strings" + "github.com/filecoin-project/go-jsonrpc" paramfetch "github.com/filecoin-project/go-paramfetch" metricsprom "github.com/ipfs/go-metrics-prometheus" "github.com/mitchellh/go-homedir" @@ -313,7 +314,7 @@ var DaemonCmd = &cli.Command{ stop, err := node.New(ctx, node.FullAPI(&api, node.Lite(isLite)), - node.Online(), + node.Base(), node.Repo(r), node.Override(new(dtypes.Bootstrapper), isBootstrapper), @@ -351,8 +352,37 @@ var DaemonCmd = &cli.Command{ return xerrors.Errorf("getting api endpoint: %w", err) } + // + // Instantiate JSON-RPC endpoint. + // ---- + + // Populate JSON-RPC options. + serverOptions := make([]jsonrpc.ServerOption, 0) + if maxRequestSize := cctx.Int("api-max-req-size"); maxRequestSize != 0 { + serverOptions = append(serverOptions, jsonrpc.WithMaxRequestSize(int64(maxRequestSize))) + } + + // Instantiate the full node handler. + h, err := node.FullNodeHandler(api, true, serverOptions...) + if err != nil { + return fmt.Errorf("failed to instantiate rpc handler: %s", err) + } + + // Serve the RPC. + rpcStopper, err := node.ServeRPC(h, "lotus-daemon", endpoint) + if err != nil { + return fmt.Errorf("failed to start json-rpc endpoint: %s", err) + } + + // Monitor for shutdown. + finishCh := node.MonitorShutdown(shutdownChan, + node.ShutdownHandler{Component: "rpc server", StopFunc: rpcStopper}, + node.ShutdownHandler{Component: "node", StopFunc: stop}, + ) + <-finishCh // fires when shutdown is complete. 
+ // TODO: properly parse api endpoint (or make it a URL) - return serveRPC(api, stop, endpoint, shutdownChan, int64(cctx.Int("api-max-req-size"))) + return nil }, Subcommands: []*cli.Command{ daemonStopCmd, diff --git a/cmd/lotus/main.go b/cmd/lotus/main.go index af9c56735..63d01f891 100644 --- a/cmd/lotus/main.go +++ b/cmd/lotus/main.go @@ -2,7 +2,10 @@ package main import ( "context" + "os" + logging "github.com/ipfs/go-log/v2" + "github.com/mattn/go-isatty" "github.com/urfave/cli/v2" "go.opencensus.io/trace" @@ -14,6 +17,8 @@ import ( "github.com/filecoin-project/lotus/node/repo" ) +var log = logging.Logger("main") + var AdvanceBlockCmd *cli.Command func main() { @@ -52,6 +57,8 @@ func main() { ctx, span := trace.StartSpan(context.Background(), "/cli") defer span.End() + interactiveDef := isatty.IsTerminal(os.Stdout.Fd()) || isatty.IsCygwinTerminal(os.Stdout.Fd()) + app := &cli.App{ Name: "lotus", Usage: "Filecoin decentralized storage network client", @@ -64,10 +71,20 @@ func main() { Hidden: true, Value: "~/.lotus", // TODO: Consider XDG_DATA_HOME }, + &cli.BoolFlag{ + Name: "interactive", + Usage: "setting to false will disable interactive functionality of commands", + Value: interactiveDef, + }, + &cli.BoolFlag{ + Name: "force-send", + Usage: "if true, will ignore pre-send checks", + }, }, Commands: append(local, lcli.Commands...), } + app.Setup() app.Metadata["traceContext"] = ctx app.Metadata["repoType"] = repo.FullNode diff --git a/cmd/lotus/pprof.go b/cmd/lotus/pprof.go deleted file mode 100644 index ea6823e48..000000000 --- a/cmd/lotus/pprof.go +++ /dev/null @@ -1,33 +0,0 @@ -package main - -import ( - "net/http" - "strconv" -) - -func handleFractionOpt(name string, setter func(int)) http.HandlerFunc { - return func(rw http.ResponseWriter, r *http.Request) { - if r.Method != http.MethodPost { - http.Error(rw, "only POST allowed", http.StatusMethodNotAllowed) - return - } - if err := r.ParseForm(); err != nil { - http.Error(rw, err.Error(), 
http.StatusBadRequest) - return - } - - asfr := r.Form.Get("x") - if len(asfr) == 0 { - http.Error(rw, "parameter 'x' must be set", http.StatusBadRequest) - return - } - - fr, err := strconv.Atoi(asfr) - if err != nil { - http.Error(rw, err.Error(), http.StatusBadRequest) - return - } - log.Infof("setting %s to %d", name, fr) - setter(fr) - } -} diff --git a/cmd/lotus/rpc.go b/cmd/lotus/rpc.go deleted file mode 100644 index 95050d639..000000000 --- a/cmd/lotus/rpc.go +++ /dev/null @@ -1,138 +0,0 @@ -package main - -import ( - "context" - "encoding/json" - "net" - "net/http" - _ "net/http/pprof" - "os" - "os/signal" - "runtime" - "syscall" - - "github.com/ipfs/go-cid" - logging "github.com/ipfs/go-log/v2" - "github.com/multiformats/go-multiaddr" - manet "github.com/multiformats/go-multiaddr/net" - "go.opencensus.io/tag" - "golang.org/x/xerrors" - - "github.com/filecoin-project/go-jsonrpc" - "github.com/filecoin-project/go-jsonrpc/auth" - - "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/api/v0api" - "github.com/filecoin-project/lotus/api/v1api" - "github.com/filecoin-project/lotus/metrics" - "github.com/filecoin-project/lotus/node" - "github.com/filecoin-project/lotus/node/impl" -) - -var log = logging.Logger("main") - -func serveRPC(a v1api.FullNode, stop node.StopFunc, addr multiaddr.Multiaddr, shutdownCh <-chan struct{}, maxRequestSize int64) error { - serverOptions := make([]jsonrpc.ServerOption, 0) - if maxRequestSize != 0 { // config set - serverOptions = append(serverOptions, jsonrpc.WithMaxRequestSize(maxRequestSize)) - } - serveRpc := func(path string, hnd interface{}) { - rpcServer := jsonrpc.NewServer(serverOptions...) 
- rpcServer.Register("Filecoin", hnd) - - ah := &auth.Handler{ - Verify: a.AuthVerify, - Next: rpcServer.ServeHTTP, - } - - http.Handle(path, ah) - } - - pma := api.PermissionedFullAPI(metrics.MetricedFullAPI(a)) - - serveRpc("/rpc/v1", pma) - serveRpc("/rpc/v0", &v0api.WrapperV1Full{FullNode: pma}) - - importAH := &auth.Handler{ - Verify: a.AuthVerify, - Next: handleImport(a.(*impl.FullNodeAPI)), - } - - http.Handle("/rest/v0/import", importAH) - - http.Handle("/debug/metrics", metrics.Exporter()) - http.Handle("/debug/pprof-set/block", handleFractionOpt("BlockProfileRate", runtime.SetBlockProfileRate)) - http.Handle("/debug/pprof-set/mutex", handleFractionOpt("MutexProfileFraction", - func(x int) { runtime.SetMutexProfileFraction(x) }, - )) - - lst, err := manet.Listen(addr) - if err != nil { - return xerrors.Errorf("could not listen: %w", err) - } - - srv := &http.Server{ - Handler: http.DefaultServeMux, - BaseContext: func(listener net.Listener) context.Context { - ctx, _ := tag.New(context.Background(), tag.Upsert(metrics.APIInterface, "lotus-daemon")) - return ctx - }, - } - - sigCh := make(chan os.Signal, 2) - shutdownDone := make(chan struct{}) - go func() { - select { - case sig := <-sigCh: - log.Warnw("received shutdown", "signal", sig) - case <-shutdownCh: - log.Warn("received shutdown") - } - - log.Warn("Shutting down...") - if err := srv.Shutdown(context.TODO()); err != nil { - log.Errorf("shutting down RPC server failed: %s", err) - } - if err := stop(context.TODO()); err != nil { - log.Errorf("graceful shutting down failed: %s", err) - } - log.Warn("Graceful shutdown successful") - _ = log.Sync() //nolint:errcheck - close(shutdownDone) - }() - signal.Notify(sigCh, syscall.SIGTERM, syscall.SIGINT) - - err = srv.Serve(manet.NetListener(lst)) - if err == http.ErrServerClosed { - <-shutdownDone - return nil - } - return err -} - -func handleImport(a *impl.FullNodeAPI) func(w http.ResponseWriter, r *http.Request) { - return func(w http.ResponseWriter, r 
*http.Request) { - if r.Method != "PUT" { - w.WriteHeader(404) - return - } - if !auth.HasPerm(r.Context(), nil, api.PermWrite) { - w.WriteHeader(401) - _ = json.NewEncoder(w).Encode(struct{ Error string }{"unauthorized: missing write permission"}) - return - } - - c, err := a.ClientImportLocal(r.Context(), r.Body) - if err != nil { - w.WriteHeader(500) - _ = json.NewEncoder(w).Encode(struct{ Error string }{err.Error()}) - return - } - w.WriteHeader(200) - err = json.NewEncoder(w).Encode(struct{ Cid cid.Cid }{c}) - if err != nil { - log.Errorf("/rest/v0/import: Writing response failed: %+v", err) - return - } - } -} diff --git a/cmd/tvx/extract_message.go b/cmd/tvx/extract_message.go index 8e993cbd3..71035867f 100644 --- a/cmd/tvx/extract_message.go +++ b/cmd/tvx/extract_message.go @@ -337,6 +337,9 @@ func resolveFromChain(ctx context.Context, api v0api.FullNode, mcid cid.Cid, blo if err != nil { return nil, nil, nil, fmt.Errorf("failed to locate message: %w", err) } + if msgInfo == nil { + return nil, nil, nil, fmt.Errorf("failed to locate message: not found") + } log.Printf("located message at tipset %s (height: %d) with exit code: %s", msgInfo.TipSet, msgInfo.Height, msgInfo.Receipt.ExitCode) diff --git a/conformance/driver.go b/conformance/driver.go index 70100700e..c7fc0d6c4 100644 --- a/conformance/driver.go +++ b/conformance/driver.go @@ -141,16 +141,11 @@ func (d *Driver) ExecuteTipset(bs blockstore.Blockstore, ds ds.Batching, params blocks = append(blocks, sb) } - var ( - messages []*types.Message - results []*vm.ApplyRet - ) - - recordOutputs := func(_ cid.Cid, msg *types.Message, ret *vm.ApplyRet) error { - messages = append(messages, msg) - results = append(results, ret) - return nil + recordOutputs := &outputRecorder{ + messages: []*types.Message{}, + results: []*vm.ApplyRet{}, } + postcid, receiptsroot, err := sm.ApplyBlocks(context.Background(), params.ParentEpoch, params.Preroot, @@ -169,8 +164,8 @@ func (d *Driver) ExecuteTipset(bs 
blockstore.Blockstore, ds ds.Batching, params ret := &ExecuteTipsetResult{ ReceiptsRoot: receiptsroot, PostStateRoot: postcid, - AppliedMessages: messages, - AppliedResults: results, + AppliedMessages: recordOutputs.messages, + AppliedResults: recordOutputs.results, } return ret, nil } @@ -284,3 +279,14 @@ func CircSupplyOrDefault(circSupply *gobig.Int) abi.TokenAmount { } return big.NewFromGo(circSupply) } + +type outputRecorder struct { + messages []*types.Message + results []*vm.ApplyRet +} + +func (o *outputRecorder) MessageApplied(ctx context.Context, ts *types.TipSet, mcid cid.Cid, msg *types.Message, ret *vm.ApplyRet, implicit bool) error { + o.messages = append(o.messages, msg) + o.results = append(o.results, ret) + return nil +} diff --git a/documentation/en/api-v0-methods-miner.md b/documentation/en/api-v0-methods-miner.md index 53d485815..b488e8996 100644 --- a/documentation/en/api-v0-methods-miner.md +++ b/documentation/en/api-v0-methods-miner.md @@ -98,6 +98,7 @@ * [SealingAbort](#SealingAbort) * [SealingSchedDiag](#SealingSchedDiag) * [Sector](#Sector) + * [SectorAddPieceToAny](#SectorAddPieceToAny) * [SectorCommitFlush](#SectorCommitFlush) * [SectorCommitPending](#SectorCommitPending) * [SectorGetExpectedSealDuration](#SectorGetExpectedSealDuration) @@ -118,6 +119,7 @@ * [SectorsRefs](#SectorsRefs) * [SectorsStatus](#SectorsStatus) * [SectorsSummary](#SectorsSummary) + * [SectorsUnsealPiece](#SectorsUnsealPiece) * [SectorsUpdate](#SectorsUpdate) * [Storage](#Storage) * [StorageAddLocal](#StorageAddLocal) @@ -227,6 +229,7 @@ Response: "PreCommitControl": null, "CommitControl": null, "TerminateControl": null, + "DealPublishControl": null, "DisableOwnerFallback": true, "DisableWorkerFallback": true } @@ -889,8 +892,8 @@ Inputs: `null` Response: ```json { - "Addrs": null, - "ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf" + "ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "Addrs": [] } ``` @@ -1039,8 +1042,8 @@ Inputs: ```json 
[ { - "Addrs": null, - "ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf" + "ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "Addrs": [] } ] ``` @@ -1090,8 +1093,8 @@ Inputs: Response: ```json { - "Addrs": null, - "ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf" + "ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "Addrs": [] } ``` @@ -1560,6 +1563,54 @@ Response: `{}` ## Sector +### SectorAddPieceToAny +Add piece to an open sector. If no sectors with enough space are open, +either a new sector will be created, or this call will block until more +sectors can be created. + + +Perms: admin + +Inputs: +```json +[ + 1024, + {}, + { + "PublishCid": null, + "DealID": 5432, + "DealProposal": { + "PieceCID": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "PieceSize": 1032, + "VerifiedDeal": true, + "Client": "f01234", + "Provider": "f01234", + "Label": "string value", + "StartEpoch": 10101, + "EndEpoch": 10101, + "StoragePricePerEpoch": "0", + "ProviderCollateral": "0", + "ClientCollateral": "0" + }, + "DealSchedule": { + "StartEpoch": 10101, + "EndEpoch": 10101 + }, + "KeepUnsealed": true + } +] +``` + +Response: +```json +{ + "Sector": 9, + "Offset": 1032 +} +``` + ### SectorCommitFlush SectorCommitFlush immediately sends a Commit message with sectors aggregated for Commit. 
Returns null if message wasn't sent @@ -1860,6 +1911,30 @@ Response: } ``` +### SectorsUnsealPiece + + +Perms: admin + +Inputs: +```json +[ + { + "ID": { + "Miner": 1000, + "Number": 9 + }, + "ProofType": 8 + }, + 1040384, + 1024, + null, + null +] +``` + +Response: `{}` + ### SectorsUpdate @@ -2205,6 +2280,7 @@ Response: "ef8d99a2-6865-4189-8ffa-9fef0f806eee": { "Info": { "Hostname": "host", + "IgnoreResources": false, "Resources": { "MemPhysical": 274877906944, "MemSwap": 128849018880, diff --git a/documentation/en/api-v0-methods-worker.md b/documentation/en/api-v0-methods-worker.md index b0130a2a0..c620113f4 100644 --- a/documentation/en/api-v0-methods-worker.md +++ b/documentation/en/api-v0-methods-worker.md @@ -15,8 +15,6 @@ * [MoveStorage](#MoveStorage) * [Process](#Process) * [ProcessSession](#ProcessSession) -* [Read](#Read) - * [ReadPiece](#ReadPiece) * [Release](#Release) * [ReleaseUnsealed](#ReleaseUnsealed) * [Seal](#Seal) @@ -91,6 +89,7 @@ Response: ```json { "Hostname": "string value", + "IgnoreResources": true, "Resources": { "MemPhysical": 42, "MemSwap": 42, @@ -263,41 +262,6 @@ Inputs: `null` Response: `"07070707-0707-0707-0707-070707070707"` -## Read - - -### ReadPiece - - -Perms: admin - -Inputs: -```json -[ - {}, - { - "ID": { - "Miner": 1000, - "Number": 9 - }, - "ProofType": 8 - }, - 1040384, - 1024 -] -``` - -Response: -```json -{ - "Sector": { - "Miner": 1000, - "Number": 9 - }, - "ID": "07070707-0707-0707-0707-070707070707" -} -``` - ## Release diff --git a/documentation/en/api-v0-methods.md b/documentation/en/api-v0-methods.md index 8bd6d71a5..4466cde8c 100644 --- a/documentation/en/api-v0-methods.md +++ b/documentation/en/api-v0-methods.md @@ -17,6 +17,7 @@ * [ChainGetBlockMessages](#ChainGetBlockMessages) * [ChainGetGenesis](#ChainGetGenesis) * [ChainGetMessage](#ChainGetMessage) + * [ChainGetMessagesInTipset](#ChainGetMessagesInTipset) * [ChainGetNode](#ChainGetNode) * [ChainGetParentMessages](#ChainGetParentMessages) * 
[ChainGetParentReceipts](#ChainGetParentReceipts) @@ -44,11 +45,13 @@ * [ClientGetDealInfo](#ClientGetDealInfo) * [ClientGetDealStatus](#ClientGetDealStatus) * [ClientGetDealUpdates](#ClientGetDealUpdates) + * [ClientGetRetrievalUpdates](#ClientGetRetrievalUpdates) * [ClientHasLocal](#ClientHasLocal) * [ClientImport](#ClientImport) * [ClientListDataTransfers](#ClientListDataTransfers) * [ClientListDeals](#ClientListDeals) * [ClientListImports](#ClientListImports) + * [ClientListRetrievals](#ClientListRetrievals) * [ClientMinerQueryOffer](#ClientMinerQueryOffer) * [ClientQueryAsk](#ClientQueryAsk) * [ClientRemoveImport](#ClientRemoveImport) @@ -57,6 +60,7 @@ * [ClientRetrieveTryRestartInsufficientFunds](#ClientRetrieveTryRestartInsufficientFunds) * [ClientRetrieveWithEvents](#ClientRetrieveWithEvents) * [ClientStartDeal](#ClientStartDeal) + * [ClientStatelessDeal](#ClientStatelessDeal) * [Create](#Create) * [CreateBackup](#CreateBackup) * [Gas](#Gas) @@ -530,6 +534,28 @@ Response: } ``` +### ChainGetMessagesInTipset +ChainGetMessagesInTipset returns message stores in current tipset + + +Perms: read + +Inputs: +```json +[ + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: `null` + ### ChainGetNode @@ -1196,6 +1222,54 @@ Response: } ``` +### ClientGetRetrievalUpdates +ClientGetRetrievalUpdates returns status of updated retrieval deals + + +Perms: write + +Inputs: `null` + +Response: +```json +{ + "PayloadCID": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "ID": 5, + "PieceCID": null, + "PricePerByte": "0", + "UnsealPrice": "0", + "Status": 0, + "Message": "string value", + "Provider": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "BytesReceived": 42, + "BytesPaidFor": 42, + "TotalPaid": "0", + "TransferChannelID": { + "Initiator": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", 
+ "Responder": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "ID": 3 + }, + "DataTransfer": { + "TransferID": 3, + "Status": 1, + "BaseCID": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "IsInitiator": true, + "IsSender": true, + "Voucher": "string value", + "Message": "string value", + "OtherPeer": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "Transferred": 42, + "Stages": { + "Stages": null + } + } +} +``` + ### ClientHasLocal ClientHasLocal indicates whether a certain CID is locally stored. @@ -1263,6 +1337,17 @@ Response: `null` ClientListImports lists imported files and their root CIDs +Perms: write + +Inputs: `null` + +Response: `null` + +### ClientListRetrievals +ClientQueryAsk returns a signed StorageAsk from the specified miner. +ClientListRetrievals returns information about retrievals made by the local client + + Perms: write Inputs: `null` @@ -1309,7 +1394,6 @@ Response: ``` ### ClientQueryAsk -ClientQueryAsk returns a signed StorageAsk from the specified miner. Perms: read @@ -1501,6 +1585,39 @@ Inputs: Response: `null` +### ClientStatelessDeal +ClientStatelessDeal fire-and-forget-proposes an offline deal to a miner without subsequent tracking. + + +Perms: write + +Inputs: +```json +[ + { + "Data": { + "TransferType": "string value", + "Root": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "PieceCid": null, + "PieceSize": 1024, + "RawBlockSize": 42 + }, + "Wallet": "f01234", + "Miner": "f01234", + "EpochPrice": "0", + "MinBlocksDuration": 42, + "ProviderCollateral": "0", + "DealStartEpoch": 10101, + "FastRetrieval": true, + "VerifiedDeal": true + } +] +``` + +Response: `null` + ## Create @@ -2411,7 +2528,7 @@ using both transaction ID and a hash of the parameters used in the proposal. This method of approval can be used to ensure you only approve exactly the transaction you think you are. 
It takes the following params: , , , , , -, , +, , Perms: sign @@ -2747,8 +2864,8 @@ Inputs: `null` Response: ```json { - "Addrs": null, - "ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf" + "ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "Addrs": [] } ``` @@ -2897,8 +3014,8 @@ Inputs: ```json [ { - "Addrs": null, - "ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf" + "ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "Addrs": [] } ] ``` @@ -2948,8 +3065,8 @@ Inputs: Response: ```json { - "Addrs": null, - "ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf" + "ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "Addrs": [] } ``` diff --git a/documentation/en/api-v1-unstable-methods.md b/documentation/en/api-v1-unstable-methods.md index 282084204..ef151a94d 100644 --- a/documentation/en/api-v1-unstable-methods.md +++ b/documentation/en/api-v1-unstable-methods.md @@ -17,6 +17,7 @@ * [ChainGetBlockMessages](#ChainGetBlockMessages) * [ChainGetGenesis](#ChainGetGenesis) * [ChainGetMessage](#ChainGetMessage) + * [ChainGetMessagesInTipset](#ChainGetMessagesInTipset) * [ChainGetNode](#ChainGetNode) * [ChainGetParentMessages](#ChainGetParentMessages) * [ChainGetParentReceipts](#ChainGetParentReceipts) @@ -44,11 +45,13 @@ * [ClientGetDealInfo](#ClientGetDealInfo) * [ClientGetDealStatus](#ClientGetDealStatus) * [ClientGetDealUpdates](#ClientGetDealUpdates) + * [ClientGetRetrievalUpdates](#ClientGetRetrievalUpdates) * [ClientHasLocal](#ClientHasLocal) * [ClientImport](#ClientImport) * [ClientListDataTransfers](#ClientListDataTransfers) * [ClientListDeals](#ClientListDeals) * [ClientListImports](#ClientListImports) + * [ClientListRetrievals](#ClientListRetrievals) * [ClientMinerQueryOffer](#ClientMinerQueryOffer) * [ClientQueryAsk](#ClientQueryAsk) * [ClientRemoveImport](#ClientRemoveImport) @@ -57,6 +60,7 @@ * [ClientRetrieveTryRestartInsufficientFunds](#ClientRetrieveTryRestartInsufficientFunds) * 
[ClientRetrieveWithEvents](#ClientRetrieveWithEvents) * [ClientStartDeal](#ClientStartDeal) + * [ClientStatelessDeal](#ClientStatelessDeal) * [Create](#Create) * [CreateBackup](#CreateBackup) * [Gas](#Gas) @@ -82,6 +86,9 @@ * [MpoolBatchPush](#MpoolBatchPush) * [MpoolBatchPushMessage](#MpoolBatchPushMessage) * [MpoolBatchPushUntrusted](#MpoolBatchPushUntrusted) + * [MpoolCheckMessages](#MpoolCheckMessages) + * [MpoolCheckPendingMessages](#MpoolCheckPendingMessages) + * [MpoolCheckReplaceMessages](#MpoolCheckReplaceMessages) * [MpoolClear](#MpoolClear) * [MpoolGetConfig](#MpoolGetConfig) * [MpoolGetNonce](#MpoolGetNonce) @@ -126,6 +133,8 @@ * [NetPeerInfo](#NetPeerInfo) * [NetPeers](#NetPeers) * [NetPubsubScores](#NetPubsubScores) +* [Node](#Node) + * [NodeStatus](#NodeStatus) * [Paych](#Paych) * [PaychAllocateLane](#PaychAllocateLane) * [PaychAvailableFunds](#PaychAvailableFunds) @@ -527,6 +536,28 @@ Response: } ``` +### ChainGetMessagesInTipset +ChainGetMessagesInTipset returns message stores in current tipset + + +Perms: read + +Inputs: +```json +[ + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: `null` + ### ChainGetNode @@ -1193,6 +1224,54 @@ Response: } ``` +### ClientGetRetrievalUpdates +ClientGetRetrievalUpdates returns status of updated retrieval deals + + +Perms: write + +Inputs: `null` + +Response: +```json +{ + "PayloadCID": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "ID": 5, + "PieceCID": null, + "PricePerByte": "0", + "UnsealPrice": "0", + "Status": 0, + "Message": "string value", + "Provider": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "BytesReceived": 42, + "BytesPaidFor": 42, + "TotalPaid": "0", + "TransferChannelID": { + "Initiator": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "Responder": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", 
+ "ID": 3 + }, + "DataTransfer": { + "TransferID": 3, + "Status": 1, + "BaseCID": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "IsInitiator": true, + "IsSender": true, + "Voucher": "string value", + "Message": "string value", + "OtherPeer": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "Transferred": 42, + "Stages": { + "Stages": null + } + } +} +``` + ### ClientHasLocal ClientHasLocal indicates whether a certain CID is locally stored. @@ -1260,6 +1339,16 @@ Response: `null` ClientListImports lists imported files and their root CIDs +Perms: write + +Inputs: `null` + +Response: `null` + +### ClientListRetrievals +ClientListRetrievals returns information about retrievals made by the local client + + Perms: write Inputs: `null` @@ -1498,6 +1587,39 @@ Inputs: Response: `null` +### ClientStatelessDeal +ClientStatelessDeal fire-and-forget-proposes an offline deal to a miner without subsequent tracking. + + +Perms: write + +Inputs: +```json +[ + { + "Data": { + "TransferType": "string value", + "Root": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "PieceCid": null, + "PieceSize": 1024, + "RawBlockSize": 42 + }, + "Wallet": "f01234", + "Miner": "f01234", + "EpochPrice": "0", + "MinBlocksDuration": 42, + "ProviderCollateral": "0", + "DealStartEpoch": 10101, + "FastRetrieval": true, + "VerifiedDeal": true + } +] +``` + +Response: `null` + ## Create @@ -1991,6 +2113,51 @@ Inputs: Response: `null` +### MpoolCheckMessages +MpoolCheckMessages performs logical checks on a batch of messages + + +Perms: read + +Inputs: +```json +[ + null +] +``` + +Response: `null` + +### MpoolCheckPendingMessages +MpoolCheckPendingMessages performs logical checks for all pending messages from a given address + + +Perms: read + +Inputs: +```json +[ + "f01234" +] +``` + +Response: `null` + +### MpoolCheckReplaceMessages +MpoolCheckReplaceMessages performs logical checks on pending messages with replacement + + +Perms: 
read + +Inputs: +```json +[ + null +] +``` + +Response: `null` + ### MpoolClear MpoolClear clears pending messages from the mpool @@ -2324,7 +2491,22 @@ Inputs: Response: ```json { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + "Message": { + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==", + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } + }, + "ValidNonce": true } ``` @@ -2350,7 +2532,22 @@ Inputs: Response: ```json { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + "Message": { + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==", + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } + }, + "ValidNonce": true } ``` @@ -2375,7 +2572,22 @@ Inputs: Response: ```json { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + "Message": { + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==", + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } + }, + "ValidNonce": true } ``` @@ -2398,7 +2610,22 @@ Inputs: Response: ```json { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + "Message": { + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==", + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } + }, + "ValidNonce": true } ``` @@ -2430,7 +2657,22 @@ Inputs: Response: ```json { - "/": 
"bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + "Message": { + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==", + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } + }, + "ValidNonce": true } ``` @@ -2458,7 +2700,22 @@ Inputs: Response: ```json { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + "Message": { + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==", + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } + }, + "ValidNonce": true } ``` @@ -2485,7 +2742,22 @@ Inputs: Response: ```json { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + "Message": { + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==", + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } + }, + "ValidNonce": true } ``` @@ -2622,7 +2894,22 @@ Inputs: Response: ```json { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + "Message": { + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==", + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } + }, + "ValidNonce": true } ``` @@ -2649,7 +2936,22 @@ Inputs: Response: ```json { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + "Message": { + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + 
"GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==", + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } + }, + "ValidNonce": true } ``` @@ -2676,7 +2978,22 @@ Inputs: Response: ```json { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + "Message": { + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==", + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } + }, + "ValidNonce": true } ``` @@ -2702,7 +3019,22 @@ Inputs: Response: ```json { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + "Message": { + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==", + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } + }, + "ValidNonce": true } ``` @@ -2727,7 +3059,22 @@ Inputs: Response: ```json { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + "Message": { + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==", + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } + }, + "ValidNonce": true } ``` @@ -2744,8 +3091,8 @@ Inputs: `null` Response: ```json { - "Addrs": null, - "ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf" + "ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "Addrs": [] } ``` @@ -2894,8 +3241,8 @@ Inputs: ```json [ { - "Addrs": null, - "ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf" + "ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "Addrs": [] } ] ``` @@ -2945,8 +3292,8 @@ Inputs: Response: 
```json { - "Addrs": null, - "ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf" + "ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "Addrs": [] } ``` @@ -3000,6 +3347,40 @@ Inputs: `null` Response: `null` +## Node +These methods are general node management and status commands + + +### NodeStatus +There are not yet any comments for this method. + +Perms: read + +Inputs: +```json +[ + true +] +``` + +Response: +```json +{ + "SyncStatus": { + "Epoch": 42, + "Behind": 42 + }, + "PeerStatus": { + "PeersToPublishMsgs": 123, + "PeersToPublishBlocks": 123 + }, + "ChainStatus": { + "BlocksPerTipsetLast100": 12.3, + "BlocksPerTipsetLastFinality": 12.3 + } +} +``` + ## Paych The Paych methods are for interacting with and managing payment channels diff --git a/documentation/en/cli-lotus-miner.md b/documentation/en/cli-lotus-miner.md index dfa9072c9..aa51ab23e 100644 --- a/documentation/en/cli-lotus-miner.md +++ b/documentation/en/cli-lotus-miner.md @@ -7,7 +7,7 @@ USAGE: lotus-miner [global options] command [command options] [arguments...] VERSION: - 1.11.0-dev + 1.11.1-dev COMMANDS: init Initialize a lotus miner repo @@ -41,7 +41,7 @@ COMMANDS: GLOBAL OPTIONS: --actor value, -a value specify other actor to check state for (read only) - --color (default: false) + --color use color in display output (default: depends on output being a TTY) --miner-repo value, --storagerepo value Specify miner repo path. 
flag(storagerepo) and env(LOTUS_STORAGE_PATH) are DEPRECATION, will REMOVE SOON (default: "~/.lotusminer") [$LOTUS_MINER_PATH, $LOTUS_STORAGE_PATH] --help, -h show help (default: false) --version, -v print the version (default: false) @@ -57,6 +57,7 @@ USAGE: COMMANDS: restore Initialize a lotus miner repo from a backup + service Initialize a lotus miner sub-service help, h Shows a list of commands or help for one command OPTIONS: @@ -93,6 +94,24 @@ OPTIONS: ``` +### lotus-miner init service +``` +NAME: + lotus-miner init service - Initialize a lotus miner sub-service + +USAGE: + lotus-miner init service [command options] [backupFile] + +OPTIONS: + --config value config file (config.toml) + --nosync don't check full-node sync status (default: false) + --type value type of service to be enabled + --api-sealer value sealer API info (lotus-miner auth api-info --perm=admin) + --api-sector-index value sector Index API info (lotus-miner auth api-info --perm=admin) + --help, -h show help (default: false) + +``` + ## lotus-miner run ``` NAME: @@ -295,7 +314,7 @@ USAGE: OPTIONS: --verbose (default: false) - --color (default: true) + --color use color in display output (default: depends on output being a TTY) --help, -h show help (default: false) ``` @@ -745,7 +764,7 @@ COMMANDS: selection Configure acceptance criteria for retrieval deal proposals list List all active retrieval deals for this miner set-ask Configure the provider's retrieval ask - get-ask Get the provider's current retrieval ask + get-ask Get the provider's current retrieval ask configured by the provider in the ask-store using the set-ask CLI command help, h Shows a list of commands or help for one command OPTIONS: @@ -848,7 +867,7 @@ OPTIONS: ### lotus-miner retrieval-deals get-ask ``` NAME: - lotus-miner retrieval-deals get-ask - Get the provider's current retrieval ask + lotus-miner retrieval-deals get-ask - Get the provider's current retrieval ask configured by the provider in the ask-store using the 
set-ask CLI command USAGE: lotus-miner retrieval-deals get-ask [command options] [arguments...] @@ -888,7 +907,7 @@ USAGE: OPTIONS: --verbose, -v print verbose transfer details (default: false) - --color use color in display output (default: true) + --color use color in display output (default: depends on output being a TTY) --completed show completed data transfers (default: false) --watch watch deal updates in real-time, rather than a one time list (default: false) --show-failed show failed/cancelled transfers (default: false) @@ -1344,7 +1363,7 @@ USAGE: OPTIONS: --show-removed show removed sectors (default: false) - --color, -c (default: true) + --color, -c use color in display output (default: depends on output being a TTY) --fast don't show on-chain info for better performance (default: false) --events display number of events the sector has received (default: false) --seal-time display how long it took for the sector to be sealed (default: false) @@ -1405,6 +1424,7 @@ OPTIONS: --new-expiration value new expiration epoch (default: 0) --v1-sectors renews all v1 sectors up to the maximum possible lifetime (default: false) --tolerance value when extending v1 sectors, don't try to extend sectors by fewer than this number of epochs (default: 20160) + --expiration-ignore value when extending v1 sectors, skip sectors whose current expiration is less than epochs from now (default: 120) --expiration-cutoff value when extending v1 sectors, skip sectors whose current expiration is more than epochs from now (infinity if unspecified) (default: 0) --help, -h show help (default: false) @@ -1533,9 +1553,9 @@ USAGE: lotus-miner sectors batching command [command options] [arguments...] 
COMMANDS: - pending-commit list sectors waiting in commit batch queue - pending-precommit list sectors waiting in precommit batch queue - help, h Shows a list of commands or help for one command + commit list sectors waiting in commit batch queue + precommit list sectors waiting in precommit batch queue + help, h Shows a list of commands or help for one command OPTIONS: --help, -h show help (default: false) @@ -1543,13 +1563,13 @@ OPTIONS: ``` -#### lotus-miner sectors batching pending-commit +#### lotus-miner sectors batching commit ``` NAME: - lotus-miner sectors batching pending-commit - list sectors waiting in commit batch queue + lotus-miner sectors batching commit - list sectors waiting in commit batch queue USAGE: - lotus-miner sectors batching pending-commit [command options] [arguments...] + lotus-miner sectors batching commit [command options] [arguments...] OPTIONS: --publish-now send a batch now (default: false) @@ -1557,13 +1577,13 @@ OPTIONS: ``` -#### lotus-miner sectors batching pending-precommit +#### lotus-miner sectors batching precommit ``` NAME: - lotus-miner sectors batching pending-precommit - list sectors waiting in precommit batch queue + lotus-miner sectors batching precommit - list sectors waiting in precommit batch queue USAGE: - lotus-miner sectors batching pending-precommit [command options] [arguments...] + lotus-miner sectors batching precommit [command options] [arguments...] OPTIONS: --publish-now send a batch now (default: false) @@ -1739,7 +1759,7 @@ COMMANDS: help, h Shows a list of commands or help for one command OPTIONS: - --color (default: false) + --color use color in display output (default: depends on output being a TTY) --help, -h show help (default: false) --version, -v print the version (default: false) @@ -1754,7 +1774,7 @@ USAGE: lotus-miner storage list sectors [command options] [arguments...] 
OPTIONS: - --color (default: true) + --color use color in display output (default: depends on output being a TTY) --help, -h show help (default: false) ``` @@ -1816,7 +1836,7 @@ USAGE: lotus-miner sealing jobs [command options] [arguments...] OPTIONS: - --color (default: false) + --color use color in display output (default: depends on output being a TTY) --show-ret-done show returned but not consumed calls (default: false) --help, -h show help (default: false) @@ -1831,7 +1851,7 @@ USAGE: lotus-miner sealing workers [command options] [arguments...] OPTIONS: - --color (default: false) + --color use color in display output (default: depends on output being a TTY) --help, -h show help (default: false) ``` diff --git a/documentation/en/cli-lotus-worker.md b/documentation/en/cli-lotus-worker.md new file mode 100644 index 000000000..dbfc8da29 --- /dev/null +++ b/documentation/en/cli-lotus-worker.md @@ -0,0 +1,171 @@ +# lotus-worker +``` +NAME: + lotus-worker - Remote miner worker + +USAGE: + lotus-worker [global options] command [command options] [arguments...] + +VERSION: + 1.11.1-dev + +COMMANDS: + run Start lotus worker + info Print worker info + storage manage sector storage + set Manage worker settings + wait-quiet Block until all running tasks exit + tasks Manage task processing + help, h Shows a list of commands or help for one command + +GLOBAL OPTIONS: + --worker-repo value, --workerrepo value Specify worker repo path. flag workerrepo and env WORKER_PATH are DEPRECATION, will REMOVE SOON (default: "~/.lotusworker") [$LOTUS_WORKER_PATH, $WORKER_PATH] + --miner-repo value, --storagerepo value Specify miner repo path. 
flag storagerepo and env LOTUS_STORAGE_PATH are DEPRECATION, will REMOVE SOON (default: "~/.lotusminer") [$LOTUS_MINER_PATH, $LOTUS_STORAGE_PATH] + --enable-gpu-proving enable use of GPU for mining operations (default: true) + --help, -h show help (default: false) + --version, -v print the version (default: false) +``` + +## lotus-worker run +``` +NAME: + lotus-worker run - Start lotus worker + +USAGE: + lotus-worker run [command options] [arguments...] + +OPTIONS: + --listen value host address and port the worker api will listen on (default: "0.0.0.0:3456") + --no-local-storage don't use storageminer repo for sector storage (default: false) + --no-swap don't use swap (default: false) + --addpiece enable addpiece (default: true) + --precommit1 enable precommit1 (32G sectors: 1 core, 128GiB Memory) (default: true) + --unseal enable unsealing (32G sectors: 1 core, 128GiB Memory) (default: true) + --precommit2 enable precommit2 (32G sectors: all cores, 96GiB Memory) (default: true) + --commit enable commit (32G sectors: all cores or GPUs, 128GiB Memory + 64GiB swap) (default: true) + --parallel-fetch-limit value maximum fetch operations to run in parallel (default: 5) + --timeout value used when 'listen' is unspecified. must be a valid duration recognized by golang's time.ParseDuration function (default: "30m") + --help, -h show help (default: false) + +``` + +## lotus-worker info +``` +NAME: + lotus-worker info - Print worker info + +USAGE: + lotus-worker info [command options] [arguments...] + +OPTIONS: + --help, -h show help (default: false) + +``` + +## lotus-worker storage +``` +NAME: + lotus-worker storage - manage sector storage + +USAGE: + lotus-worker storage command [command options] [arguments...] 
+ +COMMANDS: + attach attach local storage path + help, h Shows a list of commands or help for one command + +OPTIONS: + --help, -h show help (default: false) + --version, -v print the version (default: false) + +``` + +### lotus-worker storage attach +``` +NAME: + lotus-worker storage attach - attach local storage path + +USAGE: + lotus-worker storage attach [command options] [arguments...] + +OPTIONS: + --init initialize the path first (default: false) + --weight value (for init) path weight (default: 10) + --seal (for init) use path for sealing (default: false) + --store (for init) use path for long-term storage (default: false) + --max-storage value (for init) limit storage space for sectors (expensive for very large paths!) + --help, -h show help (default: false) + +``` + +## lotus-worker set +``` +NAME: + lotus-worker set - Manage worker settings + +USAGE: + lotus-worker set [command options] [arguments...] + +OPTIONS: + --enabled enable/disable new task processing (default: true) + --help, -h show help (default: false) + +``` + +## lotus-worker wait-quiet +``` +NAME: + lotus-worker wait-quiet - Block until all running tasks exit + +USAGE: + lotus-worker wait-quiet [command options] [arguments...] + +OPTIONS: + --help, -h show help (default: false) + +``` + +## lotus-worker tasks +``` +NAME: + lotus-worker tasks - Manage task processing + +USAGE: + lotus-worker tasks command [command options] [arguments...] 
+ +COMMANDS: + enable Enable a task type + disable Disable a task type + help, h Shows a list of commands or help for one command + +OPTIONS: + --help, -h show help (default: false) + --version, -v print the version (default: false) + +``` + +### lotus-worker tasks enable +``` +NAME: + lotus-worker tasks enable - Enable a task type + +USAGE: + lotus-worker tasks enable [command options] [UNS|C2|PC2|PC1|AP] + +OPTIONS: + --help, -h show help (default: false) + +``` + +### lotus-worker tasks disable +``` +NAME: + lotus-worker tasks disable - Disable a task type + +USAGE: + lotus-worker tasks disable [command options] [UNS|C2|PC2|PC1|AP] + +OPTIONS: + --help, -h show help (default: false) + +``` diff --git a/documentation/en/cli-lotus.md b/documentation/en/cli-lotus.md index 82c8dfad3..6971ed6e7 100644 --- a/documentation/en/cli-lotus.md +++ b/documentation/en/cli-lotus.md @@ -7,7 +7,7 @@ USAGE: lotus [global options] command [command options] [arguments...] VERSION: - 1.11.0-dev + 1.11.1-dev COMMANDS: daemon Start a lotus daemon process @@ -15,12 +15,12 @@ COMMANDS: version Print version help, h Shows a list of commands or help for one command BASIC: - send Send funds between accounts - wallet Manage wallet - client Make deals, store data, retrieve data - msig Interact with a multisig wallet - verifreg Interact with the verified registry actor - paych Manage payment channels + send Send funds between accounts + wallet Manage wallet + client Make deals, store data, retrieve data + msig Interact with a multisig wallet + filplus Interact with the verified registry actor used by Filplus + paych Manage payment channels DEVELOPER: auth Manage RPC permissions mpool Manage message pool @@ -32,8 +32,12 @@ COMMANDS: NETWORK: net Manage P2P Network sync Inspect or interact with the chain syncer + STATUS: + status Check node status GLOBAL OPTIONS: + --interactive setting to false will disable interactive functionality of commands (default: false) + --force-send if true, will 
ignore pre-send checks (default: false) --help, -h show help (default: false) --version, -v print the version (default: false) ``` @@ -137,7 +141,7 @@ OPTIONS: --method value specify method to invoke (default: 0) --params-json value specify invocation parameters in json --params-hex value specify invocation parameters in hex - --force must be specified for the action to take effect if maybe SysErrInsufficientFunds etc (default: false) + --force Deprecated: use global 'force-send' (default: false) --help, -h show help (default: false) ``` @@ -373,6 +377,7 @@ COMMANDS: find Find data in the network retrieve Retrieve data from network cancel-retrieval Cancel a retrieval deal by deal ID; this also cancels the associated transfer + list-retrievals List retrieval market deals STORAGE: deal Initialize storage deal with a miner query-ask Find a miners ask @@ -517,6 +522,27 @@ OPTIONS: ``` +### lotus client list-retrievals +``` +NAME: + lotus client list-retrievals - List retrieval market deals + +USAGE: + lotus client list-retrievals [command options] [arguments...] + +CATEGORY: + RETRIEVAL + +OPTIONS: + --verbose, -v print verbose deal details (default: false) + --color use color in display output (default: depends on output being a TTY) + --show-failed show failed/failing deals (default: true) + --completed show completed retrievals (default: false) + --watch watch deal updates in real-time, rather than a one time list (default: false) + --help, -h show help (default: false) + +``` + ### lotus client deal ``` NAME: @@ -532,8 +558,8 @@ DESCRIPTION: Make a deal with a miner. dataCid comes from running 'lotus client import'. miner is the address of the miner you wish to make a deal with. -price is measured in FIL/GB/Epoch. Miners usually don't accept a bid -lower than their advertised ask. You can check a miners listed price +price is measured in FIL/Epoch. Miners usually don't accept a bid +lower than their advertised ask (which is in FIL/GiB/Epoch). 
You can check a miners listed price with 'lotus client query-ask '. duration is how long the miner should store the data for, in blocks. The minimum value is 518400 (6 months). @@ -541,6 +567,7 @@ The minimum value is 518400 (6 months). OPTIONS: --manual-piece-cid value manually specify piece commitment for data (dataCid must be to a car file) --manual-piece-size value if manually specifying piece cid, used to specify size (dataCid must be to a car file) (default: 0) + --manual-stateless-deal instructs the node to send an offline deal without registering it with the deallist/fsm (default: false) --from value specify address to fund the deal with --start-epoch value specify the epoch that the deal should start at (default: -1) --fast-retrieval indicates that data should be available for fast retrieval (default: true) @@ -582,7 +609,7 @@ CATEGORY: OPTIONS: --verbose, -v print verbose deal details (default: false) - --color use color in display output (default: true) + --color use color in display output (default: depends on output being a TTY) --show-failed show failed/failing deals (default: false) --watch watch deal updates in real-time, rather than a one time list (default: false) --help, -h show help (default: false) @@ -720,7 +747,7 @@ CATEGORY: OPTIONS: --verbose, -v print verbose transfer details (default: false) - --color use color in display output (default: true) + --color use color in display output (default: depends on output being a TTY) --completed show completed data transfers (default: false) --watch watch deal updates in real-time, rather than a one time list (default: false) --show-failed show failed/cancelled transfers (default: false) @@ -1030,21 +1057,21 @@ OPTIONS: ``` -## lotus verifreg +## lotus filplus ``` NAME: - lotus verifreg - Interact with the verified registry actor + lotus filplus - Interact with the verified registry actor used by Filplus USAGE: - lotus verifreg command [command options] [arguments...] 
+ lotus filplus command [command options] [arguments...] COMMANDS: - verify-client give allowance to the specified verified client address - list-verifiers list all verifiers - list-clients list all verified clients - check-client check verified client remaining bytes - check-verifier check verifiers remaining bytes - help, h Shows a list of commands or help for one command + grant-datacap give allowance to the specified verified client address + list-notaries list all notaries + list-clients list all verified clients + check-client-datacap check verified client remaining bytes + check-notaries-datacap check notaries remaining bytes + help, h Shows a list of commands or help for one command OPTIONS: --help, -h show help (default: false) @@ -1052,66 +1079,66 @@ OPTIONS: ``` -### lotus verifreg verify-client +### lotus filplus grant-datacap ``` NAME: - lotus verifreg verify-client - give allowance to the specified verified client address + lotus filplus grant-datacap - give allowance to the specified verified client address USAGE: - lotus verifreg verify-client [command options] [arguments...] + lotus filplus grant-datacap [command options] [arguments...] OPTIONS: - --from value specify your verifier address to send the message from + --from value specify your notary address to send the message from --help, -h show help (default: false) ``` -### lotus verifreg list-verifiers +### lotus filplus list-notaries ``` NAME: - lotus verifreg list-verifiers - list all verifiers + lotus filplus list-notaries - list all notaries USAGE: - lotus verifreg list-verifiers [command options] [arguments...] + lotus filplus list-notaries [command options] [arguments...] OPTIONS: --help, -h show help (default: false) ``` -### lotus verifreg list-clients +### lotus filplus list-clients ``` NAME: - lotus verifreg list-clients - list all verified clients + lotus filplus list-clients - list all verified clients USAGE: - lotus verifreg list-clients [command options] [arguments...] 
+ lotus filplus list-clients [command options] [arguments...] OPTIONS: --help, -h show help (default: false) ``` -### lotus verifreg check-client +### lotus filplus check-client-datacap ``` NAME: - lotus verifreg check-client - check verified client remaining bytes + lotus filplus check-client-datacap - check verified client remaining bytes USAGE: - lotus verifreg check-client [command options] [arguments...] + lotus filplus check-client-datacap [command options] [arguments...] OPTIONS: --help, -h show help (default: false) ``` -### lotus verifreg check-verifier +### lotus filplus check-notaries-datacap ``` NAME: - lotus verifreg check-verifier - check verifiers remaining bytes + lotus filplus check-notaries-datacap - check notaries remaining bytes USAGE: - lotus verifreg check-verifier [command options] [arguments...] + lotus filplus check-notaries-datacap [command options] [arguments...] OPTIONS: --help, -h show help (default: false) @@ -1388,6 +1415,7 @@ COMMANDS: find find a message in the mempool config get or set current mpool configuration gas-perf Check gas performance of messages in mempool + manage help, h Shows a list of commands or help for one command OPTIONS: @@ -1501,6 +1529,9 @@ OPTIONS: --help, -h show help (default: false) ``` +# nage +``` +``` ## lotus state ``` @@ -1642,7 +1673,7 @@ NAME: lotus state get-actor - Print actor information USAGE: - lotus state get-actor [command options] [actorrAddress] + lotus state get-actor [command options] [actorAddress] OPTIONS: --help, -h show help (default: false) @@ -1747,13 +1778,14 @@ NAME: lotus state call - Invoke a method on an actor locally USAGE: - lotus state call [command options] [toAddress methodId (optional)] + lotus state call [command options] [toAddress methodId params (optional)] OPTIONS: - --from value (default: "f00") - --value value specify value field for invocation (default: "0") - --ret value specify how to parse output (auto, raw, addr, big) (default: "auto") - --help, -h show help 
(default: false) + --from value (default: "f00") + --value value specify value field for invocation (default: "0") + --ret value specify how to parse output (raw, decoded, base64, hex) (default: "decoded") + --encoding value specify params encoding to parse (base64, hex) (default: "base64") + --help, -h show help (default: false) ``` @@ -2108,7 +2140,7 @@ USAGE: lotus chain export [command options] [outputPath] OPTIONS: - --tipset value + --tipset value specify tipset to start the export from (default: "@head") --recent-stateroots value specify the number of recent state roots to include in the export (default: 0) --skip-old-msgs (default: false) --help, -h show help (default: false) @@ -2760,3 +2792,20 @@ OPTIONS: --help, -h show help (default: false) ``` + +## lotus status +``` +NAME: + lotus status - Check node status + +USAGE: + lotus status [command options] [arguments...] + +CATEGORY: + STATUS + +OPTIONS: + --chain include chain health status (default: false) + --help, -h show help (default: false) + +``` diff --git a/documentation/en/jaeger-tracing.md b/documentation/en/jaeger-tracing.md index bbe4d3052..ec9351d53 100644 --- a/documentation/en/jaeger-tracing.md +++ b/documentation/en/jaeger-tracing.md @@ -12,7 +12,20 @@ Currently it is set up to use Jaeger, though other tracing backends should be fa To easily run and view tracing locally, first, install jaeger. The easiest way to do this is to [download the binaries](https://www.jaegertracing.io/download/) and then run the `jaeger-all-in-one` binary. This will start up jaeger, listen for spans on `localhost:6831`, and expose a web UI for viewing traces on `http://localhost:16686/`. -Now, to start sending traces from Lotus to Jaeger, set the environment variable `LOTUS_JAEGER` to `localhost:6831`, and start the `lotus daemon`. +Now, to start sending traces from Lotus to Jaeger, set the environment variable and start the daemon. 
+ +```bash +export LOTUS_JAEGER_AGENT_ENDPOINT=127.0.0.1:6831 +lotus daemon +``` + +Alternatively, the agent endpoint can also be configured by a pair of environment variables to provide the host and port. The following snippet is functionally equivalent to the previous. + +```bash +export LOTUS_JAEGER_AGENT_HOST=127.0.0.1 +export LOTUS_JAEGER_AGENT_PORT=6831 +lotus daemon +``` Now, to view any generated traces, open up `http://localhost:16686/` in your browser. diff --git a/documentation/misc/RELEASE_ISSUE_TEMPLATE.md b/documentation/misc/RELEASE_ISSUE_TEMPLATE.md new file mode 100644 index 000000000..8adab9671 --- /dev/null +++ b/documentation/misc/RELEASE_ISSUE_TEMPLATE.md @@ -0,0 +1,115 @@ +> Release Issue Template + +# Lotus X.Y.Z Release + +We're happy to announce Lotus X.Y.Z... + +## 🗺 Must-dos for the release + +## 🌟 Nice-to-haves for the release + + + +## 🚢 Estimated shipping date + + + +## 🔦 Highlights + +< top highlights for this release notes > + +## ✅ Release Checklist + +**Note for whoever is owning the release:** please capture notes as comments in this issue for anything you noticed that could be improved for future releases. There is a *Post Release* step below for incorporating changes back into the [RELEASE_ISSUE_TEMPLATE](https://github.com/filecoin-project/lotus/blob/master/documentation/misc/RELEASE_ISSUE_TEMPLATE.md), and this is easier done by collecting notes from along the way rather than just thinking about it at the end. + +First steps: + + - [ ] Fork a new branch (`release/vX.Y.Z`) from `master` and make any further release related changes to this branch. If any "non-trivial" changes get added to the release, uncheck all the checkboxes and return to this stage. + - [ ] Bump the version in `version.go` in the `master` branch to `vX.(Y+1).0-dev`. + +Prepping an RC: + +- [ ] version string in `build/version.go` has been updated (in the `release/vX.Y.Z` branch). 
+- [ ] tag commit with `vX.Y.Z-rcN` +- [ ] cut a pre-release [here](https://github.com/filecoin-project/lotus/releases/new?prerelease=true) + +Testing an RC: + +- [ ] **Stage 0 - Automated Testing** + - Automated Testing + - [ ] CI: Ensure that all tests are passing. + - [ ] Testground tests + +- [ ] **Stage 1 - Internal Testing** + - Binaries + - [ ] Ensure the RC release has downloadable binaries + - Upgrade our testnet infra + - [ ] Wait 24 hours, confirm nodes stay in sync + - Upgrade our mainnet infra + - [ ] Subset of development full archival nodes + - [ ] Subset of bootstrappers (1 per region) + - [ ] Confirm nodes stay in sync + - Metrics report + - Block validation time + - Memory / CPU usage + - Number of goroutines + - IPLD block read latency + - Bandwidth usage + - [ ] If anything has worsened significantly, investigate + fix + - Confirm the following work (some combination of Testground / Calibnet / Mainnet / beta users) + - [ ] Seal a sector + - [ ] make a deal + - [ ] Submit a PoSt + - [ ] (optional) let a sector go faulty, and see it be recovered + +- [ ] **Stage 2 - Community Testing** + - [ ] Inform beta miners (@lotus-early-testers-miner in Filecoin Slack #fil-lotus) + - [ ] Ask close ecosystem partners to test their projects (@lotus-early-testers-eco-dev in Filecoin slack #fil-lotus) + - [ ] Powergate + - [ ] Glif + - [ ] Zondax + - [ ] Stats dashboard + - [ ] Community dashboards + - [ ] Infura + - [ ] Sentinel + - [ ] Protofire + - [ ] Fleek + +- [ ] **Stage 3 - Community Prod Testing** + - [ ] Documentation + - [ ] Ensure that [CHANGELOG.md](https://github.com/filecoin-project/lotus/blob/master/CHANGELOG.md) is up to date + - [ ] Check if any [config](https://docs.filecoin.io/get-started/lotus/configuration-and-advanced-usage/#configuration) updates are needed + - [ ] Invite the wider community through (link to the release issue): + - [ ] Check `Create a discussion for this release` when tagging for the major rcs(new features, hot-fixes) 
release + - [ ] Link the discussion in #fil-lotus on Filecoin slack + +- [ ] **Stage 4 - Release** + - [ ] Final preparation + - [ ] Verify that version string in [`version.go`](https://github.com/filecoin-project/lotus/blob/master/build/version.go) has been updated. + - [ ] Ensure that [CHANGELOG.md](https://github.com/filecoin-project/lotus/blob/master/CHANGELOG.md) is up to date + - [ ] Prep the changelog using `scripts/mkreleaselog`, and add it to `CHANGELOG.md` + - [ ] Merge `release-vX.Y.Z` into the `releases` branch. + - [ ] Tag this merge commit (on the `releases` branch) with `vX.Y.Z` + - [ ] Cut the release [here](https://github.com/filecoin-project/lotus/releases/new?prerelease=true&target=releases). + - [ ] Check `Create a discussion for this release` when tagging the release + - [ ] Final announcements + - [ ] Update network.filecoin.io for mainnet, calib and nerpa. + - [ ] repost in #fil-lotus in filecoin slack + - [ ] Inform node providers (Protofire, Digital Ocean..) + +- [ ] **Post-Release** + - [ ] Merge the `releases` branch back into `master`, ignoring the changes to `version.go` (keep the `-dev` version from master). Do NOT delete the `releases` branch when doing so! + - [ ] Update [RELEASE_ISSUE_TEMPLATE.md](https://github.com/filecoin-project/lotus/blob/master/documentation/misc/RELEASE_ISSUE_TEMPLATE.md) with any improvements determined from this latest release iteration. + - [ ] Create an issue using [RELEASE_ISSUE_TEMPLATE.md](https://github.com/filecoin-project/lotus/blob/master/documentation/misc/RELEASE_ISSUE_TEMPLATE.md) for the _next_ release. + +## ❤️ Contributors + +< list generated by scripts/mkreleaselog > + +Would you like to contribute to Lotus and don't know how? Well, there are a few places you can get started: + +- TODO + +## ⁉️ Do you have questions? + +Leave a comment [here]() if you have any questions. 
diff --git a/documentation/misc/actors_version_checklist.md b/documentation/misc/actors_version_checklist.md index 769e9da42..1fae4bd8a 100644 --- a/documentation/misc/actors_version_checklist.md +++ b/documentation/misc/actors_version_checklist.md @@ -4,6 +4,8 @@ - [ ] Define upgrade heights in `build/params_` - [ ] Generate adapters - [ ] Add the new version in `chain/actors/agen/main.go` + - [ ] Update adapter code in `chain/actors/builtin` if needed +- [ ] Update `chain/actors/policy/policy.go` - [ ] Update `chain/actors/version.go` - [ ] Register in `chain/vm/invoker.go` - [ ] Register in `chain/vm/mkactor.go` diff --git a/documentation/misc/gas_balancing.md b/documentation/misc/gas_balancing.md new file mode 100644 index 000000000..64d9fcf0e --- /dev/null +++ b/documentation/misc/gas_balancing.md @@ -0,0 +1,54 @@ +## Gas Balancing + +The gas balancing process aims to set gas costs of syscalls to be in line with +10 gas per nanosecond on reference hardware. +The process can be either performed for all syscalls based on existing messages and chain or targeted +at a single syscall. + +#### Reference hardware + +The reference hardware is TR3970x with 128GB of RAM. This is what was available at the time and +may be subject to change. + +### Complete gas balancing + +Complete gas balancing is performed using `lotus-bench`; the process is based on importing a chain export +and collecting gas traces which are later aggregated. + +Before building `lotus-bench` make sure `EnableGasTracing` in `chain/vm/runtime.go` is set to `true`. + +The process can be started using `./lotus-bench import` with `--car` flag set to the location of +CAR chain export. `--start-epoch` and `--end-epoch` can be used to limit the range of epochs to run +the benchmark. Note that state tree of `start-epoch` needs to be in the CAR file or has to be previously computed +to work. 
+ +The output will be a `bench.json` file containing information about every syscall invoked +and the time taken by these invocations. This file can grow to be quite big in size, so make sure you have +spare space. + +After the bench run is complete, the `bench.json` file can be analyzed with `./lotus-bench import analyze bench.json`. + +It will compute means, standard deviations and co-variances (when applicable) of syscall runtimes. +The output is in nanoseconds, so the gas values for syscalls should be 10x that. In cases where co-variance of +execution time to some parameter is evaluated, the strength of the correlation should be taken into account. + +#### Special cases + +OnIpldPut compute gas is based on the flush time to disk of objects created, +during block execution (when gas traces are formed) objects are only written to memory. Use `vm/flush_copy_ms` and `vm/flush_copy_count` to estimate OnIpldPut compute cost. + + +### Targeted gas balancing + +In some cases complete gas balancing is infeasible, either a new syscall gets introduced or +complete balancing is too time consuming. + +In these cases the recommended way to estimate gas for a given syscall is to perform an `in-vivo` benchmark. +In the past `in-vitro` as in standalone benchmarks were found to be highly inaccurate when compared to results +of real execution. + +An in-vivo benchmark can be performed by running an example of such syscall during block execution. +The best place to hook in such a benchmark is the message execution loop in +`chain/stmgr/stmgr.go` in `ApplyBlocks()`. Depending on the time required to complete the syscall it might be +advisable to run the execution only once every few messages. 
+ diff --git a/extern/sector-storage/ffiwrapper/sealer_cgo.go b/extern/sector-storage/ffiwrapper/sealer_cgo.go index dca8b44b5..820c53c4b 100644 --- a/extern/sector-storage/ffiwrapper/sealer_cgo.go +++ b/extern/sector-storage/ffiwrapper/sealer_cgo.go @@ -23,6 +23,7 @@ import ( commpffi "github.com/filecoin-project/go-commp-utils/ffiwrapper" "github.com/filecoin-project/go-commp-utils/zerocomm" "github.com/filecoin-project/lotus/extern/sector-storage/fr32" + "github.com/filecoin-project/lotus/extern/sector-storage/partialfile" "github.com/filecoin-project/lotus/extern/sector-storage/storiface" ) @@ -66,7 +67,7 @@ func (sb *Sealer) AddPiece(ctx context.Context, sector storage.SectorRef, existi } var done func() - var stagedFile *partialFile + var stagedFile *partialfile.PartialFile defer func() { if done != nil { @@ -87,7 +88,7 @@ func (sb *Sealer) AddPiece(ctx context.Context, sector storage.SectorRef, existi return abi.PieceInfo{}, xerrors.Errorf("acquire unsealed sector: %w", err) } - stagedFile, err = createPartialFile(maxPieceSize, stagedPath.Unsealed) + stagedFile, err = partialfile.CreatePartialFile(maxPieceSize, stagedPath.Unsealed) if err != nil { return abi.PieceInfo{}, xerrors.Errorf("creating unsealed sector file: %w", err) } @@ -97,7 +98,7 @@ func (sb *Sealer) AddPiece(ctx context.Context, sector storage.SectorRef, existi return abi.PieceInfo{}, xerrors.Errorf("acquire unsealed sector: %w", err) } - stagedFile, err = openPartialFile(maxPieceSize, stagedPath.Unsealed) + stagedFile, err = partialfile.OpenPartialFile(maxPieceSize, stagedPath.Unsealed) if err != nil { return abi.PieceInfo{}, xerrors.Errorf("opening unsealed sector file: %w", err) } @@ -195,12 +196,16 @@ func (sb *Sealer) AddPiece(ctx context.Context, sector storage.SectorRef, existi return piecePromises[0]() } + var payloadRoundedBytes abi.PaddedPieceSize pieceCids := make([]abi.PieceInfo, len(piecePromises)) for i, promise := range piecePromises { - pieceCids[i], err = promise() + pinfo, 
err := promise() if err != nil { return abi.PieceInfo{}, err } + + pieceCids[i] = pinfo + payloadRoundedBytes += pinfo.Size } pieceCID, err := ffi.GenerateUnsealedCID(sector.ProofType, pieceCids) @@ -213,6 +218,15 @@ func (sb *Sealer) AddPiece(ctx context.Context, sector storage.SectorRef, existi return abi.PieceInfo{}, err } + if payloadRoundedBytes < pieceSize.Padded() { + paddedCid, err := commpffi.ZeroPadPieceCommitment(pieceCID, payloadRoundedBytes.Unpadded(), pieceSize) + if err != nil { + return abi.PieceInfo{}, xerrors.Errorf("failed to pad data: %w", err) + } + + pieceCID = paddedCid + } + return abi.PieceInfo{ Size: pieceSize.Padded(), PieceCID: pieceCID, @@ -244,7 +258,7 @@ func (sb *Sealer) UnsealPiece(ctx context.Context, sector storage.SectorRef, off // try finding existing unsealedPath, done, err := sb.sectors.AcquireSector(ctx, sector, storiface.FTUnsealed, storiface.FTNone, storiface.PathStorage) - var pf *partialFile + var pf *partialfile.PartialFile switch { case xerrors.Is(err, storiface.ErrSectorNotFound): @@ -254,7 +268,7 @@ func (sb *Sealer) UnsealPiece(ctx context.Context, sector storage.SectorRef, off } defer done() - pf, err = createPartialFile(maxPieceSize, unsealedPath.Unsealed) + pf, err = partialfile.CreatePartialFile(maxPieceSize, unsealedPath.Unsealed) if err != nil { return xerrors.Errorf("create unsealed file: %w", err) } @@ -262,7 +276,7 @@ func (sb *Sealer) UnsealPiece(ctx context.Context, sector storage.SectorRef, off case err == nil: defer done() - pf, err = openPartialFile(maxPieceSize, unsealedPath.Unsealed) + pf, err = partialfile.OpenPartialFile(maxPieceSize, unsealedPath.Unsealed) if err != nil { return xerrors.Errorf("opening partial file: %w", err) } @@ -414,7 +428,7 @@ func (sb *Sealer) ReadPiece(ctx context.Context, writer io.Writer, sector storag } maxPieceSize := abi.PaddedPieceSize(ssize) - pf, err := openPartialFile(maxPieceSize, path.Unsealed) + pf, err := partialfile.OpenPartialFile(maxPieceSize, path.Unsealed) 
if err != nil { if xerrors.Is(err, os.ErrNotExist) { return false, nil @@ -576,7 +590,7 @@ func (sb *Sealer) FinalizeSector(ctx context.Context, sector storage.SectorRef, if len(keepUnsealed) > 0 { - sr := pieceRun(0, maxPieceSize) + sr := partialfile.PieceRun(0, maxPieceSize) for _, s := range keepUnsealed { si := &rlepluslazy.RunSliceIterator{} @@ -598,7 +612,7 @@ func (sb *Sealer) FinalizeSector(ctx context.Context, sector storage.SectorRef, } defer done() - pf, err := openPartialFile(maxPieceSize, paths.Unsealed) + pf, err := partialfile.OpenPartialFile(maxPieceSize, paths.Unsealed) if err == nil { var at uint64 for sr.HasNext() { diff --git a/extern/sector-storage/ffiwrapper/sealer_test.go b/extern/sector-storage/ffiwrapper/sealer_test.go index 4adec48cf..a6034cc79 100644 --- a/extern/sector-storage/ffiwrapper/sealer_test.go +++ b/extern/sector-storage/ffiwrapper/sealer_test.go @@ -31,6 +31,7 @@ import ( "github.com/filecoin-project/specs-storage/storage" ffi "github.com/filecoin-project/filecoin-ffi" + "github.com/filecoin-project/filecoin-ffi/generated" "github.com/filecoin-project/lotus/chain/actors/policy" "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper/basicfs" @@ -252,7 +253,7 @@ func getGrothParamFileAndVerifyingKeys(s abi.SectorSize) { // go test -run=^TestDownloadParams // func TestDownloadParams(t *testing.T) { - defer requireFDsClosed(t, openFDs(t)) + // defer requireFDsClosed(t, openFDs(t)) flaky likely cause of how go-embed works with param files getGrothParamFileAndVerifyingKeys(sectorSize) } @@ -811,3 +812,130 @@ func BenchmarkAddPiece512M(b *testing.B) { fmt.Println(c) } } + +func TestAddPiece512MPadded(t *testing.T) { + sz := abi.PaddedPieceSize(512 << 20).Unpadded() + + cdir, err := ioutil.TempDir("", "sbtest-c-") + if err != nil { + t.Fatal(err) + } + miner := abi.ActorID(123) + + sp := &basicfs.Provider{ + Root: cdir, + } + sb, err := New(sp) + if err != nil { + t.Fatalf("%+v", err) + } + cleanup := func() { + if 
t.Failed() { + fmt.Printf("not removing %s\n", cdir) + return + } + if err := os.RemoveAll(cdir); err != nil { + t.Error(err) + } + } + t.Cleanup(cleanup) + + r := rand.New(rand.NewSource(0x7e5)) + + c, err := sb.AddPiece(context.TODO(), storage.SectorRef{ + ID: abi.SectorID{ + Miner: miner, + Number: 0, + }, + ProofType: abi.RegisteredSealProof_StackedDrg512MiBV1_1, + }, nil, sz, io.LimitReader(r, int64(sz/4))) + if err != nil { + t.Fatalf("add piece failed: %s", err) + } + + require.Equal(t, "baga6ea4seaqonenxyku4o7hr5xkzbqsceipf6xgli3on54beqbk6k246sbooobq", c.PieceCID.String()) +} + +func setupLogger(t *testing.T) *bytes.Buffer { + _ = os.Setenv("RUST_LOG", "info") + + var bb bytes.Buffer + r, w, err := os.Pipe() + if err != nil { + t.Fatal(err) + } + + go func() { + _, _ = io.Copy(&bb, r) + runtime.KeepAlive(w) + }() + + resp := generated.FilInitLogFd(int32(w.Fd())) + resp.Deref() + + defer generated.FilDestroyInitLogFdResponse(resp) + + if resp.StatusCode != generated.FCPResponseStatusFCPNoError { + t.Fatal(generated.RawString(resp.ErrorMsg).Copy()) + } + + return &bb +} + +func TestMulticoreSDR(t *testing.T) { + if os.Getenv("TEST_RUSTPROOFS_LOGS") != "1" { + t.Skip("skipping test without TEST_RUSTPROOFS_LOGS=1") + } + + rustLogger := setupLogger(t) + + getGrothParamFileAndVerifyingKeys(sectorSize) + + dir, err := ioutil.TempDir("", "sbtest") + if err != nil { + t.Fatal(err) + } + + miner := abi.ActorID(123) + + sp := &basicfs.Provider{ + Root: dir, + } + sb, err := New(sp) + if err != nil { + t.Fatalf("%+v", err) + } + + cleanup := func() { + if t.Failed() { + fmt.Printf("not removing %s\n", dir) + return + } + if err := os.RemoveAll(dir); err != nil { + t.Error(err) + } + } + defer cleanup() + + si := storage.SectorRef{ + ID: abi.SectorID{Miner: miner, Number: 1}, + ProofType: sealProofType, + } + + s := seal{ref: si} + + // check multicore + _ = os.Setenv("FIL_PROOFS_USE_MULTICORE_SDR", "1") + rustLogger.Reset() + s.precommit(t, sb, si, func() {}) + + ok 
:= false + for _, s := range strings.Split(rustLogger.String(), "\n") { + if strings.Contains(s, "create_label::multi") { + ok = true + break + } + } + + require.True(t, ok) +} diff --git a/extern/sector-storage/ffiwrapper/unseal_ranges.go b/extern/sector-storage/ffiwrapper/unseal_ranges.go index 4519fc21e..3a13c73a7 100644 --- a/extern/sector-storage/ffiwrapper/unseal_ranges.go +++ b/extern/sector-storage/ffiwrapper/unseal_ranges.go @@ -7,6 +7,7 @@ import ( "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/lotus/extern/sector-storage/partialfile" "github.com/filecoin-project/lotus/extern/sector-storage/storiface" ) @@ -17,7 +18,7 @@ const mergeGaps = 32 << 20 // TODO const expandRuns = 16 << 20 // unseal more than requested for future requests func computeUnsealRanges(unsealed rlepluslazy.RunIterator, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (rlepluslazy.RunIterator, error) { - todo := pieceRun(offset.Padded(), size.Padded()) + todo := partialfile.PieceRun(offset.Padded(), size.Padded()) todo, err := rlepluslazy.Subtract(todo, unsealed) if err != nil { return nil, xerrors.Errorf("compute todo-unsealed: %w", err) diff --git a/extern/sector-storage/fr32/readers.go b/extern/sector-storage/fr32/readers.go index 20f3e9b31..f14d5bf1c 100644 --- a/extern/sector-storage/fr32/readers.go +++ b/extern/sector-storage/fr32/readers.go @@ -51,13 +51,12 @@ func (r *unpadReader) Read(out []byte) (int, error) { r.left -= uint64(todo) - n, err := r.src.Read(r.work[:todo]) + n, err := io.ReadAtLeast(r.src, r.work[:todo], int(todo)) if err != nil && err != io.EOF { return n, err } - - if n != int(todo) { - return 0, xerrors.Errorf("didn't read enough: %w", err) + if n < int(todo) { + return 0, xerrors.Errorf("didn't read enough: %d / %d, left %d, out %d", n, todo, r.left, len(out)) } Unpad(r.work[:todo], out[:todo.Unpadded()]) diff --git a/extern/sector-storage/manager.go b/extern/sector-storage/manager.go index 402a58593..bf676bffa 
100644 --- a/extern/sector-storage/manager.go +++ b/extern/sector-storage/manager.go @@ -29,8 +29,6 @@ var log = logging.Logger("advmgr") var ErrNoWorkers = errors.New("no suitable workers found") -type URLs []string - type Worker interface { storiface.WorkerCalls @@ -47,8 +45,6 @@ type Worker interface { } type SectorManager interface { - ReadPiece(context.Context, io.Writer, storage.SectorRef, storiface.UnpaddedByteIndex, abi.UnpaddedPieceSize, abi.SealRandomness, cid.Cid) error - ffiwrapper.StorageSealer storage.Prover storiface.WorkerReturn @@ -89,6 +85,20 @@ type result struct { err error } +// ResourceFilteringStrategy is an enum indicating the kinds of resource +// filtering strategies that can be configured for workers. +type ResourceFilteringStrategy string + +const ( + // ResourceFilteringHardware specifies that available hardware resources + // should be evaluated when scheduling a task against the worker. + ResourceFilteringHardware = ResourceFilteringStrategy("hardware") + + // ResourceFilteringDisabled disables resource filtering against this + // worker. The scheduler may assign any task to this worker. + ResourceFilteringDisabled = ResourceFilteringStrategy("disabled") +) + type SealerConfig struct { ParallelFetchLimit int @@ -98,6 +108,11 @@ type SealerConfig struct { AllowPreCommit2 bool AllowCommit bool AllowUnseal bool + + // ResourceFiltering instructs the system which resource filtering strategy + // to use when evaluating tasks against this worker. An empty value defaults + // to "hardware". 
+ ResourceFiltering ResourceFilteringStrategy } type StorageAuth http.Header @@ -105,24 +120,17 @@ type StorageAuth http.Header type WorkerStateStore *statestore.StateStore type ManagerStateStore *statestore.StateStore -func New(ctx context.Context, ls stores.LocalStorage, si stores.SectorIndex, sc SealerConfig, urls URLs, sa StorageAuth, wss WorkerStateStore, mss ManagerStateStore) (*Manager, error) { - lstor, err := stores.NewLocal(ctx, ls, si, urls) - if err != nil { - return nil, err - } - +func New(ctx context.Context, lstor *stores.Local, stor *stores.Remote, ls stores.LocalStorage, si stores.SectorIndex, sc SealerConfig, wss WorkerStateStore, mss ManagerStateStore) (*Manager, error) { prover, err := ffiwrapper.New(&readonlyProvider{stor: lstor, index: si}) if err != nil { return nil, xerrors.Errorf("creating prover instance: %w", err) } - stor := stores.NewRemote(lstor, si, http.Header(sa), sc.ParallelFetchLimit) - m := &Manager{ ls: ls, storage: stor, localStore: lstor, - remoteHnd: &stores.FetchHandler{Local: lstor}, + remoteHnd: &stores.FetchHandler{Local: lstor, PfHandler: &stores.DefaultPartialFileHandler{}}, index: si, sched: newScheduler(), @@ -141,7 +149,7 @@ func New(ctx context.Context, ls stores.LocalStorage, si stores.SectorIndex, sc go m.sched.runSched() localTasks := []sealtasks.TaskType{ - sealtasks.TTCommit1, sealtasks.TTFinalize, sealtasks.TTFetch, sealtasks.TTReadUnsealed, + sealtasks.TTCommit1, sealtasks.TTFinalize, sealtasks.TTFetch, } if sc.AllowAddPiece { localTasks = append(localTasks, sealtasks.TTAddPiece) @@ -159,9 +167,12 @@ func New(ctx context.Context, ls stores.LocalStorage, si stores.SectorIndex, sc localTasks = append(localTasks, sealtasks.TTUnseal) } - err = m.AddWorker(ctx, NewLocalWorker(WorkerConfig{ - TaskTypes: localTasks, - }, stor, lstor, si, m, wss)) + wcfg := WorkerConfig{ + IgnoreResourceFiltering: sc.ResourceFiltering == ResourceFilteringDisabled, + TaskTypes: localTasks, + } + worker := NewLocalWorker(wcfg, stor, 
lstor, si, m, wss) + err = m.AddWorker(ctx, worker) if err != nil { return nil, xerrors.Errorf("adding local worker: %w", err) } @@ -206,71 +217,11 @@ func (m *Manager) schedFetch(sector storage.SectorRef, ft storiface.SectorFileTy } } -func (m *Manager) readPiece(sink io.Writer, sector storage.SectorRef, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, rok *bool) func(ctx context.Context, w Worker) error { - return func(ctx context.Context, w Worker) error { - log.Debugf("read piece data from sector %d, offset %d, size %d", sector.ID, offset, size) - r, err := m.waitSimpleCall(ctx)(w.ReadPiece(ctx, sink, sector, offset, size)) - if err != nil { - return err - } - if r != nil { - *rok = r.(bool) - } - log.Debugf("completed read piece data from sector %d, offset %d, size %d: read ok? %t", sector.ID, offset, size, *rok) - return nil - } -} - -func (m *Manager) tryReadUnsealedPiece(ctx context.Context, sink io.Writer, sector storage.SectorRef, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (foundUnsealed bool, readOk bool, selector WorkerSelector, returnErr error) { - - // acquire a lock purely for reading unsealed sectors - ctx, cancel := context.WithCancel(ctx) - defer cancel() - - log.Debugf("acquire read sector lock for sector %d", sector.ID) - if err := m.index.StorageLock(ctx, sector.ID, storiface.FTUnsealed, storiface.FTNone); err != nil { - returnErr = xerrors.Errorf("acquiring read sector lock: %w", err) - return - } - - log.Debugf("find unsealed sector %d", sector.ID) - // passing 0 spt because we only need it when allowFetch is true - best, err := m.index.StorageFindSector(ctx, sector.ID, storiface.FTUnsealed, 0, false) - if err != nil { - returnErr = xerrors.Errorf("read piece: checking for already existing unsealed sector: %w", err) - return - } - - foundUnsealed = len(best) > 0 - if foundUnsealed { // append to existing - // There is unsealed sector, see if we can read from it - log.Debugf("found unsealed sector %d", 
sector.ID) - - selector = newExistingSelector(m.index, sector.ID, storiface.FTUnsealed, false) - - log.Debugf("scheduling read of unsealed sector %d", sector.ID) - err = m.sched.Schedule(ctx, sector, sealtasks.TTReadUnsealed, selector, m.schedFetch(sector, storiface.FTUnsealed, storiface.PathSealing, storiface.AcquireMove), - m.readPiece(sink, sector, offset, size, &readOk)) - if err != nil { - returnErr = xerrors.Errorf("reading piece from sealed sector: %w", err) - } - } else { - log.Debugf("did not find unsealed sector %d", sector.ID) - selector = newAllocSelector(m.index, storiface.FTUnsealed, storiface.PathSealing) - } - return -} - -func (m *Manager) ReadPiece(ctx context.Context, sink io.Writer, sector storage.SectorRef, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, ticket abi.SealRandomness, unsealed cid.Cid) error { - log.Debugf("fetch and read piece in sector %d, offset %d, size %d", sector.ID, offset, size) - foundUnsealed, readOk, selector, err := m.tryReadUnsealedPiece(ctx, sink, sector, offset, size) - if err != nil { - return err - } - if readOk { - log.Debugf("completed read of unsealed piece in sector %d, offset %d, size %d", sector.ID, offset, size) - return nil - } +// SectorsUnsealPiece will Unseal the Sealed sector file for the given sector. +// It will schedule the Unsealing task on a worker that either already has the sealed sector files or has space in +// one of it's sealing scratch spaces to store them after fetching them from another worker. +// If the chosen worker already has the Unsealed sector file, we will NOT Unseal the sealed sector file again. +func (m *Manager) SectorsUnsealPiece(ctx context.Context, sector storage.SectorRef, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, ticket abi.SealRandomness, unsealed *cid.Cid) error { ctx, cancel := context.WithCancel(ctx) defer cancel() @@ -279,22 +230,18 @@ func (m *Manager) ReadPiece(ctx context.Context, sink io.Writer, sector storage. 
return xerrors.Errorf("acquiring unseal sector lock: %w", err) } - unsealFetch := func(ctx context.Context, worker Worker) error { + // if the selected worker does NOT have the sealed files for the sector, instruct it to fetch it from a worker that has them and + // put it in the sealing scratch space. + sealFetch := func(ctx context.Context, worker Worker) error { log.Debugf("copy sealed/cache sector data for sector %d", sector.ID) if _, err := m.waitSimpleCall(ctx)(worker.Fetch(ctx, sector, storiface.FTSealed|storiface.FTCache, storiface.PathSealing, storiface.AcquireCopy)); err != nil { return xerrors.Errorf("copy sealed/cache sector data: %w", err) } - if foundUnsealed { - log.Debugf("copy unsealed sector data for sector %d", sector.ID) - if _, err := m.waitSimpleCall(ctx)(worker.Fetch(ctx, sector, storiface.FTUnsealed, storiface.PathSealing, storiface.AcquireMove)); err != nil { - return xerrors.Errorf("copy unsealed sector data: %w", err) - } - } return nil } - if unsealed == cid.Undef { + if unsealed == nil { return xerrors.Errorf("cannot unseal piece (sector: %d, offset: %d size: %d) - unsealed cid is undefined", sector, offset, size) } @@ -303,36 +250,28 @@ func (m *Manager) ReadPiece(ctx context.Context, sink io.Writer, sector storage. return xerrors.Errorf("getting sector size: %w", err) } - log.Debugf("schedule unseal for sector %d", sector.ID) - err = m.sched.Schedule(ctx, sector, sealtasks.TTUnseal, selector, unsealFetch, func(ctx context.Context, w Worker) error { + // selector will schedule the Unseal task on a worker that either already has the sealed sector files or has space in + // one of it's sealing scratch spaces to store them after fetching them from another worker. 
+ selector := newExistingSelector(m.index, sector.ID, storiface.FTSealed|storiface.FTCache, true) + + log.Debugf("will schedule unseal for sector %d", sector.ID) + err = m.sched.Schedule(ctx, sector, sealtasks.TTUnseal, selector, sealFetch, func(ctx context.Context, w Worker) error { // TODO: make restartable // NOTE: we're unsealing the whole sector here as with SDR we can't really // unseal the sector partially. Requesting the whole sector here can // save us some work in case another piece is requested from here - log.Debugf("unseal sector %d", sector.ID) - _, err := m.waitSimpleCall(ctx)(w.UnsealPiece(ctx, sector, 0, abi.PaddedPieceSize(ssize).Unpadded(), ticket, unsealed)) + log.Debugf("calling unseal sector on worker, sectoID=%d", sector.ID) + + // Note: This unseal piece call will essentially become a no-op if the worker already has an Unsealed sector file for the given sector. + _, err := m.waitSimpleCall(ctx)(w.UnsealPiece(ctx, sector, 0, abi.PaddedPieceSize(ssize).Unpadded(), ticket, *unsealed)) log.Debugf("completed unseal sector %d", sector.ID) return err }) if err != nil { - return err + return xerrors.Errorf("worker UnsealPiece call: %s", err) } - selector = newExistingSelector(m.index, sector.ID, storiface.FTUnsealed, false) - - log.Debugf("schedule read piece for sector %d, offset %d, size %d", sector.ID, offset, size) - err = m.sched.Schedule(ctx, sector, sealtasks.TTReadUnsealed, selector, m.schedFetch(sector, storiface.FTUnsealed, storiface.PathSealing, storiface.AcquireMove), - m.readPiece(sink, sector, offset, size, &readOk)) - if err != nil { - return xerrors.Errorf("reading piece from sealed sector: %w", err) - } - - if !readOk { - return xerrors.Errorf("failed to read unsealed piece") - } - - log.Debugf("completed read of piece in sector %d, offset %d, size %d", sector.ID, offset, size) return nil } @@ -782,4 +721,5 @@ func (m *Manager) Close(ctx context.Context) error { return m.sched.Close(ctx) } +var _ Unsealer = &Manager{} var _ 
SectorManager = &Manager{} diff --git a/extern/sector-storage/manager_test.go b/extern/sector-storage/manager_test.go index 1cf9d0aad..d4044bbae 100644 --- a/extern/sector-storage/manager_test.go +++ b/extern/sector-storage/manager_test.go @@ -98,7 +98,7 @@ func newTestMgr(ctx context.Context, t *testing.T, ds datastore.Datastore) (*Man prover, err := ffiwrapper.New(&readonlyProvider{stor: lstor, index: si}) require.NoError(t, err) - stor := stores.NewRemote(lstor, si, nil, 6000) + stor := stores.NewRemote(lstor, si, nil, 6000, &stores.DefaultPartialFileHandler{}) m := &Manager{ ls: st, diff --git a/extern/sector-storage/mock/mock.go b/extern/sector-storage/mock/mock.go index 52496f836..273f0928e 100644 --- a/extern/sector-storage/mock/mock.go +++ b/extern/sector-storage/mock/mock.go @@ -6,6 +6,7 @@ import ( "crypto/sha256" "fmt" "io" + "io/ioutil" "math/rand" "sync" @@ -34,7 +35,9 @@ type SectorMgr struct { lk sync.Mutex } -type mockVerifProver struct{} +type mockVerifProver struct { + aggregates map[string]proof5.AggregateSealVerifyProofAndInfos // used for logging bad verifies +} func NewMockSectorMgr(genesisSectors []abi.SectorID) *SectorMgr { sectors := make(map[abi.SectorID]*sectorState) @@ -72,6 +75,10 @@ func (mgr *SectorMgr) NewSector(ctx context.Context, sector storage.SectorRef) e return nil } +func (mgr *SectorMgr) SectorsUnsealPiece(ctx context.Context, sector storage.SectorRef, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, randomness abi.SealRandomness, commd *cid.Cid) error { + panic("SectorMgr: unsealing piece: implement me") +} + func (mgr *SectorMgr) AddPiece(ctx context.Context, sectorID storage.SectorRef, existingPieces []abi.UnpaddedPieceSize, size abi.UnpaddedPieceSize, r io.Reader) (abi.PieceInfo, error) { log.Warn("Add piece: ", sectorID, size, sectorID.ProofType) @@ -116,6 +123,10 @@ func (mgr *SectorMgr) AcquireSectorNumber() (abi.SectorNumber, error) { return id, nil } +func (mgr *SectorMgr) IsUnsealed(ctx 
context.Context, sector storage.SectorRef, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (bool, error) { + return false, nil +} + func (mgr *SectorMgr) ForceState(sid storage.SectorRef, st int) error { mgr.lk.Lock() ss, ok := mgr.sectors[sid.ID] @@ -373,13 +384,12 @@ func generateFakePoSt(sectorInfo []proof5.SectorInfo, rpt func(abi.RegisteredSea } } -func (mgr *SectorMgr) ReadPiece(ctx context.Context, w io.Writer, sectorID storage.SectorRef, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, randomness abi.SealRandomness, c cid.Cid) error { +func (mgr *SectorMgr) ReadPiece(ctx context.Context, sector storage.SectorRef, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, ticket abi.SealRandomness, unsealed cid.Cid) (io.ReadCloser, bool, error) { if offset != 0 { panic("implme") } - _, err := io.CopyN(w, bytes.NewReader(mgr.pieces[mgr.sectors[sectorID.ID].pieces[0]]), int64(size)) - return err + return ioutil.NopCloser(bytes.NewReader(mgr.pieces[mgr.sectors[sector.ID].pieces[0]][:size])), false, nil } func (mgr *SectorMgr) StageFakeData(mid abi.ActorID, spt abi.RegisteredSealProof) (storage.SectorRef, []abi.PieceInfo, error) { @@ -522,7 +532,19 @@ func (m mockVerifProver) VerifyAggregateSeals(aggregate proof5.AggregateSealVeri } } - return bytes.Equal(aggregate.Proof, out), nil + ok := bytes.Equal(aggregate.Proof, out) + if !ok { + genInfo, found := m.aggregates[string(aggregate.Proof)] + if !found { + log.Errorf("BAD AGGREGATE: saved generate inputs not found; agg.Proof: %x; expected: %x", aggregate.Proof, out) + } else { + log.Errorf("BAD AGGREGATE (1): agg.Proof: %x; expected: %x", aggregate.Proof, out) + log.Errorf("BAD AGGREGATE (2): Verify Infos: %+v", aggregate.Infos) + log.Errorf("BAD AGGREGATE (3): Generate Infos: %+v", genInfo.Infos) + } + } + + return ok, nil } func (m mockVerifProver) AggregateSealProofs(aggregateInfo proof5.AggregateSealVerifyProofAndInfos, proofs [][]byte) ([]byte, error) { @@ -533,6 +555,8 
@@ func (m mockVerifProver) AggregateSealProofs(aggregateInfo proof5.AggregateSealV } } + m.aggregates[string(out)] = aggregateInfo + return out, nil } @@ -592,8 +616,11 @@ func (m mockVerifProver) GenerateWinningPoStSectorChallenge(ctx context.Context, return []uint64{0}, nil } -var MockVerifier = mockVerifProver{} -var MockProver = mockVerifProver{} +var MockVerifier = mockVerifProver{ + aggregates: map[string]proof5.AggregateSealVerifyProofAndInfos{}, +} + +var MockProver = MockVerifier var _ storage.Sealer = &SectorMgr{} var _ ffiwrapper.Verifier = MockVerifier diff --git a/extern/sector-storage/ffiwrapper/partialfile.go b/extern/sector-storage/partialfile/partialfile.go similarity index 85% rename from extern/sector-storage/ffiwrapper/partialfile.go rename to extern/sector-storage/partialfile/partialfile.go index e19930ac1..529e889ea 100644 --- a/extern/sector-storage/ffiwrapper/partialfile.go +++ b/extern/sector-storage/partialfile/partialfile.go @@ -1,4 +1,4 @@ -package ffiwrapper +package partialfile import ( "encoding/binary" @@ -14,8 +14,12 @@ import ( "github.com/filecoin-project/lotus/extern/sector-storage/fsutil" "github.com/filecoin-project/lotus/extern/sector-storage/storiface" + + logging "github.com/ipfs/go-log/v2" ) +var log = logging.Logger("partialfile") + const veryLargeRle = 1 << 20 // Sectors can be partially unsealed. 
We support this by appending a small @@ -25,7 +29,7 @@ const veryLargeRle = 1 << 20 // unsealed sector files internally have this structure // [unpadded (raw) data][rle+][4B LE length fo the rle+ field] -type partialFile struct { +type PartialFile struct { maxPiece abi.PaddedPieceSize path string @@ -57,7 +61,7 @@ func writeTrailer(maxPieceSize int64, w *os.File, r rlepluslazy.RunIterator) err return w.Truncate(maxPieceSize + int64(rb) + 4) } -func createPartialFile(maxPieceSize abi.PaddedPieceSize, path string) (*partialFile, error) { +func CreatePartialFile(maxPieceSize abi.PaddedPieceSize, path string) (*PartialFile, error) { f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE, 0644) // nolint if err != nil { return nil, xerrors.Errorf("openning partial file '%s': %w", path, err) @@ -89,10 +93,10 @@ func createPartialFile(maxPieceSize abi.PaddedPieceSize, path string) (*partialF return nil, xerrors.Errorf("close empty partial file: %w", err) } - return openPartialFile(maxPieceSize, path) + return OpenPartialFile(maxPieceSize, path) } -func openPartialFile(maxPieceSize abi.PaddedPieceSize, path string) (*partialFile, error) { +func OpenPartialFile(maxPieceSize abi.PaddedPieceSize, path string) (*PartialFile, error) { f, err := os.OpenFile(path, os.O_RDWR, 0644) // nolint if err != nil { return nil, xerrors.Errorf("openning partial file '%s': %w", path, err) @@ -165,7 +169,7 @@ func openPartialFile(maxPieceSize abi.PaddedPieceSize, path string) (*partialFil return nil, err } - return &partialFile{ + return &PartialFile{ maxPiece: maxPieceSize, path: path, allocated: rle, @@ -173,11 +177,11 @@ func openPartialFile(maxPieceSize abi.PaddedPieceSize, path string) (*partialFil }, nil } -func (pf *partialFile) Close() error { +func (pf *PartialFile) Close() error { return pf.file.Close() } -func (pf *partialFile) Writer(offset storiface.PaddedByteIndex, size abi.PaddedPieceSize) (io.Writer, error) { +func (pf *PartialFile) Writer(offset storiface.PaddedByteIndex, size 
abi.PaddedPieceSize) (io.Writer, error) { if _, err := pf.file.Seek(int64(offset), io.SeekStart); err != nil { return nil, xerrors.Errorf("seek piece start: %w", err) } @@ -188,7 +192,7 @@ func (pf *partialFile) Writer(offset storiface.PaddedByteIndex, size abi.PaddedP return nil, err } - and, err := rlepluslazy.And(have, pieceRun(offset, size)) + and, err := rlepluslazy.And(have, PieceRun(offset, size)) if err != nil { return nil, err } @@ -206,13 +210,13 @@ func (pf *partialFile) Writer(offset storiface.PaddedByteIndex, size abi.PaddedP return pf.file, nil } -func (pf *partialFile) MarkAllocated(offset storiface.PaddedByteIndex, size abi.PaddedPieceSize) error { +func (pf *PartialFile) MarkAllocated(offset storiface.PaddedByteIndex, size abi.PaddedPieceSize) error { have, err := pf.allocated.RunIterator() if err != nil { return err } - ored, err := rlepluslazy.Or(have, pieceRun(offset, size)) + ored, err := rlepluslazy.Or(have, PieceRun(offset, size)) if err != nil { return err } @@ -224,7 +228,7 @@ func (pf *partialFile) MarkAllocated(offset storiface.PaddedByteIndex, size abi. 
return nil } -func (pf *partialFile) Free(offset storiface.PaddedByteIndex, size abi.PaddedPieceSize) error { +func (pf *PartialFile) Free(offset storiface.PaddedByteIndex, size abi.PaddedPieceSize) error { have, err := pf.allocated.RunIterator() if err != nil { return err @@ -234,7 +238,7 @@ func (pf *partialFile) Free(offset storiface.PaddedByteIndex, size abi.PaddedPie return xerrors.Errorf("deallocating: %w", err) } - s, err := rlepluslazy.Subtract(have, pieceRun(offset, size)) + s, err := rlepluslazy.Subtract(have, PieceRun(offset, size)) if err != nil { return err } @@ -246,7 +250,7 @@ func (pf *partialFile) Free(offset storiface.PaddedByteIndex, size abi.PaddedPie return nil } -func (pf *partialFile) Reader(offset storiface.PaddedByteIndex, size abi.PaddedPieceSize) (*os.File, error) { +func (pf *PartialFile) Reader(offset storiface.PaddedByteIndex, size abi.PaddedPieceSize) (*os.File, error) { if _, err := pf.file.Seek(int64(offset), io.SeekStart); err != nil { return nil, xerrors.Errorf("seek piece start: %w", err) } @@ -257,7 +261,7 @@ func (pf *partialFile) Reader(offset storiface.PaddedByteIndex, size abi.PaddedP return nil, err } - and, err := rlepluslazy.And(have, pieceRun(offset, size)) + and, err := rlepluslazy.And(have, PieceRun(offset, size)) if err != nil { return nil, err } @@ -275,17 +279,17 @@ func (pf *partialFile) Reader(offset storiface.PaddedByteIndex, size abi.PaddedP return pf.file, nil } -func (pf *partialFile) Allocated() (rlepluslazy.RunIterator, error) { +func (pf *PartialFile) Allocated() (rlepluslazy.RunIterator, error) { return pf.allocated.RunIterator() } -func (pf *partialFile) HasAllocated(offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (bool, error) { +func (pf *PartialFile) HasAllocated(offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (bool, error) { have, err := pf.Allocated() if err != nil { return false, err } - u, err := rlepluslazy.And(have, pieceRun(offset.Padded(), size.Padded())) + u, 
err := rlepluslazy.And(have, PieceRun(offset.Padded(), size.Padded())) if err != nil { return false, err } @@ -298,7 +302,7 @@ func (pf *partialFile) HasAllocated(offset storiface.UnpaddedByteIndex, size abi return abi.PaddedPieceSize(uc) == size.Padded(), nil } -func pieceRun(offset storiface.PaddedByteIndex, size abi.PaddedPieceSize) rlepluslazy.RunIterator { +func PieceRun(offset storiface.PaddedByteIndex, size abi.PaddedPieceSize) rlepluslazy.RunIterator { var runs []rlepluslazy.Run if offset > 0 { runs = append(runs, rlepluslazy.Run{ diff --git a/extern/sector-storage/piece_provider.go b/extern/sector-storage/piece_provider.go new file mode 100644 index 000000000..ad3a2543e --- /dev/null +++ b/extern/sector-storage/piece_provider.go @@ -0,0 +1,176 @@ +package sectorstorage + +import ( + "bufio" + "context" + "io" + + "github.com/ipfs/go-cid" + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/specs-storage/storage" + + "github.com/filecoin-project/lotus/extern/sector-storage/fr32" + "github.com/filecoin-project/lotus/extern/sector-storage/stores" + "github.com/filecoin-project/lotus/extern/sector-storage/storiface" +) + +type Unsealer interface { + // SectorsUnsealPiece will Unseal a Sealed sector file for the given sector. 
+ SectorsUnsealPiece(ctx context.Context, sector storage.SectorRef, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, randomness abi.SealRandomness, commd *cid.Cid) error +} + +type PieceProvider interface { + // ReadPiece is used to read an Unsealed piece at the given offset and of the given size from a Sector + ReadPiece(ctx context.Context, sector storage.SectorRef, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, ticket abi.SealRandomness, unsealed cid.Cid) (io.ReadCloser, bool, error) + IsUnsealed(ctx context.Context, sector storage.SectorRef, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (bool, error) +} + +var _ PieceProvider = &pieceProvider{} + +type pieceProvider struct { + storage *stores.Remote + index stores.SectorIndex + uns Unsealer +} + +func NewPieceProvider(storage *stores.Remote, index stores.SectorIndex, uns Unsealer) PieceProvider { + return &pieceProvider{ + storage: storage, + index: index, + uns: uns, + } +} + +// IsUnsealed checks if we have the unsealed piece at the given offset in an already +// existing unsealed file either locally or on any of the workers. 
+func (p *pieceProvider) IsUnsealed(ctx context.Context, sector storage.SectorRef, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (bool, error) { + if err := offset.Valid(); err != nil { + return false, xerrors.Errorf("offset is not valid: %w", err) + } + if err := size.Validate(); err != nil { + return false, xerrors.Errorf("size is not a valid piece size: %w", err) + } + + ctxLock, cancel := context.WithCancel(ctx) + defer cancel() + + if err := p.index.StorageLock(ctxLock, sector.ID, storiface.FTUnsealed, storiface.FTNone); err != nil { + return false, xerrors.Errorf("acquiring read sector lock: %w", err) + } + + return p.storage.CheckIsUnsealed(ctxLock, sector, abi.PaddedPieceSize(offset.Padded()), size.Padded()) +} + +// tryReadUnsealedPiece will try to read the unsealed piece from an existing unsealed sector file for the given sector from any worker that has it. +// It will NOT try to schedule an Unseal of a sealed sector file for the read. +// +// Returns a nil reader if the piece does NOT exist in any unsealed file or there is no unsealed file for the given sector on any of the workers. +func (p *pieceProvider) tryReadUnsealedPiece(ctx context.Context, sector storage.SectorRef, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (io.ReadCloser, context.CancelFunc, error) { + // acquire a lock purely for reading unsealed sectors + ctx, cancel := context.WithCancel(ctx) + if err := p.index.StorageLock(ctx, sector.ID, storiface.FTUnsealed, storiface.FTNone); err != nil { + cancel() + return nil, nil, xerrors.Errorf("acquiring read sector lock: %w", err) + } + + // Reader returns a reader for an unsealed piece at the given offset in the given sector. + // The returned reader will be nil if none of the workers has an unsealed sector file containing + // the unsealed piece. 
+ r, err := p.storage.Reader(ctx, sector, abi.PaddedPieceSize(offset.Padded()), size.Padded()) + if err != nil { + log.Debugf("did not get storage reader;sector=%+v, err:%s", sector.ID, err) + cancel() + return nil, nil, err + } + if r == nil { + cancel() + } + + return r, cancel, nil +} + +// ReadPiece is used to read an Unsealed piece at the given offset and of the given size from a Sector +// If an Unsealed sector file exists with the Piece Unsealed in it, we'll use that for the read. +// Otherwise, we will Unseal a Sealed sector file for the given sector and read the Unsealed piece from it. +// If we do NOT have an existing unsealed file containing the given piece thus causing us to schedule an Unseal, +// the returned boolean parameter will be set to true. +// If we have an existing unsealed file containing the given piece, the returned boolean will be set to false. +func (p *pieceProvider) ReadPiece(ctx context.Context, sector storage.SectorRef, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, ticket abi.SealRandomness, unsealed cid.Cid) (io.ReadCloser, bool, error) { + if err := offset.Valid(); err != nil { + return nil, false, xerrors.Errorf("offset is not valid: %w", err) + } + if err := size.Validate(); err != nil { + return nil, false, xerrors.Errorf("size is not a valid piece size: %w", err) + } + + r, unlock, err := p.tryReadUnsealedPiece(ctx, sector, offset, size) + + log.Debugf("result of first tryReadUnsealedPiece: r=%+v, err=%s", r, err) + + if xerrors.Is(err, storiface.ErrSectorNotFound) { + log.Debugf("no unsealed sector file with unsealed piece, sector=%+v, offset=%d, size=%d", sector, offset, size) + err = nil + } + if err != nil { + log.Errorf("returning error from ReadPiece:%s", err) + return nil, false, err + } + + var uns bool + + if r == nil { + // a nil reader means that none of the workers has an unsealed sector file + // containing the unsealed piece. 
+ // we now need to unseal a sealed sector file for the given sector to read the unsealed piece from it. + uns = true + commd := &unsealed + if unsealed == cid.Undef { + commd = nil + } + if err := p.uns.SectorsUnsealPiece(ctx, sector, offset, size, ticket, commd); err != nil { + log.Errorf("failed to SectorsUnsealPiece: %s", err) + return nil, false, xerrors.Errorf("unsealing piece: %w", err) + } + + log.Debugf("unsealed a sector file to read the piece, sector=%+v, offset=%d, size=%d", sector, offset, size) + + r, unlock, err = p.tryReadUnsealedPiece(ctx, sector, offset, size) + if err != nil { + log.Errorf("failed to tryReadUnsealedPiece after SectorsUnsealPiece: %s", err) + return nil, true, xerrors.Errorf("read after unsealing: %w", err) + } + if r == nil { + log.Errorf("got no reader after unsealing piece") + return nil, true, xerrors.Errorf("got no reader after unsealing piece") + } + log.Debugf("got a reader to read unsealed piece, sector=%+v, offset=%d, size=%d", sector, offset, size) + } else { + log.Debugf("unsealed piece already exists, no need to unseal, sector=%+v, offset=%d, size=%d", sector, offset, size) + } + + upr, err := fr32.NewUnpadReader(r, size.Padded()) + if err != nil { + unlock() + return nil, uns, xerrors.Errorf("creating unpadded reader: %w", err) + } + + log.Debugf("returning reader to read unsealed piece, sector=%+v, offset=%d, size=%d", sector, offset, size) + + return &funcCloser{ + Reader: bufio.NewReaderSize(upr, 127), + close: func() error { + err = r.Close() + unlock() + return err + }, + }, uns, nil +} + +type funcCloser struct { + io.Reader + close func() error +} + +func (fc *funcCloser) Close() error { return fc.close() } diff --git a/extern/sector-storage/piece_provider_test.go b/extern/sector-storage/piece_provider_test.go new file mode 100644 index 000000000..d6fa14574 --- /dev/null +++ b/extern/sector-storage/piece_provider_test.go @@ -0,0 +1,361 @@ +package sectorstorage + +import ( + "bytes" + "context" + "io/ioutil" + 
"math/rand" + "net" + "net/http" + "testing" + + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-statestore" + "github.com/filecoin-project/lotus/extern/sector-storage/sealtasks" + specstorage "github.com/filecoin-project/specs-storage/storage" + "github.com/gorilla/mux" + "github.com/ipfs/go-cid" + "github.com/ipfs/go-datastore" + "github.com/ipfs/go-datastore/namespace" + ds_sync "github.com/ipfs/go-datastore/sync" + logging "github.com/ipfs/go-log/v2" + "github.com/stretchr/testify/require" + + "github.com/filecoin-project/lotus/extern/sector-storage/stores" + "github.com/filecoin-project/lotus/extern/sector-storage/storiface" +) + +// TestPieceProviderReadPiece verifies that the ReadPiece method works correctly +// only uses miner and does NOT use any remote worker. +func TestPieceProviderSimpleNoRemoteWorker(t *testing.T) { + // Set up sector storage manager + sealerCfg := SealerConfig{ + ParallelFetchLimit: 10, + AllowAddPiece: true, + AllowPreCommit1: true, + AllowPreCommit2: true, + AllowCommit: true, + AllowUnseal: true, + } + + ppt := newPieceProviderTestHarness(t, sealerCfg, abi.RegisteredSealProof_StackedDrg8MiBV1) + defer ppt.shutdown(t) + + // Create some padded data that aligns with the piece boundaries. 
+ pieceData := generatePieceData(8 * 127 * 1024 * 8) + size := abi.UnpaddedPieceSize(len(pieceData)) + ppt.addPiece(t, pieceData) + + // read piece + ppt.readPiece(t, storiface.UnpaddedByteIndex(0), size, + false, pieceData) + + // pre-commit 1 + preCommit1 := ppt.preCommit1(t) + + // check if IsUnsealed -> true + require.True(t, ppt.isUnsealed(t, storiface.UnpaddedByteIndex(0), size)) + // read piece + ppt.readPiece(t, storiface.UnpaddedByteIndex(0), size, + false, pieceData) + + // pre-commit 2 + ppt.preCommit2(t, preCommit1) + + // check if IsUnsealed -> true + require.True(t, ppt.isUnsealed(t, storiface.UnpaddedByteIndex(0), size)) + // read piece + ppt.readPiece(t, storiface.UnpaddedByteIndex(0), size, + false, pieceData) + + // finalize -> nil here will remove unsealed file + ppt.finalizeSector(t, nil) + + // check if IsUnsealed -> false + require.False(t, ppt.isUnsealed(t, storiface.UnpaddedByteIndex(0), size)) + // Read the piece -> will have to unseal + ppt.readPiece(t, storiface.UnpaddedByteIndex(0), size, + true, pieceData) + + // check if IsUnsealed -> true + require.True(t, ppt.isUnsealed(t, storiface.UnpaddedByteIndex(0), size)) + // read the piece -> will not have to unseal + ppt.readPiece(t, storiface.UnpaddedByteIndex(0), size, + false, pieceData) + +} +func TestReadPieceRemoteWorkers(t *testing.T) { + logging.SetAllLoggers(logging.LevelDebug) + + // miner's worker can only add pieces to an unsealed sector. + sealerCfg := SealerConfig{ + ParallelFetchLimit: 10, + AllowAddPiece: true, + AllowPreCommit1: false, + AllowPreCommit2: false, + AllowCommit: false, + AllowUnseal: false, + } + + // test harness for an 8M sector. + ppt := newPieceProviderTestHarness(t, sealerCfg, abi.RegisteredSealProof_StackedDrg8MiBV1) + defer ppt.shutdown(t) + + // worker 2 will ONLY help with the sealing by first fetching + // the unsealed file from the miner. 
+ ppt.addRemoteWorker(t, []sealtasks.TaskType{ + sealtasks.TTPreCommit1, sealtasks.TTPreCommit2, sealtasks.TTCommit1, + sealtasks.TTFetch, sealtasks.TTFinalize, + }) + + // create a worker that can ONLY unseal and fetch + ppt.addRemoteWorker(t, []sealtasks.TaskType{ + sealtasks.TTUnseal, sealtasks.TTFetch, + }) + + // run the test + + // add one piece that aligns with the padding/piece boundaries. + pd1 := generatePieceData(8 * 127 * 4 * 1024) + pi1 := ppt.addPiece(t, pd1) + pd1size := pi1.Size.Unpadded() + + pd2 := generatePieceData(8 * 127 * 4 * 1024) + pi2 := ppt.addPiece(t, pd2) + pd2size := pi2.Size.Unpadded() + + // pre-commit 1 + pC1 := ppt.preCommit1(t) + + // check if IsUnsealed -> true + require.True(t, ppt.isUnsealed(t, storiface.UnpaddedByteIndex(0), pd1size)) + // Read the piece -> no need to unseal + ppt.readPiece(t, storiface.UnpaddedByteIndex(0), pd1size, + false, pd1) + + // pre-commit 2 + ppt.preCommit2(t, pC1) + + // check if IsUnsealed -> true + require.True(t, ppt.isUnsealed(t, storiface.UnpaddedByteIndex(0), pd1size)) + // Read the piece -> no need to unseal + ppt.readPiece(t, storiface.UnpaddedByteIndex(0), pd1size, + false, pd1) + + // finalize the sector so we declare to the index we have the sealed file + // so the unsealing worker can later look it up and fetch it if needed + // sending nil here will remove all unsealed files after sector is finalized. + ppt.finalizeSector(t, nil) + + // check if IsUnsealed -> false + require.False(t, ppt.isUnsealed(t, storiface.UnpaddedByteIndex(0), pd1size)) + // Read the piece -> have to unseal since we removed the file. + ppt.readPiece(t, storiface.UnpaddedByteIndex(0), pd1size, + true, pd1) + + // Read the same piece again -> will NOT have to unseal. + ppt.readPiece(t, storiface.UnpaddedByteIndex(0), pd1size, false, pd1) + + // remove the unsealed file and read again -> will have to unseal. 
+ ppt.removeAllUnsealedSectorFiles(t) + // check if IsUnsealed -> false + require.False(t, ppt.isUnsealed(t, storiface.UnpaddedByteIndex(0), pd1size)) + ppt.readPiece(t, storiface.UnpaddedByteIndex(0), pd1size, + true, pd1) + + // check if IsUnsealed -> true + require.True(t, ppt.isUnsealed(t, storiface.UnpaddedByteIndex(pd1size), pd2size)) + // Read Piece 2 -> no unsealing as it got unsealed above. + ppt.readPiece(t, storiface.UnpaddedByteIndex(pd1size), pd2size, false, pd2) + + // remove all unseal files -> Read Piece 2 -> will have to Unseal. + ppt.removeAllUnsealedSectorFiles(t) + + // check if IsUnsealed -> false + require.False(t, ppt.isUnsealed(t, storiface.UnpaddedByteIndex(pd1size), pd2size)) + ppt.readPiece(t, storiface.UnpaddedByteIndex(pd1size), pd2size, true, pd2) +} + +type pieceProviderTestHarness struct { + ctx context.Context + index *stores.Index + pp PieceProvider + sector specstorage.SectorRef + mgr *Manager + ticket abi.SealRandomness + commD cid.Cid + localStores []*stores.Local + + servers []*http.Server + + addedPieces []abi.PieceInfo +} + +func generatePieceData(size uint64) []byte { + bz := make([]byte, size) + rand.Read(bz) + return bz +} + +func newPieceProviderTestHarness(t *testing.T, mgrConfig SealerConfig, sectorProofType abi.RegisteredSealProof) *pieceProviderTestHarness { + ctx := context.Background() + // listen on tcp socket to create an http server later + address := "0.0.0.0:0" + nl, err := net.Listen("tcp", address) + require.NoError(t, err) + + // create index, storage, local store & remote store. + index := stores.NewIndex() + storage := newTestStorage(t) + localStore, err := stores.NewLocal(ctx, storage, index, []string{"http://" + nl.Addr().String() + "/remote"}) + require.NoError(t, err) + remoteStore := stores.NewRemote(localStore, index, nil, 6000, &stores.DefaultPartialFileHandler{}) + + // data stores for state tracking. 
+ dstore := ds_sync.MutexWrap(datastore.NewMapDatastore()) + wsts := statestore.New(namespace.Wrap(dstore, datastore.NewKey("/worker/calls"))) + smsts := statestore.New(namespace.Wrap(dstore, datastore.NewKey("/stmgr/calls"))) + + mgr, err := New(ctx, localStore, remoteStore, storage, index, mgrConfig, wsts, smsts) + require.NoError(t, err) + + // start a http server on the manager to serve sector file requests. + svc := &http.Server{ + Addr: nl.Addr().String(), + Handler: mgr, + } + go func() { + _ = svc.Serve(nl) + }() + + pp := NewPieceProvider(remoteStore, index, mgr) + + sector := specstorage.SectorRef{ + ID: abi.SectorID{ + Miner: 100, + Number: 10, + }, + ProofType: sectorProofType, + } + + ticket := abi.SealRandomness{9, 9, 9, 9, 9, 9, 9, 9} + + ppt := &pieceProviderTestHarness{ + ctx: ctx, + index: index, + pp: pp, + sector: sector, + mgr: mgr, + ticket: ticket, + } + ppt.servers = append(ppt.servers, svc) + ppt.localStores = append(ppt.localStores, localStore) + return ppt +} + +func (p *pieceProviderTestHarness) addRemoteWorker(t *testing.T, tasks []sealtasks.TaskType) { + // start an http Server + address := "0.0.0.0:0" + nl, err := net.Listen("tcp", address) + require.NoError(t, err) + + localStore, err := stores.NewLocal(p.ctx, newTestStorage(t), p.index, []string{"http://" + nl.Addr().String() + "/remote"}) + require.NoError(t, err) + + fh := &stores.FetchHandler{ + Local: localStore, + PfHandler: &stores.DefaultPartialFileHandler{}, + } + + mux := mux.NewRouter() + mux.PathPrefix("/remote").HandlerFunc(fh.ServeHTTP) + svc := &http.Server{ + Addr: nl.Addr().String(), + Handler: mux, + } + + go func() { + _ = svc.Serve(nl) + }() + + remote := stores.NewRemote(localStore, p.index, nil, 1000, + &stores.DefaultPartialFileHandler{}) + + dstore := ds_sync.MutexWrap(datastore.NewMapDatastore()) + csts := statestore.New(namespace.Wrap(dstore, datastore.NewKey("/stmgr/calls"))) + + worker := newLocalWorker(nil, WorkerConfig{ + TaskTypes: tasks, + }, remote, 
localStore, p.index, p.mgr, csts) + + p.servers = append(p.servers, svc) + p.localStores = append(p.localStores, localStore) + + // register self with manager + require.NoError(t, p.mgr.AddWorker(p.ctx, worker)) +} + +func (p *pieceProviderTestHarness) removeAllUnsealedSectorFiles(t *testing.T) { + for i := range p.localStores { + ls := p.localStores[i] + require.NoError(t, ls.Remove(p.ctx, p.sector.ID, storiface.FTUnsealed, false)) + } +} + +func (p *pieceProviderTestHarness) addPiece(t *testing.T, pieceData []byte) abi.PieceInfo { + var existing []abi.UnpaddedPieceSize + for _, pi := range p.addedPieces { + existing = append(existing, pi.Size.Unpadded()) + } + + size := abi.UnpaddedPieceSize(len(pieceData)) + pieceInfo, err := p.mgr.AddPiece(p.ctx, p.sector, existing, size, bytes.NewReader(pieceData)) + require.NoError(t, err) + + p.addedPieces = append(p.addedPieces, pieceInfo) + return pieceInfo +} + +func (p *pieceProviderTestHarness) preCommit1(t *testing.T) specstorage.PreCommit1Out { + preCommit1, err := p.mgr.SealPreCommit1(p.ctx, p.sector, p.ticket, p.addedPieces) + require.NoError(t, err) + return preCommit1 +} + +func (p *pieceProviderTestHarness) preCommit2(t *testing.T, pc1 specstorage.PreCommit1Out) { + sectorCids, err := p.mgr.SealPreCommit2(p.ctx, p.sector, pc1) + require.NoError(t, err) + commD := sectorCids.Unsealed + p.commD = commD +} + +func (p *pieceProviderTestHarness) isUnsealed(t *testing.T, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) bool { + b, err := p.pp.IsUnsealed(p.ctx, p.sector, offset, size) + require.NoError(t, err) + return b +} + +func (p *pieceProviderTestHarness) readPiece(t *testing.T, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, + expectedHadToUnseal bool, expectedBytes []byte) { + rd, isUnsealed, err := p.pp.ReadPiece(p.ctx, p.sector, offset, size, p.ticket, p.commD) + require.NoError(t, err) + require.NotNil(t, rd) + require.Equal(t, expectedHadToUnseal, isUnsealed) + defer func() { _ 
= rd.Close() }() + + // Make sure the input matches the output + readData, err := ioutil.ReadAll(rd) + require.NoError(t, err) + require.Equal(t, expectedBytes, readData) +} + +func (p *pieceProviderTestHarness) finalizeSector(t *testing.T, keepUnseal []specstorage.Range) { + require.NoError(t, p.mgr.FinalizeSector(p.ctx, p.sector, keepUnseal)) +} + +func (p *pieceProviderTestHarness) shutdown(t *testing.T) { + for _, svc := range p.servers { + s := svc + require.NoError(t, s.Shutdown(p.ctx)) + } +} diff --git a/extern/sector-storage/resources.go b/extern/sector-storage/resources.go index 7da3e96a6..2e989fdf4 100644 --- a/extern/sector-storage/resources.go +++ b/extern/sector-storage/resources.go @@ -313,7 +313,6 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredSealProof]Resources func init() { ResourceTable[sealtasks.TTUnseal] = ResourceTable[sealtasks.TTPreCommit1] // TODO: measure accurately - ResourceTable[sealtasks.TTReadUnsealed] = ResourceTable[sealtasks.TTFetch] // V1_1 is the same as V1 for _, m := range ResourceTable { diff --git a/extern/sector-storage/sched.go b/extern/sector-storage/sched.go index 61411081a..aabf6f0ce 100644 --- a/extern/sector-storage/sched.go +++ b/extern/sector-storage/sched.go @@ -349,24 +349,24 @@ func (sh *scheduler) trySched() { defer sh.workersLk.RUnlock() windowsLen := len(sh.openWindows) - queuneLen := sh.schedQueue.Len() + queueLen := sh.schedQueue.Len() - log.Debugf("SCHED %d queued; %d open windows", queuneLen, windowsLen) + log.Debugf("SCHED %d queued; %d open windows", queueLen, windowsLen) - if windowsLen == 0 || queuneLen == 0 { + if windowsLen == 0 || queueLen == 0 { // nothing to schedule on return } windows := make([]schedWindow, windowsLen) - acceptableWindows := make([][]int, queuneLen) + acceptableWindows := make([][]int, queueLen) // Step 1 throttle := make(chan struct{}, windowsLen) var wg sync.WaitGroup - wg.Add(queuneLen) - for i := 0; i < queuneLen; i++ { + wg.Add(queueLen) + for i := 0; i < 
queueLen; i++ { throttle <- struct{}{} go func(sqi int) { @@ -393,7 +393,7 @@ func (sh *scheduler) trySched() { } // TODO: allow bigger windows - if !windows[wnd].allocated.canHandleRequest(needRes, windowRequest.worker, "schedAcceptable", worker.info.Resources) { + if !windows[wnd].allocated.canHandleRequest(needRes, windowRequest.worker, "schedAcceptable", worker.info) { continue } @@ -451,27 +451,27 @@ func (sh *scheduler) trySched() { // Step 2 scheduled := 0 - rmQueue := make([]int, 0, queuneLen) + rmQueue := make([]int, 0, queueLen) - for sqi := 0; sqi < queuneLen; sqi++ { + for sqi := 0; sqi < queueLen; sqi++ { task := (*sh.schedQueue)[sqi] needRes := ResourceTable[task.taskType][task.sector.ProofType] selectedWindow := -1 for _, wnd := range acceptableWindows[task.indexHeap] { wid := sh.openWindows[wnd].worker - wr := sh.workers[wid].info.Resources + info := sh.workers[wid].info log.Debugf("SCHED try assign sqi:%d sector %d to window %d", sqi, task.sector.ID.Number, wnd) // TODO: allow bigger windows - if !windows[wnd].allocated.canHandleRequest(needRes, wid, "schedAssign", wr) { + if !windows[wnd].allocated.canHandleRequest(needRes, wid, "schedAssign", info) { continue } log.Debugf("SCHED ASSIGNED sqi:%d sector %d task %s to window %d", sqi, task.sector.ID.Number, task.taskType, wnd) - windows[wnd].allocated.add(wr, needRes) + windows[wnd].allocated.add(info.Resources, needRes) // TODO: We probably want to re-sort acceptableWindows here based on new // workerHandle.utilization + windows[wnd].allocated.utilization (workerHandle.utilization is used in all // task selectors, but not in the same way, so need to figure out how to do that in a non-O(n^2 way), and diff --git a/extern/sector-storage/sched_resources.go b/extern/sector-storage/sched_resources.go index 3e359c121..96a1fa863 100644 --- a/extern/sector-storage/sched_resources.go +++ b/extern/sector-storage/sched_resources.go @@ -6,7 +6,7 @@ import ( 
"github.com/filecoin-project/lotus/extern/sector-storage/storiface" ) -func (a *activeResources) withResources(id WorkerID, wr storiface.WorkerResources, r Resources, locker sync.Locker, cb func() error) error { +func (a *activeResources) withResources(id WorkerID, wr storiface.WorkerInfo, r Resources, locker sync.Locker, cb func() error) error { for !a.canHandleRequest(r, id, "withResources", wr) { if a.cond == nil { a.cond = sync.NewCond(locker) @@ -14,11 +14,11 @@ func (a *activeResources) withResources(id WorkerID, wr storiface.WorkerResource a.cond.Wait() } - a.add(wr, r) + a.add(wr.Resources, r) err := cb() - a.free(wr, r) + a.free(wr.Resources, r) if a.cond != nil { a.cond.Broadcast() } @@ -44,8 +44,15 @@ func (a *activeResources) free(wr storiface.WorkerResources, r Resources) { a.memUsedMax -= r.MaxMemory } -func (a *activeResources) canHandleRequest(needRes Resources, wid WorkerID, caller string, res storiface.WorkerResources) bool { +// canHandleRequest evaluates if the worker has enough available resources to +// handle the request. +func (a *activeResources) canHandleRequest(needRes Resources, wid WorkerID, caller string, info storiface.WorkerInfo) bool { + if info.IgnoreResources { + // shortcircuit; if this worker is ignoring resources, it can always handle the request. 
+ return true + } + res := info.Resources // TODO: dedupe needRes.BaseMinMemory per task type (don't add if that task is already running) minNeedMem := res.MemReserved + a.memUsedMin + needRes.MinMemory + needRes.BaseMinMemory if minNeedMem > res.MemPhysical { diff --git a/extern/sector-storage/sched_test.go b/extern/sector-storage/sched_test.go index 63f3de64d..fbc4d83ee 100644 --- a/extern/sector-storage/sched_test.go +++ b/extern/sector-storage/sched_test.go @@ -38,6 +38,20 @@ func TestWithPriority(t *testing.T) { require.Equal(t, 2222, getPriority(ctx)) } +var decentWorkerResources = storiface.WorkerResources{ + MemPhysical: 128 << 30, + MemSwap: 200 << 30, + MemReserved: 2 << 30, + CPUs: 32, + GPUs: []string{"a GPU"}, +} + +var constrainedWorkerResources = storiface.WorkerResources{ + MemPhysical: 1 << 30, + MemReserved: 2 << 30, + CPUs: 1, +} + type schedTestWorker struct { name string taskTypes map[sealtasks.TaskType]struct{} @@ -45,6 +59,9 @@ type schedTestWorker struct { closed bool session uuid.UUID + + resources storiface.WorkerResources + ignoreResources bool } func (s *schedTestWorker) SealPreCommit1(ctx context.Context, sector storage.SectorRef, ticket abi.SealRandomness, pieces []abi.PieceInfo) (storiface.CallID, error) { @@ -107,18 +124,11 @@ func (s *schedTestWorker) Paths(ctx context.Context) ([]stores.StoragePath, erro return s.paths, nil } -var decentWorkerResources = storiface.WorkerResources{ - MemPhysical: 128 << 30, - MemSwap: 200 << 30, - MemReserved: 2 << 30, - CPUs: 32, - GPUs: []string{"a GPU"}, -} - func (s *schedTestWorker) Info(ctx context.Context) (storiface.WorkerInfo, error) { return storiface.WorkerInfo{ - Hostname: s.name, - Resources: decentWorkerResources, + Hostname: s.name, + IgnoreResources: s.ignoreResources, + Resources: s.resources, }, nil } @@ -137,13 +147,16 @@ func (s *schedTestWorker) Close() error { var _ Worker = &schedTestWorker{} -func addTestWorker(t *testing.T, sched *scheduler, index *stores.Index, name string, 
taskTypes map[sealtasks.TaskType]struct{}) { +func addTestWorker(t *testing.T, sched *scheduler, index *stores.Index, name string, taskTypes map[sealtasks.TaskType]struct{}, resources storiface.WorkerResources, ignoreResources bool) { w := &schedTestWorker{ name: name, taskTypes: taskTypes, paths: []stores.StoragePath{{ID: "bb-8", Weight: 2, LocalPath: "food", CanSeal: true, CanStore: true}}, session: uuid.New(), + + resources: resources, + ignoreResources: ignoreResources, } for _, path := range w.paths { @@ -169,7 +182,7 @@ func TestSchedStartStop(t *testing.T) { sched := newScheduler() go sched.runSched() - addTestWorker(t, sched, stores.NewIndex(), "fred", nil) + addTestWorker(t, sched, stores.NewIndex(), "fred", nil, decentWorkerResources, false) require.NoError(t, sched.Close(context.TODO())) } @@ -183,6 +196,9 @@ func TestSched(t *testing.T) { type workerSpec struct { name string taskTypes map[sealtasks.TaskType]struct{} + + resources storiface.WorkerResources + ignoreResources bool } noopAction := func(ctx context.Context, w Worker) error { @@ -295,7 +311,7 @@ func TestSched(t *testing.T) { go sched.runSched() for _, worker := range workers { - addTestWorker(t, sched, index, worker.name, worker.taskTypes) + addTestWorker(t, sched, index, worker.name, worker.taskTypes, worker.resources, worker.ignoreResources) } rm := runMeta{ @@ -322,31 +338,42 @@ func TestSched(t *testing.T) { } } + // checks behaviour with workers with constrained resources + // the first one is not ignoring resource constraints, so we assign to the second worker, who is + t.Run("constrained-resources", testFunc([]workerSpec{ + {name: "fred1", resources: constrainedWorkerResources, taskTypes: map[sealtasks.TaskType]struct{}{sealtasks.TTPreCommit1: {}}}, + {name: "fred2", resources: constrainedWorkerResources, ignoreResources: true, taskTypes: map[sealtasks.TaskType]struct{}{sealtasks.TTPreCommit1: {}}}, + }, []task{ + sched("pc1-1", "fred2", 8, sealtasks.TTPreCommit1), + 
taskStarted("pc1-1"), + taskDone("pc1-1"), + })) + t.Run("one-pc1", testFunc([]workerSpec{ - {name: "fred", taskTypes: map[sealtasks.TaskType]struct{}{sealtasks.TTPreCommit1: {}}}, + {name: "fred", resources: decentWorkerResources, taskTypes: map[sealtasks.TaskType]struct{}{sealtasks.TTPreCommit1: {}}}, }, []task{ sched("pc1-1", "fred", 8, sealtasks.TTPreCommit1), taskDone("pc1-1"), })) t.Run("pc1-2workers-1", testFunc([]workerSpec{ - {name: "fred2", taskTypes: map[sealtasks.TaskType]struct{}{sealtasks.TTPreCommit2: {}}}, - {name: "fred1", taskTypes: map[sealtasks.TaskType]struct{}{sealtasks.TTPreCommit1: {}}}, + {name: "fred2", resources: decentWorkerResources, taskTypes: map[sealtasks.TaskType]struct{}{sealtasks.TTPreCommit2: {}}}, + {name: "fred1", resources: decentWorkerResources, taskTypes: map[sealtasks.TaskType]struct{}{sealtasks.TTPreCommit1: {}}}, }, []task{ sched("pc1-1", "fred1", 8, sealtasks.TTPreCommit1), taskDone("pc1-1"), })) t.Run("pc1-2workers-2", testFunc([]workerSpec{ - {name: "fred1", taskTypes: map[sealtasks.TaskType]struct{}{sealtasks.TTPreCommit1: {}}}, - {name: "fred2", taskTypes: map[sealtasks.TaskType]struct{}{sealtasks.TTPreCommit2: {}}}, + {name: "fred1", resources: decentWorkerResources, taskTypes: map[sealtasks.TaskType]struct{}{sealtasks.TTPreCommit1: {}}}, + {name: "fred2", resources: decentWorkerResources, taskTypes: map[sealtasks.TaskType]struct{}{sealtasks.TTPreCommit2: {}}}, }, []task{ sched("pc1-1", "fred1", 8, sealtasks.TTPreCommit1), taskDone("pc1-1"), })) t.Run("pc1-block-pc2", testFunc([]workerSpec{ - {name: "fred", taskTypes: map[sealtasks.TaskType]struct{}{sealtasks.TTPreCommit1: {}, sealtasks.TTPreCommit2: {}}}, + {name: "fred", resources: decentWorkerResources, taskTypes: map[sealtasks.TaskType]struct{}{sealtasks.TTPreCommit1: {}, sealtasks.TTPreCommit2: {}}}, }, []task{ sched("pc1", "fred", 8, sealtasks.TTPreCommit1), taskStarted("pc1"), @@ -359,7 +386,7 @@ func TestSched(t *testing.T) { })) t.Run("pc2-block-pc1", 
testFunc([]workerSpec{ - {name: "fred", taskTypes: map[sealtasks.TaskType]struct{}{sealtasks.TTPreCommit1: {}, sealtasks.TTPreCommit2: {}}}, + {name: "fred", resources: decentWorkerResources, taskTypes: map[sealtasks.TaskType]struct{}{sealtasks.TTPreCommit1: {}, sealtasks.TTPreCommit2: {}}}, }, []task{ sched("pc2", "fred", 8, sealtasks.TTPreCommit2), taskStarted("pc2"), @@ -372,7 +399,7 @@ func TestSched(t *testing.T) { })) t.Run("pc1-batching", testFunc([]workerSpec{ - {name: "fred", taskTypes: map[sealtasks.TaskType]struct{}{sealtasks.TTPreCommit1: {}}}, + {name: "fred", resources: decentWorkerResources, taskTypes: map[sealtasks.TaskType]struct{}{sealtasks.TTPreCommit1: {}}}, }, []task{ sched("t1", "fred", 8, sealtasks.TTPreCommit1), taskStarted("t1"), @@ -459,7 +486,7 @@ func TestSched(t *testing.T) { // run this one a bunch of times, it had a very annoying tendency to fail randomly for i := 0; i < 40; i++ { t.Run("pc1-pc2-prio", testFunc([]workerSpec{ - {name: "fred", taskTypes: map[sealtasks.TaskType]struct{}{sealtasks.TTPreCommit1: {}, sealtasks.TTPreCommit2: {}}}, + {name: "fred", resources: decentWorkerResources, taskTypes: map[sealtasks.TaskType]struct{}{sealtasks.TTPreCommit1: {}, sealtasks.TTPreCommit2: {}}}, }, []task{ // fill queues twoPC1("w0", 0, taskStarted), diff --git a/extern/sector-storage/sched_worker.go b/extern/sector-storage/sched_worker.go index 4e18e5c6f..7bc1affc3 100644 --- a/extern/sector-storage/sched_worker.go +++ b/extern/sector-storage/sched_worker.go @@ -296,7 +296,7 @@ func (sw *schedWorker) workerCompactWindows() { for ti, todo := range window.todo { needRes := ResourceTable[todo.taskType][todo.sector.ProofType] - if !lower.allocated.canHandleRequest(needRes, sw.wid, "compactWindows", worker.info.Resources) { + if !lower.allocated.canHandleRequest(needRes, sw.wid, "compactWindows", worker.info) { continue } @@ -352,7 +352,7 @@ assignLoop: worker.lk.Lock() for t, todo := range firstWindow.todo { needRes := 
ResourceTable[todo.taskType][todo.sector.ProofType] - if worker.preparing.canHandleRequest(needRes, sw.wid, "startPreparing", worker.info.Resources) { + if worker.preparing.canHandleRequest(needRes, sw.wid, "startPreparing", worker.info) { tidx = t break } @@ -424,7 +424,7 @@ func (sw *schedWorker) startProcessingTask(taskDone chan struct{}, req *workerRe } // wait (if needed) for resources in the 'active' window - err = w.active.withResources(sw.wid, w.info.Resources, needRes, &sh.workersLk, func() error { + err = w.active.withResources(sw.wid, w.info, needRes, &sh.workersLk, func() error { w.lk.Lock() w.preparing.free(w.info.Resources, needRes) w.lk.Unlock() diff --git a/extern/sector-storage/sealtasks/task.go b/extern/sector-storage/sealtasks/task.go index 8dd14ca34..6d341a4b3 100644 --- a/extern/sector-storage/sealtasks/task.go +++ b/extern/sector-storage/sealtasks/task.go @@ -11,21 +11,19 @@ const ( TTFinalize TaskType = "seal/v0/finalize" - TTFetch TaskType = "seal/v0/fetch" - TTUnseal TaskType = "seal/v0/unseal" - TTReadUnsealed TaskType = "seal/v0/unsealread" + TTFetch TaskType = "seal/v0/fetch" + TTUnseal TaskType = "seal/v0/unseal" ) var order = map[TaskType]int{ - TTAddPiece: 6, // least priority - TTPreCommit1: 5, - TTPreCommit2: 4, - TTCommit2: 3, - TTCommit1: 2, - TTUnseal: 1, - TTFetch: -1, - TTReadUnsealed: -1, - TTFinalize: -2, // most priority + TTAddPiece: 6, // least priority + TTPreCommit1: 5, + TTPreCommit2: 4, + TTCommit2: 3, + TTCommit1: 2, + TTUnseal: 1, + TTFetch: -1, + TTFinalize: -2, // most priority } var shortNames = map[TaskType]string{ @@ -38,9 +36,8 @@ var shortNames = map[TaskType]string{ TTFinalize: "FIN", - TTFetch: "GET", - TTUnseal: "UNS", - TTReadUnsealed: "RD", + TTFetch: "GET", + TTUnseal: "UNS", } func (a TaskType) MuchLess(b TaskType) (bool, bool) { diff --git a/extern/sector-storage/stores/http_handler.go b/extern/sector-storage/stores/http_handler.go index 3e3468470..5b8477fc8 100644 --- 
a/extern/sector-storage/stores/http_handler.go +++ b/extern/sector-storage/stores/http_handler.go @@ -5,11 +5,14 @@ import ( "io" "net/http" "os" + "strconv" "github.com/gorilla/mux" logging "github.com/ipfs/go-log/v2" "golang.org/x/xerrors" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/lotus/extern/sector-storage/partialfile" "github.com/filecoin-project/lotus/extern/sector-storage/storiface" "github.com/filecoin-project/lotus/extern/sector-storage/tarutil" @@ -18,14 +21,39 @@ import ( var log = logging.Logger("stores") +var _ partialFileHandler = &DefaultPartialFileHandler{} + +// DefaultPartialFileHandler is the default implementation of the partialFileHandler interface. +// This is probably the only implementation we'll ever use because the purpose of the +// interface to is to mock out partial file related functionality during testing. +type DefaultPartialFileHandler struct{} + +func (d *DefaultPartialFileHandler) OpenPartialFile(maxPieceSize abi.PaddedPieceSize, path string) (*partialfile.PartialFile, error) { + return partialfile.OpenPartialFile(maxPieceSize, path) +} +func (d *DefaultPartialFileHandler) HasAllocated(pf *partialfile.PartialFile, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (bool, error) { + return pf.HasAllocated(offset, size) +} + +func (d *DefaultPartialFileHandler) Reader(pf *partialfile.PartialFile, offset storiface.PaddedByteIndex, size abi.PaddedPieceSize) (*os.File, error) { + return pf.Reader(offset, size) +} + +// Close closes the partial file +func (d *DefaultPartialFileHandler) Close(pf *partialfile.PartialFile) error { + return pf.Close() +} + type FetchHandler struct { - *Local + Local Store + PfHandler partialFileHandler } func (handler *FetchHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { // /remote/ mux := mux.NewRouter() mux.HandleFunc("/remote/stat/{id}", handler.remoteStatFs).Methods("GET") + 
mux.HandleFunc("/remote/{type}/{id}/{spt}/allocated/{offset}/{size}", handler.remoteGetAllocated).Methods("GET") mux.HandleFunc("/remote/{type}/{id}", handler.remoteGetSector).Methods("GET") mux.HandleFunc("/remote/{type}/{id}", handler.remoteDeleteSector).Methods("DELETE") @@ -54,6 +82,8 @@ func (handler *FetchHandler) remoteStatFs(w http.ResponseWriter, r *http.Request } } +// remoteGetSector returns the sector file/tared directory byte stream for the sectorID and sector file type sent in the request. +// returns an error if it does NOT have the required sector file/dir. func (handler *FetchHandler) remoteGetSector(w http.ResponseWriter, r *http.Request) { log.Infof("SERVE GET %s", r.URL) vars := mux.Vars(r) @@ -73,7 +103,6 @@ func (handler *FetchHandler) remoteGetSector(w http.ResponseWriter, r *http.Requ } // The caller has a lock on this sector already, no need to get one here - // passing 0 spt because we don't allocate anything si := storage.SectorRef{ ID: id, @@ -82,7 +111,7 @@ func (handler *FetchHandler) remoteGetSector(w http.ResponseWriter, r *http.Requ paths, _, err := handler.Local.AcquireSector(r.Context(), si, ft, storiface.FTNone, storiface.PathStorage, storiface.AcquireMove) if err != nil { - log.Errorf("%+v", err) + log.Errorf("AcquireSector: %+v", err) w.WriteHeader(500) return } @@ -98,37 +127,38 @@ func (handler *FetchHandler) remoteGetSector(w http.ResponseWriter, r *http.Requ stat, err := os.Stat(path) if err != nil { - log.Errorf("%+v", err) + log.Errorf("os.Stat: %+v", err) w.WriteHeader(500) return } - var rd io.Reader if stat.IsDir() { - rd, err = tarutil.TarDirectory(path) + if _, has := r.Header["Range"]; has { + log.Error("Range not supported on directories") + w.WriteHeader(500) + return + } + + rd, err := tarutil.TarDirectory(path) + if err != nil { + log.Errorf("%+v", err) + w.WriteHeader(500) + return + } + w.Header().Set("Content-Type", "application/x-tar") + w.WriteHeader(200) + if _, err := io.CopyBuffer(w, rd, make([]byte, 
CopyBuf)); err != nil { + log.Errorf("%+v", err) + return + } } else { - rd, err = os.OpenFile(path, os.O_RDONLY, 0644) // nolint w.Header().Set("Content-Type", "application/octet-stream") - } - if err != nil { - log.Errorf("%+v", err) - w.WriteHeader(500) - return - } - if !stat.IsDir() { - defer func() { - if err := rd.(*os.File).Close(); err != nil { - log.Errorf("closing source file: %+v", err) - } - }() + // will do a ranged read over the file at the given path if the caller has asked for a ranged read in the request headers. + http.ServeFile(w, r, path) } - w.WriteHeader(200) - if _, err := io.CopyBuffer(w, rd, make([]byte, CopyBuf)); err != nil { - log.Errorf("%+v", err) - return - } + log.Debugf("served sector file/dir, sectorID=%+v, fileType=%s, path=%s", id, ft, path) } func (handler *FetchHandler) remoteDeleteSector(w http.ResponseWriter, r *http.Request) { @@ -149,13 +179,120 @@ func (handler *FetchHandler) remoteDeleteSector(w http.ResponseWriter, r *http.R return } - if err := handler.Remove(r.Context(), id, ft, false); err != nil { + if err := handler.Local.Remove(r.Context(), id, ft, false); err != nil { log.Errorf("%+v", err) w.WriteHeader(500) return } } +// remoteGetAllocated returns `http.StatusOK` if the worker already has an Unsealed sector file +// containing the Unsealed piece sent in the request. +// returns `http.StatusRequestedRangeNotSatisfiable` otherwise. 
+func (handler *FetchHandler) remoteGetAllocated(w http.ResponseWriter, r *http.Request) { + log.Infof("SERVE Alloc check %s", r.URL) + vars := mux.Vars(r) + + id, err := storiface.ParseSectorID(vars["id"]) + if err != nil { + log.Errorf("parsing sectorID: %+v", err) + w.WriteHeader(500) + return + } + + ft, err := ftFromString(vars["type"]) + if err != nil { + log.Errorf("ftFromString: %+v", err) + w.WriteHeader(500) + return + } + if ft != storiface.FTUnsealed { + log.Errorf("/allocated only supports unsealed sector files") + w.WriteHeader(500) + return + } + + spti, err := strconv.ParseInt(vars["spt"], 10, 64) + if err != nil { + log.Errorf("parsing spt: %+v", err) + w.WriteHeader(500) + return + } + spt := abi.RegisteredSealProof(spti) + ssize, err := spt.SectorSize() + if err != nil { + log.Errorf("spt.SectorSize(): %+v", err) + w.WriteHeader(500) + return + } + + offi, err := strconv.ParseInt(vars["offset"], 10, 64) + if err != nil { + log.Errorf("parsing offset: %+v", err) + w.WriteHeader(500) + return + } + szi, err := strconv.ParseInt(vars["size"], 10, 64) + if err != nil { + log.Errorf("parsing size: %+v", err) + w.WriteHeader(500) + return + } + + // The caller has a lock on this sector already, no need to get one here + + // passing 0 spt because we don't allocate anything + si := storage.SectorRef{ + ID: id, + ProofType: 0, + } + + // get the path of the local Unsealed file for the given sector. + // return error if we do NOT have it. + paths, _, err := handler.Local.AcquireSector(r.Context(), si, ft, storiface.FTNone, storiface.PathStorage, storiface.AcquireMove) + if err != nil { + log.Errorf("AcquireSector: %+v", err) + w.WriteHeader(500) + return + } + + path := storiface.PathByType(paths, ft) + if path == "" { + log.Error("acquired path was empty") + w.WriteHeader(500) + return + } + + // open the Unsealed file and check if it has the Unsealed sector for the piece at the given offset and size. 
+ pf, err := handler.PfHandler.OpenPartialFile(abi.PaddedPieceSize(ssize), path) + if err != nil { + log.Error("opening partial file: ", err) + w.WriteHeader(500) + return + } + defer func() { + if err := pf.Close(); err != nil { + log.Error("closing partial file: ", err) + } + }() + + has, err := handler.PfHandler.HasAllocated(pf, storiface.UnpaddedByteIndex(offi), abi.UnpaddedPieceSize(szi)) + if err != nil { + log.Error("has allocated: ", err) + w.WriteHeader(500) + return + } + + if has { + log.Debugf("returning ok: worker has unsealed file with unsealed piece, sector:%+v, offset:%d, size:%d", id, offi, szi) + w.WriteHeader(http.StatusOK) + return + } + + log.Debugf("returning StatusRequestedRangeNotSatisfiable: worker does NOT have unsealed file with unsealed piece, sector:%+v, offset:%d, size:%d", id, offi, szi) + w.WriteHeader(http.StatusRequestedRangeNotSatisfiable) +} + func ftFromString(t string) (storiface.SectorFileType, error) { switch t { case storiface.FTUnsealed.String(): diff --git a/extern/sector-storage/stores/http_handler_test.go b/extern/sector-storage/stores/http_handler_test.go new file mode 100644 index 000000000..1258d8530 --- /dev/null +++ b/extern/sector-storage/stores/http_handler_test.go @@ -0,0 +1,457 @@ +package stores_test + +import ( + "fmt" + "io/ioutil" + "net/http" + "net/http/httptest" + "os" + "path/filepath" + "testing" + + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/lotus/extern/sector-storage/partialfile" + "github.com/filecoin-project/lotus/extern/sector-storage/stores" + "github.com/filecoin-project/lotus/extern/sector-storage/stores/mocks" + "github.com/filecoin-project/lotus/extern/sector-storage/storiface" + "github.com/filecoin-project/specs-storage/storage" + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/require" + "golang.org/x/xerrors" +) + +func TestRemoteGetAllocated(t *testing.T) { + + emptyPartialFile := &partialfile.PartialFile{} + pfPath := "path" + 
expectedSectorRef := storage.SectorRef{ + ID: abi.SectorID{ + Miner: 123, + Number: 123, + }, + ProofType: 0, + } + + validSectorName := fmt.Sprintf("s-t0%d-%d", 123, 123) + validSectorFileType := storiface.FTUnsealed.String() + validSectorType := "1" + sectorSize := abi.SealProofInfos[1].SectorSize + + validOffset := "100" + validOffsetInt := 100 + + validSize := "1000" + validSizeInt := 1000 + + type pieceInfo struct { + sectorName string + fileType string + sectorType string + + // piece info + offset string + size string + } + validPieceInfo := pieceInfo{ + sectorName: validSectorName, + fileType: validSectorFileType, + sectorType: validSectorType, + offset: validOffset, + size: validSize, + } + + tcs := map[string]struct { + piFnc func(pi *pieceInfo) + storeFnc func(s *mocks.MockStore) + pfFunc func(s *mocks.MockpartialFileHandler) + + // expectation + expectedStatusCode int + }{ + "fails when sector name is invalid": { + piFnc: func(pi *pieceInfo) { + pi.sectorName = "invalid" + }, + expectedStatusCode: http.StatusInternalServerError, + }, + "fails when file type is invalid": { + piFnc: func(pi *pieceInfo) { + pi.fileType = "invalid" + }, + expectedStatusCode: http.StatusInternalServerError, + }, + "fails when sector proof type is invalid": { + piFnc: func(pi *pieceInfo) { + pi.sectorType = "invalid" + }, + expectedStatusCode: http.StatusInternalServerError, + }, + "fails when offset is invalid": { + piFnc: func(pi *pieceInfo) { + pi.offset = "invalid" + }, + expectedStatusCode: http.StatusInternalServerError, + }, + "fails when size is invalid": { + piFnc: func(pi *pieceInfo) { + pi.size = "invalid" + }, + expectedStatusCode: http.StatusInternalServerError, + }, + "fails when errors out during acquiring unsealed sector file": { + expectedStatusCode: http.StatusInternalServerError, + storeFnc: func(l *mocks.MockStore) { + + l.EXPECT().AcquireSector(gomock.Any(), expectedSectorRef, storiface.FTUnsealed, + storiface.FTNone, storiface.PathStorage, 
storiface.AcquireMove).Return(storiface.SectorPaths{ + Unsealed: "path", + }, + storiface.SectorPaths{}, xerrors.New("some error")).Times(1) + }, + }, + "fails when unsealed sector file is not found locally": { + expectedStatusCode: http.StatusInternalServerError, + storeFnc: func(l *mocks.MockStore) { + + l.EXPECT().AcquireSector(gomock.Any(), expectedSectorRef, storiface.FTUnsealed, + storiface.FTNone, storiface.PathStorage, storiface.AcquireMove).Return(storiface.SectorPaths{}, + storiface.SectorPaths{}, nil).Times(1) + }, + }, + "fails when error while opening partial file": { + expectedStatusCode: http.StatusInternalServerError, + storeFnc: func(l *mocks.MockStore) { + + l.EXPECT().AcquireSector(gomock.Any(), expectedSectorRef, storiface.FTUnsealed, + storiface.FTNone, storiface.PathStorage, storiface.AcquireMove).Return(storiface.SectorPaths{ + Unsealed: pfPath, + }, + storiface.SectorPaths{}, nil).Times(1) + }, + + pfFunc: func(pf *mocks.MockpartialFileHandler) { + pf.EXPECT().OpenPartialFile(abi.PaddedPieceSize(sectorSize), pfPath).Return(&partialfile.PartialFile{}, + xerrors.New("some error")).Times(1) + }, + }, + + "fails when determining partial file allocation returns an error": { + expectedStatusCode: http.StatusInternalServerError, + storeFnc: func(l *mocks.MockStore) { + + l.EXPECT().AcquireSector(gomock.Any(), expectedSectorRef, storiface.FTUnsealed, + storiface.FTNone, storiface.PathStorage, storiface.AcquireMove).Return(storiface.SectorPaths{ + Unsealed: pfPath, + }, + storiface.SectorPaths{}, nil).Times(1) + }, + + pfFunc: func(pf *mocks.MockpartialFileHandler) { + pf.EXPECT().OpenPartialFile(abi.PaddedPieceSize(sectorSize), pfPath).Return(emptyPartialFile, + nil).Times(1) + + pf.EXPECT().HasAllocated(emptyPartialFile, storiface.UnpaddedByteIndex(validOffsetInt), + abi.UnpaddedPieceSize(validSizeInt)).Return(true, xerrors.New("some error")).Times(1) + }, + }, + "StatusRequestedRangeNotSatisfiable when piece is NOT allocated in partial file": { + 
expectedStatusCode: http.StatusRequestedRangeNotSatisfiable, + storeFnc: func(l *mocks.MockStore) { + + l.EXPECT().AcquireSector(gomock.Any(), expectedSectorRef, storiface.FTUnsealed, + storiface.FTNone, storiface.PathStorage, storiface.AcquireMove).Return(storiface.SectorPaths{ + Unsealed: pfPath, + }, + storiface.SectorPaths{}, nil).Times(1) + }, + + pfFunc: func(pf *mocks.MockpartialFileHandler) { + pf.EXPECT().OpenPartialFile(abi.PaddedPieceSize(sectorSize), pfPath).Return(emptyPartialFile, + nil).Times(1) + + pf.EXPECT().HasAllocated(emptyPartialFile, storiface.UnpaddedByteIndex(validOffsetInt), + abi.UnpaddedPieceSize(validSizeInt)).Return(false, nil).Times(1) + }, + }, + "OK when piece is allocated in partial file": { + expectedStatusCode: http.StatusOK, + storeFnc: func(l *mocks.MockStore) { + + l.EXPECT().AcquireSector(gomock.Any(), expectedSectorRef, storiface.FTUnsealed, + storiface.FTNone, storiface.PathStorage, storiface.AcquireMove).Return(storiface.SectorPaths{ + Unsealed: pfPath, + }, + storiface.SectorPaths{}, nil).Times(1) + }, + + pfFunc: func(pf *mocks.MockpartialFileHandler) { + pf.EXPECT().OpenPartialFile(abi.PaddedPieceSize(sectorSize), pfPath).Return(emptyPartialFile, + nil).Times(1) + + pf.EXPECT().HasAllocated(emptyPartialFile, storiface.UnpaddedByteIndex(validOffsetInt), + abi.UnpaddedPieceSize(validSizeInt)).Return(true, nil).Times(1) + }, + }, + } + + for name, tc := range tcs { + tc := tc + t.Run(name, func(t *testing.T) { + // create go mock controller here + mockCtrl := gomock.NewController(t) + // when test is done, assert expectations on all mock objects. 
+ defer mockCtrl.Finish() + + lstore := mocks.NewMockStore(mockCtrl) + pfhandler := mocks.NewMockpartialFileHandler(mockCtrl) + + handler := &stores.FetchHandler{ + lstore, + pfhandler, + } + + // run http server + ts := httptest.NewServer(handler) + defer ts.Close() + + pi := validPieceInfo + + if tc.piFnc != nil { + tc.piFnc(&pi) + } + + if tc.storeFnc != nil { + tc.storeFnc(lstore) + } + if tc.pfFunc != nil { + tc.pfFunc(pfhandler) + } + + // call remoteGetAllocated + url := fmt.Sprintf("%s/remote/%s/%s/%s/allocated/%s/%s", + ts.URL, + pi.fileType, + pi.sectorName, + pi.sectorType, + pi.offset, + pi.size) + resp, err := http.Get(url) + require.NoError(t, err) + defer func() { + _ = resp.Body.Close() + }() + + // assert expected status code + require.Equal(t, tc.expectedStatusCode, resp.StatusCode) + }) + } +} + +func TestRemoteGetSector(t *testing.T) { + str := "hello-world" + fileBytes := []byte(str) + + validSectorName := fmt.Sprintf("s-t0%d-%d", 123, 123) + validSectorFileType := storiface.FTUnsealed.String() + expectedSectorRef := storage.SectorRef{ + ID: abi.SectorID{ + Miner: 123, + Number: 123, + }, + ProofType: 0, + } + + type sectorInfo struct { + sectorName string + fileType string + } + validSectorInfo := sectorInfo{ + sectorName: validSectorName, + fileType: validSectorFileType, + } + + tcs := map[string]struct { + siFnc func(pi *sectorInfo) + storeFnc func(s *mocks.MockStore, path string) + + // reading a file or a dir + isDir bool + + // expectation + noResponseBytes bool + expectedContentType string + expectedStatusCode int + expectedResponseBytes []byte + }{ + "fails when sector name is invalid": { + siFnc: func(si *sectorInfo) { + si.sectorName = "invalid" + }, + expectedStatusCode: http.StatusInternalServerError, + noResponseBytes: true, + }, + "fails when file type is invalid": { + siFnc: func(si *sectorInfo) { + si.fileType = "invalid" + }, + expectedStatusCode: http.StatusInternalServerError, + noResponseBytes: true, + }, + "fails when error 
while acquiring sector file": { + storeFnc: func(l *mocks.MockStore, _ string) { + + l.EXPECT().AcquireSector(gomock.Any(), expectedSectorRef, storiface.FTUnsealed, + storiface.FTNone, storiface.PathStorage, storiface.AcquireMove).Return(storiface.SectorPaths{ + Unsealed: "path", + }, + storiface.SectorPaths{}, xerrors.New("some error")).Times(1) + }, + expectedStatusCode: http.StatusInternalServerError, + noResponseBytes: true, + }, + "fails when acquired sector file path is empty": { + expectedStatusCode: http.StatusInternalServerError, + storeFnc: func(l *mocks.MockStore, _ string) { + + l.EXPECT().AcquireSector(gomock.Any(), expectedSectorRef, storiface.FTUnsealed, + storiface.FTNone, storiface.PathStorage, storiface.AcquireMove).Return(storiface.SectorPaths{}, + storiface.SectorPaths{}, nil).Times(1) + }, + noResponseBytes: true, + }, + "fails when acquired file does not exist": { + expectedStatusCode: http.StatusInternalServerError, + storeFnc: func(l *mocks.MockStore, _ string) { + + l.EXPECT().AcquireSector(gomock.Any(), expectedSectorRef, storiface.FTUnsealed, + storiface.FTNone, storiface.PathStorage, storiface.AcquireMove).Return(storiface.SectorPaths{ + Unsealed: "path", + }, + storiface.SectorPaths{}, nil) + }, + noResponseBytes: true, + }, + "successfully read a sector file": { + storeFnc: func(l *mocks.MockStore, path string) { + + l.EXPECT().AcquireSector(gomock.Any(), expectedSectorRef, storiface.FTUnsealed, + storiface.FTNone, storiface.PathStorage, storiface.AcquireMove).Return(storiface.SectorPaths{ + Unsealed: path, + }, + storiface.SectorPaths{}, nil) + }, + + noResponseBytes: false, + expectedContentType: "application/octet-stream", + expectedStatusCode: 200, + expectedResponseBytes: fileBytes, + }, + "successfully read a sector dir": { + storeFnc: func(l *mocks.MockStore, path string) { + + l.EXPECT().AcquireSector(gomock.Any(), expectedSectorRef, storiface.FTUnsealed, + storiface.FTNone, storiface.PathStorage, 
storiface.AcquireMove).Return(storiface.SectorPaths{ + Unsealed: path, + }, + storiface.SectorPaths{}, nil) + }, + + isDir: true, + noResponseBytes: false, + expectedContentType: "application/x-tar", + expectedStatusCode: 200, + expectedResponseBytes: fileBytes, + }, + } + + for name, tc := range tcs { + tc := tc + t.Run(name, func(t *testing.T) { + mockCtrl := gomock.NewController(t) + // when test is done, assert expectations on all mock objects. + defer mockCtrl.Finish() + lstore := mocks.NewMockStore(mockCtrl) + pfhandler := mocks.NewMockpartialFileHandler(mockCtrl) + + var path string + + if !tc.isDir { + // create file + tempFile, err := ioutil.TempFile("", "TestRemoteGetSector-") + require.NoError(t, err) + + defer func() { + _ = os.Remove(tempFile.Name()) + }() + + _, err = tempFile.Write(fileBytes) + require.NoError(t, err) + path = tempFile.Name() + } else { + // create dir with a file + tempFile2, err := ioutil.TempFile("", "TestRemoteGetSector-") + require.NoError(t, err) + defer func() { + _ = os.Remove(tempFile2.Name()) + }() + + stat, err := os.Stat(tempFile2.Name()) + require.NoError(t, err) + tempDir, err := ioutil.TempDir("", "TestRemoteGetSector-") + require.NoError(t, err) + + defer func() { + _ = os.RemoveAll(tempDir) + }() + + require.NoError(t, os.Rename(tempFile2.Name(), filepath.Join(tempDir, stat.Name()))) + + path = tempDir + } + + handler := &stores.FetchHandler{ + lstore, + pfhandler, + } + + // run http server + ts := httptest.NewServer(handler) + defer ts.Close() + + si := validSectorInfo + if tc.siFnc != nil { + tc.siFnc(&si) + } + + if tc.storeFnc != nil { + tc.storeFnc(lstore, path) + } + + // call remoteGetAllocated + url := fmt.Sprintf("%s/remote/%s/%s", + ts.URL, + si.fileType, + si.sectorName, + ) + resp, err := http.Get(url) + require.NoError(t, err) + defer func() { + _ = resp.Body.Close() + }() + + bz, err := ioutil.ReadAll(resp.Body) + require.NoError(t, err) + + // assert expected status code + require.Equal(t, 
tc.expectedStatusCode, resp.StatusCode) + + if !tc.noResponseBytes { + if !tc.isDir { + require.EqualValues(t, tc.expectedResponseBytes, bz) + } + } + + require.Equal(t, tc.expectedContentType, resp.Header.Get("Content-Type")) + }) + } +} diff --git a/extern/sector-storage/stores/index.go b/extern/sector-storage/stores/index.go index 4acc2ecdb..9fd7f6d7d 100644 --- a/extern/sector-storage/stores/index.go +++ b/extern/sector-storage/stores/index.go @@ -3,6 +3,7 @@ package stores import ( "context" "errors" + "fmt" "net/url" gopath "path" "sort" @@ -65,6 +66,8 @@ type SectorIndex interface { // part of storage-miner api // atomically acquire locks on all sector file types. close ctx to unlock StorageLock(ctx context.Context, sector abi.SectorID, read storiface.SectorFileType, write storiface.SectorFileType) error StorageTryLock(ctx context.Context, sector abi.SectorID, read storiface.SectorFileType, write storiface.SectorFileType) (bool, error) + + StorageList(ctx context.Context) (map[ID][]Decl, error) } type Decl struct { @@ -383,7 +386,16 @@ func (i *Index) StorageBestAlloc(ctx context.Context, allocate storiface.SectorF var candidates []storageEntry - spaceReq, err := allocate.SealSpaceUse(ssize) + var err error + var spaceReq uint64 + switch pathType { + case storiface.PathSealing: + spaceReq, err = allocate.SealSpaceUse(ssize) + case storiface.PathStorage: + spaceReq, err = allocate.StoreSpaceUse(ssize) + default: + panic(fmt.Sprintf("unexpected pathType: %s", pathType)) + } if err != nil { return nil, xerrors.Errorf("estimating required space: %w", err) } diff --git a/extern/sector-storage/stores/interface.go b/extern/sector-storage/stores/interface.go index a997ad3d2..4986e6c80 100644 --- a/extern/sector-storage/stores/interface.go +++ b/extern/sector-storage/stores/interface.go @@ -2,8 +2,10 @@ package stores import ( "context" + "os" "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/lotus/extern/sector-storage/partialfile" 
"github.com/filecoin-project/specs-storage/storage" @@ -11,6 +13,23 @@ import ( "github.com/filecoin-project/lotus/extern/sector-storage/storiface" ) +// PartialFileHandler helps mock out the partial file functionality during testing. +type partialFileHandler interface { + // OpenPartialFile opens and returns a partial file at the given path and also verifies it has the given + // size + OpenPartialFile(maxPieceSize abi.PaddedPieceSize, path string) (*partialfile.PartialFile, error) + + // HasAllocated returns true if the given partial file has an unsealed piece starting at the given offset with the given size. + // returns false otherwise. + HasAllocated(pf *partialfile.PartialFile, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (bool, error) + + // Reader returns a file from which we can read the unsealed piece in the partial file. + Reader(pf *partialfile.PartialFile, offset storiface.PaddedByteIndex, size abi.PaddedPieceSize) (*os.File, error) + + // Close closes the partial file + Close(pf *partialfile.PartialFile) error +} + type Store interface { AcquireSector(ctx context.Context, s storage.SectorRef, existing storiface.SectorFileType, allocate storiface.SectorFileType, sealing storiface.PathType, op storiface.AcquireMode) (paths storiface.SectorPaths, stores storiface.SectorPaths, err error) Remove(ctx context.Context, s abi.SectorID, types storiface.SectorFileType, force bool) error @@ -23,4 +42,6 @@ type Store interface { MoveStorage(ctx context.Context, s storage.SectorRef, types storiface.SectorFileType) error FsStat(ctx context.Context, id ID) (fsutil.FsStat, error) + + Reserve(ctx context.Context, sid storage.SectorRef, ft storiface.SectorFileType, storageIDs storiface.SectorPaths, overheadTab map[storiface.SectorFileType]int) (func(), error) } diff --git a/extern/sector-storage/stores/local.go b/extern/sector-storage/stores/local.go index 5a10b21b9..cac160139 100644 --- a/extern/sector-storage/stores/local.go +++ 
b/extern/sector-storage/stores/local.go @@ -158,6 +158,8 @@ func (p *path) sectorPath(sid abi.SectorID, fileType storiface.SectorFileType) s return filepath.Join(p.local, fileType.String(), storiface.SectorName(sid)) } +type URLs []string + func NewLocal(ctx context.Context, ls LocalStorage, index SectorIndex, urls []string) (*Local, error) { l := &Local{ localStorage: ls, diff --git a/extern/sector-storage/stores/mocks/index.go b/extern/sector-storage/stores/mocks/index.go new file mode 100644 index 000000000..59a6017b5 --- /dev/null +++ b/extern/sector-storage/stores/mocks/index.go @@ -0,0 +1,184 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: index.go + +// Package mocks is a generated GoMock package. +package mocks + +import ( + context "context" + reflect "reflect" + + abi "github.com/filecoin-project/go-state-types/abi" + fsutil "github.com/filecoin-project/lotus/extern/sector-storage/fsutil" + stores "github.com/filecoin-project/lotus/extern/sector-storage/stores" + storiface "github.com/filecoin-project/lotus/extern/sector-storage/storiface" + gomock "github.com/golang/mock/gomock" +) + +// MockSectorIndex is a mock of SectorIndex interface. +type MockSectorIndex struct { + ctrl *gomock.Controller + recorder *MockSectorIndexMockRecorder +} + +// MockSectorIndexMockRecorder is the mock recorder for MockSectorIndex. +type MockSectorIndexMockRecorder struct { + mock *MockSectorIndex +} + +// NewMockSectorIndex creates a new mock instance. +func NewMockSectorIndex(ctrl *gomock.Controller) *MockSectorIndex { + mock := &MockSectorIndex{ctrl: ctrl} + mock.recorder = &MockSectorIndexMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockSectorIndex) EXPECT() *MockSectorIndexMockRecorder { + return m.recorder +} + +// StorageAttach mocks base method. 
+func (m *MockSectorIndex) StorageAttach(arg0 context.Context, arg1 stores.StorageInfo, arg2 fsutil.FsStat) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StorageAttach", arg0, arg1, arg2) + ret0, _ := ret[0].(error) + return ret0 +} + +// StorageAttach indicates an expected call of StorageAttach. +func (mr *MockSectorIndexMockRecorder) StorageAttach(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StorageAttach", reflect.TypeOf((*MockSectorIndex)(nil).StorageAttach), arg0, arg1, arg2) +} + +// StorageBestAlloc mocks base method. +func (m *MockSectorIndex) StorageBestAlloc(ctx context.Context, allocate storiface.SectorFileType, ssize abi.SectorSize, pathType storiface.PathType) ([]stores.StorageInfo, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StorageBestAlloc", ctx, allocate, ssize, pathType) + ret0, _ := ret[0].([]stores.StorageInfo) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StorageBestAlloc indicates an expected call of StorageBestAlloc. +func (mr *MockSectorIndexMockRecorder) StorageBestAlloc(ctx, allocate, ssize, pathType interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StorageBestAlloc", reflect.TypeOf((*MockSectorIndex)(nil).StorageBestAlloc), ctx, allocate, ssize, pathType) +} + +// StorageDeclareSector mocks base method. +func (m *MockSectorIndex) StorageDeclareSector(ctx context.Context, storageID stores.ID, s abi.SectorID, ft storiface.SectorFileType, primary bool) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StorageDeclareSector", ctx, storageID, s, ft, primary) + ret0, _ := ret[0].(error) + return ret0 +} + +// StorageDeclareSector indicates an expected call of StorageDeclareSector. 
+func (mr *MockSectorIndexMockRecorder) StorageDeclareSector(ctx, storageID, s, ft, primary interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StorageDeclareSector", reflect.TypeOf((*MockSectorIndex)(nil).StorageDeclareSector), ctx, storageID, s, ft, primary) +} + +// StorageDropSector mocks base method. +func (m *MockSectorIndex) StorageDropSector(ctx context.Context, storageID stores.ID, s abi.SectorID, ft storiface.SectorFileType) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StorageDropSector", ctx, storageID, s, ft) + ret0, _ := ret[0].(error) + return ret0 +} + +// StorageDropSector indicates an expected call of StorageDropSector. +func (mr *MockSectorIndexMockRecorder) StorageDropSector(ctx, storageID, s, ft interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StorageDropSector", reflect.TypeOf((*MockSectorIndex)(nil).StorageDropSector), ctx, storageID, s, ft) +} + +// StorageFindSector mocks base method. +func (m *MockSectorIndex) StorageFindSector(ctx context.Context, sector abi.SectorID, ft storiface.SectorFileType, ssize abi.SectorSize, allowFetch bool) ([]stores.SectorStorageInfo, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StorageFindSector", ctx, sector, ft, ssize, allowFetch) + ret0, _ := ret[0].([]stores.SectorStorageInfo) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StorageFindSector indicates an expected call of StorageFindSector. +func (mr *MockSectorIndexMockRecorder) StorageFindSector(ctx, sector, ft, ssize, allowFetch interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StorageFindSector", reflect.TypeOf((*MockSectorIndex)(nil).StorageFindSector), ctx, sector, ft, ssize, allowFetch) +} + +// StorageInfo mocks base method. 
+func (m *MockSectorIndex) StorageInfo(arg0 context.Context, arg1 stores.ID) (stores.StorageInfo, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StorageInfo", arg0, arg1) + ret0, _ := ret[0].(stores.StorageInfo) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StorageInfo indicates an expected call of StorageInfo. +func (mr *MockSectorIndexMockRecorder) StorageInfo(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StorageInfo", reflect.TypeOf((*MockSectorIndex)(nil).StorageInfo), arg0, arg1) +} + +// StorageList mocks base method. +func (m *MockSectorIndex) StorageList(ctx context.Context) (map[stores.ID][]stores.Decl, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StorageList", ctx) + ret0, _ := ret[0].(map[stores.ID][]stores.Decl) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StorageList indicates an expected call of StorageList. +func (mr *MockSectorIndexMockRecorder) StorageList(ctx interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StorageList", reflect.TypeOf((*MockSectorIndex)(nil).StorageList), ctx) +} + +// StorageLock mocks base method. +func (m *MockSectorIndex) StorageLock(ctx context.Context, sector abi.SectorID, read, write storiface.SectorFileType) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StorageLock", ctx, sector, read, write) + ret0, _ := ret[0].(error) + return ret0 +} + +// StorageLock indicates an expected call of StorageLock. +func (mr *MockSectorIndexMockRecorder) StorageLock(ctx, sector, read, write interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StorageLock", reflect.TypeOf((*MockSectorIndex)(nil).StorageLock), ctx, sector, read, write) +} + +// StorageReportHealth mocks base method. 
+func (m *MockSectorIndex) StorageReportHealth(arg0 context.Context, arg1 stores.ID, arg2 stores.HealthReport) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StorageReportHealth", arg0, arg1, arg2) + ret0, _ := ret[0].(error) + return ret0 +} + +// StorageReportHealth indicates an expected call of StorageReportHealth. +func (mr *MockSectorIndexMockRecorder) StorageReportHealth(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StorageReportHealth", reflect.TypeOf((*MockSectorIndex)(nil).StorageReportHealth), arg0, arg1, arg2) +} + +// StorageTryLock mocks base method. +func (m *MockSectorIndex) StorageTryLock(ctx context.Context, sector abi.SectorID, read, write storiface.SectorFileType) (bool, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StorageTryLock", ctx, sector, read, write) + ret0, _ := ret[0].(bool) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StorageTryLock indicates an expected call of StorageTryLock. +func (mr *MockSectorIndexMockRecorder) StorageTryLock(ctx, sector, read, write interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StorageTryLock", reflect.TypeOf((*MockSectorIndex)(nil).StorageTryLock), ctx, sector, read, write) +} diff --git a/extern/sector-storage/stores/mocks/stores.go b/extern/sector-storage/stores/mocks/stores.go new file mode 100644 index 000000000..fdfd73a07 --- /dev/null +++ b/extern/sector-storage/stores/mocks/stores.go @@ -0,0 +1,212 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: interface.go + +// Package mocks is a generated GoMock package. 
+package mocks + +import ( + context "context" + os "os" + reflect "reflect" + + abi "github.com/filecoin-project/go-state-types/abi" + fsutil "github.com/filecoin-project/lotus/extern/sector-storage/fsutil" + partialfile "github.com/filecoin-project/lotus/extern/sector-storage/partialfile" + stores "github.com/filecoin-project/lotus/extern/sector-storage/stores" + storiface "github.com/filecoin-project/lotus/extern/sector-storage/storiface" + storage "github.com/filecoin-project/specs-storage/storage" + gomock "github.com/golang/mock/gomock" +) + +// MockpartialFileHandler is a mock of partialFileHandler interface. +type MockpartialFileHandler struct { + ctrl *gomock.Controller + recorder *MockpartialFileHandlerMockRecorder +} + +// MockpartialFileHandlerMockRecorder is the mock recorder for MockpartialFileHandler. +type MockpartialFileHandlerMockRecorder struct { + mock *MockpartialFileHandler +} + +// NewMockpartialFileHandler creates a new mock instance. +func NewMockpartialFileHandler(ctrl *gomock.Controller) *MockpartialFileHandler { + mock := &MockpartialFileHandler{ctrl: ctrl} + mock.recorder = &MockpartialFileHandlerMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockpartialFileHandler) EXPECT() *MockpartialFileHandlerMockRecorder { + return m.recorder +} + +// Close mocks base method. +func (m *MockpartialFileHandler) Close(pf *partialfile.PartialFile) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Close", pf) + ret0, _ := ret[0].(error) + return ret0 +} + +// Close indicates an expected call of Close. +func (mr *MockpartialFileHandlerMockRecorder) Close(pf interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockpartialFileHandler)(nil).Close), pf) +} + +// HasAllocated mocks base method. 
+func (m *MockpartialFileHandler) HasAllocated(pf *partialfile.PartialFile, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (bool, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "HasAllocated", pf, offset, size) + ret0, _ := ret[0].(bool) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// HasAllocated indicates an expected call of HasAllocated. +func (mr *MockpartialFileHandlerMockRecorder) HasAllocated(pf, offset, size interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HasAllocated", reflect.TypeOf((*MockpartialFileHandler)(nil).HasAllocated), pf, offset, size) +} + +// OpenPartialFile mocks base method. +func (m *MockpartialFileHandler) OpenPartialFile(maxPieceSize abi.PaddedPieceSize, path string) (*partialfile.PartialFile, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "OpenPartialFile", maxPieceSize, path) + ret0, _ := ret[0].(*partialfile.PartialFile) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// OpenPartialFile indicates an expected call of OpenPartialFile. +func (mr *MockpartialFileHandlerMockRecorder) OpenPartialFile(maxPieceSize, path interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "OpenPartialFile", reflect.TypeOf((*MockpartialFileHandler)(nil).OpenPartialFile), maxPieceSize, path) +} + +// Reader mocks base method. +func (m *MockpartialFileHandler) Reader(pf *partialfile.PartialFile, offset storiface.PaddedByteIndex, size abi.PaddedPieceSize) (*os.File, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Reader", pf, offset, size) + ret0, _ := ret[0].(*os.File) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Reader indicates an expected call of Reader. 
+func (mr *MockpartialFileHandlerMockRecorder) Reader(pf, offset, size interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Reader", reflect.TypeOf((*MockpartialFileHandler)(nil).Reader), pf, offset, size) +} + +// MockStore is a mock of Store interface. +type MockStore struct { + ctrl *gomock.Controller + recorder *MockStoreMockRecorder +} + +// MockStoreMockRecorder is the mock recorder for MockStore. +type MockStoreMockRecorder struct { + mock *MockStore +} + +// NewMockStore creates a new mock instance. +func NewMockStore(ctrl *gomock.Controller) *MockStore { + mock := &MockStore{ctrl: ctrl} + mock.recorder = &MockStoreMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockStore) EXPECT() *MockStoreMockRecorder { + return m.recorder +} + +// AcquireSector mocks base method. +func (m *MockStore) AcquireSector(ctx context.Context, s storage.SectorRef, existing, allocate storiface.SectorFileType, sealing storiface.PathType, op storiface.AcquireMode) (storiface.SectorPaths, storiface.SectorPaths, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "AcquireSector", ctx, s, existing, allocate, sealing, op) + ret0, _ := ret[0].(storiface.SectorPaths) + ret1, _ := ret[1].(storiface.SectorPaths) + ret2, _ := ret[2].(error) + return ret0, ret1, ret2 +} + +// AcquireSector indicates an expected call of AcquireSector. +func (mr *MockStoreMockRecorder) AcquireSector(ctx, s, existing, allocate, sealing, op interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AcquireSector", reflect.TypeOf((*MockStore)(nil).AcquireSector), ctx, s, existing, allocate, sealing, op) +} + +// FsStat mocks base method. 
+func (m *MockStore) FsStat(ctx context.Context, id stores.ID) (fsutil.FsStat, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "FsStat", ctx, id) + ret0, _ := ret[0].(fsutil.FsStat) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// FsStat indicates an expected call of FsStat. +func (mr *MockStoreMockRecorder) FsStat(ctx, id interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FsStat", reflect.TypeOf((*MockStore)(nil).FsStat), ctx, id) +} + +// MoveStorage mocks base method. +func (m *MockStore) MoveStorage(ctx context.Context, s storage.SectorRef, types storiface.SectorFileType) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MoveStorage", ctx, s, types) + ret0, _ := ret[0].(error) + return ret0 +} + +// MoveStorage indicates an expected call of MoveStorage. +func (mr *MockStoreMockRecorder) MoveStorage(ctx, s, types interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MoveStorage", reflect.TypeOf((*MockStore)(nil).MoveStorage), ctx, s, types) +} + +// Remove mocks base method. +func (m *MockStore) Remove(ctx context.Context, s abi.SectorID, types storiface.SectorFileType, force bool) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Remove", ctx, s, types, force) + ret0, _ := ret[0].(error) + return ret0 +} + +// Remove indicates an expected call of Remove. +func (mr *MockStoreMockRecorder) Remove(ctx, s, types, force interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Remove", reflect.TypeOf((*MockStore)(nil).Remove), ctx, s, types, force) +} + +// RemoveCopies mocks base method. +func (m *MockStore) RemoveCopies(ctx context.Context, s abi.SectorID, types storiface.SectorFileType) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "RemoveCopies", ctx, s, types) + ret0, _ := ret[0].(error) + return ret0 +} + +// RemoveCopies indicates an expected call of RemoveCopies. 
+func (mr *MockStoreMockRecorder) RemoveCopies(ctx, s, types interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RemoveCopies", reflect.TypeOf((*MockStore)(nil).RemoveCopies), ctx, s, types) +} + +// Reserve mocks base method. +func (m *MockStore) Reserve(ctx context.Context, sid storage.SectorRef, ft storiface.SectorFileType, storageIDs storiface.SectorPaths, overheadTab map[storiface.SectorFileType]int) (func(), error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Reserve", ctx, sid, ft, storageIDs, overheadTab) + ret0, _ := ret[0].(func()) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Reserve indicates an expected call of Reserve. +func (mr *MockStoreMockRecorder) Reserve(ctx, sid, ft, storageIDs, overheadTab interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Reserve", reflect.TypeOf((*MockStore)(nil).Reserve), ctx, sid, ft, storageIDs, overheadTab) +} diff --git a/extern/sector-storage/stores/remote.go b/extern/sector-storage/stores/remote.go index 4388a2ffb..6f8efc03e 100644 --- a/extern/sector-storage/stores/remote.go +++ b/extern/sector-storage/stores/remote.go @@ -3,6 +3,7 @@ package stores import ( "context" "encoding/json" + "fmt" "io" "io/ioutil" "math/bits" @@ -31,7 +32,7 @@ var FetchTempSubdir = "fetching" var CopyBuf = 1 << 20 type Remote struct { - local *Local + local Store index SectorIndex auth http.Header @@ -39,6 +40,8 @@ type Remote struct { fetchLk sync.Mutex fetching map[abi.SectorID]chan struct{} + + pfHandler partialFileHandler } func (r *Remote) RemoveCopies(ctx context.Context, s abi.SectorID, types storiface.SectorFileType) error { @@ -49,7 +52,7 @@ func (r *Remote) RemoveCopies(ctx context.Context, s abi.SectorID, types storifa return r.local.RemoveCopies(ctx, s, types) } -func NewRemote(local *Local, index SectorIndex, auth http.Header, fetchLimit int) *Remote { +func NewRemote(local Store, index 
SectorIndex, auth http.Header, fetchLimit int, pfHandler partialFileHandler) *Remote { return &Remote{ local: local, index: index, @@ -57,7 +60,8 @@ func NewRemote(local *Local, index SectorIndex, auth http.Header, fetchLimit int limit: make(chan struct{}, fetchLimit), - fetching: map[abi.SectorID]chan struct{}{}, + fetching: map[abi.SectorID]chan struct{}{}, + pfHandler: pfHandler, } } @@ -293,6 +297,32 @@ func (r *Remote) fetch(ctx context.Context, url, outname string) error { } } +func (r *Remote) checkAllocated(ctx context.Context, url string, spt abi.RegisteredSealProof, offset, size abi.PaddedPieceSize) (bool, error) { + url = fmt.Sprintf("%s/%d/allocated/%d/%d", url, spt, offset.Unpadded(), size.Unpadded()) + req, err := http.NewRequest("GET", url, nil) + if err != nil { + return false, xerrors.Errorf("request: %w", err) + } + req.Header = r.auth.Clone() + fmt.Printf("req using header: %#v \n", r.auth) + req = req.WithContext(ctx) + + resp, err := http.DefaultClient.Do(req) + if err != nil { + return false, xerrors.Errorf("do request: %w", err) + } + defer resp.Body.Close() // nolint + + switch resp.StatusCode { + case http.StatusOK: + return true, nil + case http.StatusRequestedRangeNotSatisfiable: + return false, nil + default: + return false, xerrors.Errorf("unexpected http response: %d", resp.StatusCode) + } +} + func (r *Remote) MoveStorage(ctx context.Context, s storage.SectorRef, types storiface.SectorFileType) error { // Make sure we have the data local _, _, err := r.AcquireSector(ctx, s, types, storiface.FTNone, storiface.PathStorage, storiface.AcquireMove) @@ -415,4 +445,240 @@ func (r *Remote) FsStat(ctx context.Context, id ID) (fsutil.FsStat, error) { return out, nil } +func (r *Remote) readRemote(ctx context.Context, url string, offset, size abi.PaddedPieceSize) (io.ReadCloser, error) { + if len(r.limit) >= cap(r.limit) { + log.Infof("Throttling remote read, %d already running", len(r.limit)) + } + + // TODO: Smarter throttling + // * Priority 
(just going sequentially is still pretty good) + // * Per interface + // * Aware of remote load + select { + case r.limit <- struct{}{}: + defer func() { <-r.limit }() + case <-ctx.Done(): + return nil, xerrors.Errorf("context error while waiting for fetch limiter: %w", ctx.Err()) + } + + req, err := http.NewRequest("GET", url, nil) + if err != nil { + return nil, xerrors.Errorf("request: %w", err) + } + + if r.auth != nil { + req.Header = r.auth.Clone() + } + req.Header.Set("Range", fmt.Sprintf("bytes=%d-%d", offset, offset+size-1)) + req = req.WithContext(ctx) + + resp, err := http.DefaultClient.Do(req) + if err != nil { + return nil, xerrors.Errorf("do request: %w", err) + } + + if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusPartialContent { + resp.Body.Close() // nolint + return nil, xerrors.Errorf("non-200 code: %d", resp.StatusCode) + } + + return resp.Body, nil +} + +// CheckIsUnsealed checks if we have an unsealed piece at the given offset in an already unsealed sector file for the given piece +// either locally or on any of the workers. +// Returns true if we have the unsealed piece, false otherwise. +func (r *Remote) CheckIsUnsealed(ctx context.Context, s storage.SectorRef, offset, size abi.PaddedPieceSize) (bool, error) { + ft := storiface.FTUnsealed + + paths, _, err := r.local.AcquireSector(ctx, s, ft, storiface.FTNone, storiface.PathStorage, storiface.AcquireMove) + if err != nil { + return false, xerrors.Errorf("acquire local: %w", err) + } + + path := storiface.PathByType(paths, ft) + if path != "" { + // if we have the unsealed file locally, check if it has the unsealed piece. + log.Infof("Read local %s (+%d,%d)", path, offset, size) + ssize, err := s.ProofType.SectorSize() + if err != nil { + return false, err + } + + // open the unsealed sector file for the given sector size located at the given path. 
+ pf, err := r.pfHandler.OpenPartialFile(abi.PaddedPieceSize(ssize), path) + if err != nil { + return false, xerrors.Errorf("opening partial file: %w", err) + } + log.Debugf("local partial file opened %s (+%d,%d)", path, offset, size) + + // even though we have an unsealed file for the given sector, we still need to determine if we have the unsealed piece + // in the unsealed sector file. That is what `HasAllocated` checks for. + has, err := r.pfHandler.HasAllocated(pf, storiface.UnpaddedByteIndex(offset.Unpadded()), size.Unpadded()) + if err != nil { + return false, xerrors.Errorf("has allocated: %w", err) + } + + // close the local unsealed file. + if err := r.pfHandler.Close(pf); err != nil { + return false, xerrors.Errorf("failed to close partial file: %s", err) + } + log.Debugf("checked if local partial file has the piece %s (+%d,%d), returning answer=%t", path, offset, size, has) + + // Sector files can technically not have a piece unsealed locally, but have it unsealed in remote storage, so we probably + // want to return only if has is true + if has { + return has, nil + } + } + + // --- We don't have the unsealed piece in an unsealed sector file locally + // Check if we have it in a remote cluster. + + si, err := r.index.StorageFindSector(ctx, s.ID, ft, 0, false) + if err != nil { + return false, xerrors.Errorf("StorageFindSector: %s", err) + } + + if len(si) == 0 { + return false, nil + } + + sort.Slice(si, func(i, j int) bool { + return si[i].Weight < si[j].Weight + }) + + for _, info := range si { + for _, url := range info.URLs { + ok, err := r.checkAllocated(ctx, url, s.ProofType, offset, size) + if err != nil { + log.Warnw("check if remote has piece", "url", url, "error", err) + continue + } + if !ok { + continue + } + + return true, nil + } + } + + return false, nil +} + +// Reader returns a reader for an unsealed piece at the given offset in the given sector. 
+// If the Miner has the unsealed piece locally, it will return a reader that reads from the local copy. +// If the Miner does NOT have the unsealed piece locally, it will query all workers that have the unsealed sector file +// to know if they have the unsealed piece and will then read the unsealed piece data from a worker that has it. +// +// Returns a nil reader if : +// 1. no worker(local worker included) has an unsealed file for the given sector OR +// 2. no worker(local worker included) has the unsealed piece in their unsealed sector file. +// Will return a nil reader and a nil error in such a case. +func (r *Remote) Reader(ctx context.Context, s storage.SectorRef, offset, size abi.PaddedPieceSize) (io.ReadCloser, error) { + ft := storiface.FTUnsealed + + // check if we have the unsealed sector file locally + paths, _, err := r.local.AcquireSector(ctx, s, ft, storiface.FTNone, storiface.PathStorage, storiface.AcquireMove) + if err != nil { + return nil, xerrors.Errorf("acquire local: %w", err) + } + + path := storiface.PathByType(paths, ft) + + if path != "" { + // if we have the unsealed file locally, return a reader that can be used to read the contents of the + // unsealed piece. + log.Debugf("Check local %s (+%d,%d)", path, offset, size) + ssize, err := s.ProofType.SectorSize() + if err != nil { + return nil, err + } + log.Debugf("fetched sector size %s (+%d,%d)", path, offset, size) + + // open the unsealed sector file for the given sector size located at the given path. + pf, err := r.pfHandler.OpenPartialFile(abi.PaddedPieceSize(ssize), path) + if err != nil { + return nil, xerrors.Errorf("opening partial file: %w", err) + } + log.Debugf("local partial file opened %s (+%d,%d)", path, offset, size) + + // even though we have an unsealed file for the given sector, we still need to determine if we have the unsealed piece + // in the unsealed sector file. That is what `HasAllocated` checks for. 
+ has, err := r.pfHandler.HasAllocated(pf, storiface.UnpaddedByteIndex(offset.Unpadded()), size.Unpadded()) + if err != nil { + return nil, xerrors.Errorf("has allocated: %w", err) + } + log.Debugf("check if partial file is allocated %s (+%d,%d)", path, offset, size) + + if has { + log.Infof("returning piece reader for local unsealed piece sector=%+v, (offset=%d, size=%d)", s.ID, offset, size) + return r.pfHandler.Reader(pf, storiface.PaddedByteIndex(offset), size) + } + + log.Debugf("miner has unsealed file but not unseal piece, %s (+%d,%d)", path, offset, size) + if err := r.pfHandler.Close(pf); err != nil { + return nil, xerrors.Errorf("close partial file: %w", err) + } + } + + // --- We don't have the unsealed piece in an unsealed sector file locally + + // if we don't have the unsealed sector file locally, we'll first lookup the Miner Sector Store Index + // to determine which workers have the unsealed file and then query those workers to know + // if they have the unsealed piece in the unsealed sector file. + si, err := r.index.StorageFindSector(ctx, s.ID, ft, 0, false) + if err != nil { + log.Debugf("Reader, did not find unsealed file on any of the workers %s (+%d,%d)", path, offset, size) + return nil, err + } + + if len(si) == 0 { + return nil, xerrors.Errorf("failed to read sector %v from remote(%d): %w", s, ft, storiface.ErrSectorNotFound) + } + + sort.Slice(si, func(i, j int) bool { + return si[i].Weight > si[j].Weight + }) + + var lastErr error + for _, info := range si { + for _, url := range info.URLs { + // checkAllocated makes a JSON RPC query to a remote worker to determine if it has + // unsealed piece in their unsealed sector file. + ok, err := r.checkAllocated(ctx, url, s.ProofType, offset, size) + if err != nil { + log.Warnw("check if remote has piece", "url", url, "error", err) + lastErr = err + continue + } + if !ok { + continue + } + + // readRemote fetches a reader that we can use to read the unsealed piece from the remote worker. 
+ // It uses a ranged HTTP query to ensure we ONLY read the unsealed piece and not the entire unsealed file. + rd, err := r.readRemote(ctx, url, offset, size) + if err != nil { + log.Warnw("reading from remote", "url", url, "error", err) + lastErr = err + continue + } + log.Infof("Read remote %s (+%d,%d)", url, offset, size) + return rd, nil + } + } + + // we couldn't find a unsealed file with the unsealed piece, will return a nil reader. + log.Debugf("returning nil reader, did not find unsealed piece for %+v (+%d,%d), last error=%s", s, offset, size, lastErr) + return nil, nil +} + +func (r *Remote) Reserve(ctx context.Context, sid storage.SectorRef, ft storiface.SectorFileType, storageIDs storiface.SectorPaths, overheadTab map[storiface.SectorFileType]int) (func(), error) { + log.Warnf("reserve called on remote store, sectorID: %v", sid.ID) + return func() { + + }, nil +} + var _ Store = &Remote{} diff --git a/extern/sector-storage/stores/remote_test.go b/extern/sector-storage/stores/remote_test.go new file mode 100644 index 000000000..b708bb68f --- /dev/null +++ b/extern/sector-storage/stores/remote_test.go @@ -0,0 +1,741 @@ +package stores_test + +import ( + "context" + "fmt" + "io/ioutil" + "net/http" + "net/http/httptest" + "os" + "testing" + + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/lotus/extern/sector-storage/partialfile" + "github.com/filecoin-project/lotus/extern/sector-storage/stores" + "github.com/filecoin-project/lotus/extern/sector-storage/stores/mocks" + "github.com/filecoin-project/lotus/extern/sector-storage/storiface" + "github.com/filecoin-project/specs-storage/storage" + "github.com/golang/mock/gomock" + "github.com/gorilla/mux" + logging "github.com/ipfs/go-log/v2" + "github.com/stretchr/testify/require" + "golang.org/x/xerrors" +) + +func TestReader(t *testing.T) { + logging.SetAllLoggers(logging.LevelDebug) + bz := []byte("Hello World") + + pfPath := "path" + emptyPartialFile := 
&partialfile.PartialFile{} + sectorSize := abi.SealProofInfos[1].SectorSize + + ft := storiface.FTUnsealed + + sectorRef := storage.SectorRef{ + ID: abi.SectorID{ + Miner: 123, + Number: 123, + }, + ProofType: 1, + } + + offset := abi.PaddedPieceSize(100) + size := abi.PaddedPieceSize(1000) + ctx := context.Background() + + tcs := map[string]struct { + storeFnc func(s *mocks.MockStore) + pfFunc func(s *mocks.MockpartialFileHandler) + indexFnc func(s *mocks.MockSectorIndex, serverURL string) + + needHttpServer bool + + getAllocatedReturnCode int + getSectorReturnCode int + + serverUrl string + + // expectation + errStr string + expectedNonNilReader bool + expectedSectorBytes []byte + }{ + + // -------- have the unsealed file locally + "fails when error while acquiring unsealed file": { + storeFnc: func(l *mocks.MockStore) { + mockSectorAcquire(l, sectorRef, pfPath, xerrors.New("acquire error")) + }, + + errStr: "acquire error", + }, + + "fails when error while opening local partial (unsealed) file": { + storeFnc: func(l *mocks.MockStore) { + mockSectorAcquire(l, sectorRef, pfPath, nil) + }, + + pfFunc: func(pf *mocks.MockpartialFileHandler) { + mockPartialFileOpen(pf, sectorSize, pfPath, xerrors.New("pf open error")) + }, + errStr: "pf open error", + }, + + "fails when error while checking if local unsealed file has piece": { + storeFnc: func(l *mocks.MockStore) { + mockSectorAcquire(l, sectorRef, pfPath, nil) + }, + + pfFunc: func(pf *mocks.MockpartialFileHandler) { + mockPartialFileOpen(pf, sectorSize, pfPath, nil) + mockCheckAllocation(pf, offset, size, emptyPartialFile, + true, xerrors.New("piece check error")) + }, + + errStr: "piece check error", + }, + + "fails when error while closing local unsealed file that does not have the piece": { + storeFnc: func(l *mocks.MockStore) { + mockSectorAcquire(l, sectorRef, pfPath, nil) + }, + + pfFunc: func(pf *mocks.MockpartialFileHandler) { + mockPartialFileOpen(pf, sectorSize, pfPath, nil) + mockCheckAllocation(pf, 
offset, size, emptyPartialFile, + false, nil) + pf.EXPECT().Close(emptyPartialFile).Return(xerrors.New("close error")).Times(1) + }, + errStr: "close error", + }, + + "fails when error while fetching reader for the local unsealed file that has the unsealed piece": { + storeFnc: func(l *mocks.MockStore) { + mockSectorAcquire(l, sectorRef, pfPath, nil) + }, + + pfFunc: func(pf *mocks.MockpartialFileHandler) { + mockPartialFileOpen(pf, sectorSize, pfPath, nil) + mockCheckAllocation(pf, offset, size, emptyPartialFile, + true, nil) + mockPfReader(pf, emptyPartialFile, offset, size, nil, xerrors.New("reader error")) + + }, + errStr: "reader error", + }, + + // ------------------- don't have the unsealed file locally + + "fails when error while finding sector": { + storeFnc: func(l *mocks.MockStore) { + mockSectorAcquire(l, sectorRef, "", nil) + }, + + indexFnc: func(in *mocks.MockSectorIndex, _ string) { + in.EXPECT().StorageFindSector(gomock.Any(), sectorRef.ID, storiface.FTUnsealed, gomock.Any(), + false).Return(nil, xerrors.New("find sector error")) + }, + errStr: "find sector error", + }, + + "fails when no worker has unsealed file": { + storeFnc: func(l *mocks.MockStore) { + mockSectorAcquire(l, sectorRef, "", nil) + }, + + indexFnc: func(in *mocks.MockSectorIndex, _ string) { + in.EXPECT().StorageFindSector(gomock.Any(), sectorRef.ID, storiface.FTUnsealed, gomock.Any(), + false).Return(nil, nil) + }, + errStr: storiface.ErrSectorNotFound.Error(), + }, + + // --- nil reader when local unsealed file does NOT have unsealed piece + "nil reader when local unsealed file does not have the unsealed piece and remote sector also dosen't have the unsealed piece": { + storeFnc: func(l *mocks.MockStore) { + mockSectorAcquire(l, sectorRef, pfPath, nil) + }, + + pfFunc: func(pf *mocks.MockpartialFileHandler) { + mockPartialFileOpen(pf, sectorSize, pfPath, nil) + mockCheckAllocation(pf, offset, size, emptyPartialFile, + false, nil) + + 
pf.EXPECT().Close(emptyPartialFile).Return(nil).Times(1) + + }, + + indexFnc: func(in *mocks.MockSectorIndex, url string) { + si := stores.SectorStorageInfo{ + URLs: []string{url}, + } + + in.EXPECT().StorageFindSector(gomock.Any(), sectorRef.ID, storiface.FTUnsealed, gomock.Any(), + false).Return([]stores.SectorStorageInfo{si}, nil).Times(1) + }, + + needHttpServer: true, + getAllocatedReturnCode: 500, + }, + + // ---- nil reader when none of the remote unsealed file has unsealed piece + "nil reader when none of the worker has the unsealed piece": { + storeFnc: func(l *mocks.MockStore) { + mockSectorAcquire(l, sectorRef, "", nil) + }, + + indexFnc: func(in *mocks.MockSectorIndex, url string) { + si := stores.SectorStorageInfo{ + URLs: []string{url}, + } + + in.EXPECT().StorageFindSector(gomock.Any(), sectorRef.ID, storiface.FTUnsealed, gomock.Any(), + false).Return([]stores.SectorStorageInfo{si}, nil).Times(1) + }, + + needHttpServer: true, + getAllocatedReturnCode: 500, + }, + + "nil reader when none of the worker is able to serve the unsealed piece even though they have it": { + storeFnc: func(l *mocks.MockStore) { + mockSectorAcquire(l, sectorRef, "", nil) + }, + + indexFnc: func(in *mocks.MockSectorIndex, url string) { + si := stores.SectorStorageInfo{ + URLs: []string{url}, + } + + in.EXPECT().StorageFindSector(gomock.Any(), sectorRef.ID, storiface.FTUnsealed, gomock.Any(), + false).Return([]stores.SectorStorageInfo{si}, nil).Times(1) + }, + + needHttpServer: true, + getSectorReturnCode: 500, + getAllocatedReturnCode: 200, + }, + + // ---- Success for local unsealed file + "successfully fetches reader for piece from local unsealed file": { + storeFnc: func(l *mocks.MockStore) { + mockSectorAcquire(l, sectorRef, pfPath, nil) + }, + + pfFunc: func(pf *mocks.MockpartialFileHandler) { + mockPartialFileOpen(pf, sectorSize, pfPath, nil) + mockCheckAllocation(pf, offset, size, emptyPartialFile, + true, nil) + + f, err := ioutil.TempFile("", "TestReader-") + 
require.NoError(t, err) + _, err = f.Write(bz) + require.NoError(t, err) + require.NoError(t, f.Close()) + f, err = os.Open(f.Name()) + require.NoError(t, err) + + mockPfReader(pf, emptyPartialFile, offset, size, f, nil) + + }, + + expectedNonNilReader: true, + expectedSectorBytes: bz, + }, + + // --- Success for remote unsealed file + // --- Success for remote unsealed file + "successfully fetches reader from remote unsealed piece when local unsealed file does NOT have the unsealed Piece": { + storeFnc: func(l *mocks.MockStore) { + mockSectorAcquire(l, sectorRef, pfPath, nil) + }, + + pfFunc: func(pf *mocks.MockpartialFileHandler) { + mockPartialFileOpen(pf, sectorSize, pfPath, nil) + mockCheckAllocation(pf, offset, size, emptyPartialFile, + false, nil) + + pf.EXPECT().Close(emptyPartialFile).Return(nil).Times(1) + + }, + + indexFnc: func(in *mocks.MockSectorIndex, url string) { + si := stores.SectorStorageInfo{ + URLs: []string{url}, + } + + in.EXPECT().StorageFindSector(gomock.Any(), sectorRef.ID, storiface.FTUnsealed, gomock.Any(), + false).Return([]stores.SectorStorageInfo{si}, nil).Times(1) + }, + + needHttpServer: true, + getSectorReturnCode: 200, + getAllocatedReturnCode: 200, + expectedSectorBytes: bz, + expectedNonNilReader: true, + }, + + "successfully fetches reader for piece from remote unsealed piece": { + storeFnc: func(l *mocks.MockStore) { + mockSectorAcquire(l, sectorRef, "", nil) + }, + + indexFnc: func(in *mocks.MockSectorIndex, url string) { + si := stores.SectorStorageInfo{ + URLs: []string{url}, + } + + in.EXPECT().StorageFindSector(gomock.Any(), sectorRef.ID, storiface.FTUnsealed, gomock.Any(), + false).Return([]stores.SectorStorageInfo{si}, nil).Times(1) + }, + + needHttpServer: true, + getSectorReturnCode: 200, + getAllocatedReturnCode: 200, + expectedSectorBytes: bz, + expectedNonNilReader: true, + }, + } + + for name, tc := range tcs { + tc := tc + t.Run(name, func(t *testing.T) { + // create go mock controller here + mockCtrl := 
gomock.NewController(t) + // when test is done, assert expectations on all mock objects. + defer mockCtrl.Finish() + + // create them mocks + lstore := mocks.NewMockStore(mockCtrl) + pfhandler := mocks.NewMockpartialFileHandler(mockCtrl) + index := mocks.NewMockSectorIndex(mockCtrl) + + if tc.storeFnc != nil { + tc.storeFnc(lstore) + } + if tc.pfFunc != nil { + tc.pfFunc(pfhandler) + } + + if tc.needHttpServer { + // run http server + ts := httptest.NewServer(&mockHttpServer{ + expectedSectorName: storiface.SectorName(sectorRef.ID), + expectedFileType: ft.String(), + expectedOffset: fmt.Sprintf("%d", offset.Unpadded()), + expectedSize: fmt.Sprintf("%d", size.Unpadded()), + expectedSectorType: fmt.Sprintf("%d", sectorRef.ProofType), + + getAllocatedReturnCode: tc.getAllocatedReturnCode, + getSectorReturnCode: tc.getSectorReturnCode, + getSectorBytes: tc.expectedSectorBytes, + }) + defer ts.Close() + tc.serverUrl = fmt.Sprintf("%s/remote/%s/%s", ts.URL, ft.String(), storiface.SectorName(sectorRef.ID)) + } + if tc.indexFnc != nil { + tc.indexFnc(index, tc.serverUrl) + } + + remoteStore := stores.NewRemote(lstore, index, nil, 6000, pfhandler) + + rd, err := remoteStore.Reader(ctx, sectorRef, offset, size) + + if tc.errStr != "" { + require.Error(t, err) + require.Nil(t, rd) + require.Contains(t, err.Error(), tc.errStr) + } else { + require.NoError(t, err) + } + + if !tc.expectedNonNilReader { + require.Nil(t, rd) + } else { + require.NotNil(t, rd) + defer func() { + require.NoError(t, rd.Close()) + }() + + if f, ok := rd.(*os.File); ok { + require.NoError(t, os.Remove(f.Name())) + } + + bz, err := ioutil.ReadAll(rd) + require.NoError(t, err) + require.Equal(t, tc.expectedSectorBytes, bz) + } + + }) + } +} + +func TestCheckIsUnsealed(t *testing.T) { + logging.SetAllLoggers(logging.LevelDebug) + + pfPath := "path" + ft := storiface.FTUnsealed + emptyPartialFile := &partialfile.PartialFile{} + + sectorRef := storage.SectorRef{ + ID: abi.SectorID{ + Miner: 123, + Number: 
123, + }, + ProofType: 1, + } + sectorSize := abi.SealProofInfos[1].SectorSize + + offset := abi.PaddedPieceSize(100) + size := abi.PaddedPieceSize(1000) + ctx := context.Background() + + tcs := map[string]struct { + storeFnc func(s *mocks.MockStore) + pfFunc func(s *mocks.MockpartialFileHandler) + indexFnc func(s *mocks.MockSectorIndex, serverURL string) + + needHttpServer bool + + getAllocatedReturnCode int + + serverUrl string + + // expectation + errStr string + expectedIsUnealed bool + }{ + + // -------- have the unsealed file locally + "fails when error while acquiring unsealed file": { + storeFnc: func(l *mocks.MockStore) { + mockSectorAcquire(l, sectorRef, pfPath, xerrors.New("acquire error")) + }, + + errStr: "acquire error", + }, + + "fails when error while opening local partial (unsealed) file": { + storeFnc: func(l *mocks.MockStore) { + mockSectorAcquire(l, sectorRef, pfPath, nil) + }, + + pfFunc: func(pf *mocks.MockpartialFileHandler) { + mockPartialFileOpen(pf, sectorSize, pfPath, xerrors.New("pf open error")) + }, + errStr: "pf open error", + }, + + "fails when error while checking if local unsealed file has piece": { + storeFnc: func(l *mocks.MockStore) { + mockSectorAcquire(l, sectorRef, pfPath, nil) + }, + + pfFunc: func(pf *mocks.MockpartialFileHandler) { + mockPartialFileOpen(pf, sectorSize, pfPath, nil) + mockCheckAllocation(pf, offset, size, emptyPartialFile, + true, xerrors.New("piece check error")) + }, + + errStr: "piece check error", + }, + + "fails when error while closing local unsealed file": { + storeFnc: func(l *mocks.MockStore) { + mockSectorAcquire(l, sectorRef, pfPath, nil) + }, + + pfFunc: func(pf *mocks.MockpartialFileHandler) { + mockPartialFileOpen(pf, sectorSize, pfPath, nil) + + mockCheckAllocation(pf, offset, size, emptyPartialFile, + false, nil) + + pf.EXPECT().Close(emptyPartialFile).Return(xerrors.New("close error")).Times(1) + }, + errStr: "close error", + }, + + // ------------------- don't have the unsealed file 
locally + + "fails when error while finding sector": { + storeFnc: func(l *mocks.MockStore) { + mockSectorAcquire(l, sectorRef, "", nil) + }, + + indexFnc: func(in *mocks.MockSectorIndex, _ string) { + in.EXPECT().StorageFindSector(gomock.Any(), sectorRef.ID, storiface.FTUnsealed, gomock.Any(), + false).Return(nil, xerrors.New("find sector error")) + }, + errStr: "find sector error", + }, + + "false when no worker has unsealed file": { + storeFnc: func(l *mocks.MockStore) { + mockSectorAcquire(l, sectorRef, "", nil) + }, + + indexFnc: func(in *mocks.MockSectorIndex, _ string) { + in.EXPECT().StorageFindSector(gomock.Any(), sectorRef.ID, storiface.FTUnsealed, gomock.Any(), + false).Return(nil, nil) + }, + }, + + // false when local unsealed file does NOT have unsealed piece + "false when local unsealed file does not have the piece and remote sector too dosen't have the piece": { + storeFnc: func(l *mocks.MockStore) { + mockSectorAcquire(l, sectorRef, pfPath, nil) + }, + + pfFunc: func(pf *mocks.MockpartialFileHandler) { + mockPartialFileOpen(pf, sectorSize, pfPath, nil) + mockCheckAllocation(pf, offset, size, emptyPartialFile, + false, nil) + + pf.EXPECT().Close(emptyPartialFile).Return(nil).Times(1) + }, + + indexFnc: func(in *mocks.MockSectorIndex, url string) { + si := stores.SectorStorageInfo{ + URLs: []string{url}, + } + + in.EXPECT().StorageFindSector(gomock.Any(), sectorRef.ID, storiface.FTUnsealed, gomock.Any(), + false).Return([]stores.SectorStorageInfo{si}, nil).Times(1) + }, + + needHttpServer: true, + getAllocatedReturnCode: 500, + }, + + "false when none of the worker has the unsealed piece": { + storeFnc: func(l *mocks.MockStore) { + mockSectorAcquire(l, sectorRef, "", nil) + }, + + indexFnc: func(in *mocks.MockSectorIndex, url string) { + si := stores.SectorStorageInfo{ + URLs: []string{url}, + } + + in.EXPECT().StorageFindSector(gomock.Any(), sectorRef.ID, storiface.FTUnsealed, gomock.Any(), + false).Return([]stores.SectorStorageInfo{si}, 
nil).Times(1) + }, + + needHttpServer: true, + getAllocatedReturnCode: 500, + }, + + // ---- Success for local unsealed file + "true when local unsealed file has the piece": { + storeFnc: func(l *mocks.MockStore) { + mockSectorAcquire(l, sectorRef, pfPath, nil) + }, + + pfFunc: func(pf *mocks.MockpartialFileHandler) { + mockPartialFileOpen(pf, sectorSize, pfPath, nil) + mockCheckAllocation(pf, offset, size, emptyPartialFile, + true, nil) + pf.EXPECT().Close(emptyPartialFile).Return(nil).Times(1) + + }, + + expectedIsUnealed: true, + }, + + // --- Success for remote unsealed file + "true if we have a remote unsealed piece": { + storeFnc: func(l *mocks.MockStore) { + mockSectorAcquire(l, sectorRef, "", nil) + }, + + indexFnc: func(in *mocks.MockSectorIndex, url string) { + si := stores.SectorStorageInfo{ + URLs: []string{url}, + } + + in.EXPECT().StorageFindSector(gomock.Any(), sectorRef.ID, storiface.FTUnsealed, gomock.Any(), + false).Return([]stores.SectorStorageInfo{si}, nil).Times(1) + }, + + needHttpServer: true, + getAllocatedReturnCode: 200, + expectedIsUnealed: true, + }, + + "true when local unsealed file does NOT have the unsealed Piece but remote sector has the unsealed piece": { + storeFnc: func(l *mocks.MockStore) { + mockSectorAcquire(l, sectorRef, pfPath, nil) + }, + + pfFunc: func(pf *mocks.MockpartialFileHandler) { + mockPartialFileOpen(pf, sectorSize, pfPath, nil) + mockCheckAllocation(pf, offset, size, emptyPartialFile, + false, nil) + + pf.EXPECT().Close(emptyPartialFile).Return(nil).Times(1) + }, + + indexFnc: func(in *mocks.MockSectorIndex, url string) { + si := stores.SectorStorageInfo{ + URLs: []string{url}, + } + + in.EXPECT().StorageFindSector(gomock.Any(), sectorRef.ID, storiface.FTUnsealed, gomock.Any(), + false).Return([]stores.SectorStorageInfo{si}, nil).Times(1) + }, + + needHttpServer: true, + getAllocatedReturnCode: 200, + expectedIsUnealed: true, + }, + } + + for name, tc := range tcs { + tc := tc + t.Run(name, func(t *testing.T) { + 
// create go mock controller here + mockCtrl := gomock.NewController(t) + // when test is done, assert expectations on all mock objects. + defer mockCtrl.Finish() + + // create them mocks + lstore := mocks.NewMockStore(mockCtrl) + pfhandler := mocks.NewMockpartialFileHandler(mockCtrl) + index := mocks.NewMockSectorIndex(mockCtrl) + + if tc.storeFnc != nil { + tc.storeFnc(lstore) + } + if tc.pfFunc != nil { + tc.pfFunc(pfhandler) + } + + if tc.needHttpServer { + // run http server + ts := httptest.NewServer(&mockHttpServer{ + expectedSectorName: storiface.SectorName(sectorRef.ID), + expectedFileType: ft.String(), + expectedOffset: fmt.Sprintf("%d", offset.Unpadded()), + expectedSize: fmt.Sprintf("%d", size.Unpadded()), + expectedSectorType: fmt.Sprintf("%d", sectorRef.ProofType), + + getAllocatedReturnCode: tc.getAllocatedReturnCode, + }) + defer ts.Close() + tc.serverUrl = fmt.Sprintf("%s/remote/%s/%s", ts.URL, ft.String(), storiface.SectorName(sectorRef.ID)) + } + if tc.indexFnc != nil { + tc.indexFnc(index, tc.serverUrl) + } + + remoteStore := stores.NewRemote(lstore, index, nil, 6000, pfhandler) + + isUnsealed, err := remoteStore.CheckIsUnsealed(ctx, sectorRef, offset, size) + + if tc.errStr != "" { + require.Error(t, err) + require.False(t, isUnsealed) + require.Contains(t, err.Error(), tc.errStr) + } else { + require.NoError(t, err) + } + + require.Equal(t, tc.expectedIsUnealed, isUnsealed) + + }) + } +} + +func mockSectorAcquire(l *mocks.MockStore, sectorRef storage.SectorRef, pfPath string, err error) { + l.EXPECT().AcquireSector(gomock.Any(), sectorRef, storiface.FTUnsealed, + storiface.FTNone, storiface.PathStorage, storiface.AcquireMove).Return(storiface.SectorPaths{ + Unsealed: pfPath, + }, + storiface.SectorPaths{}, err).Times(1) +} + +func mockPartialFileOpen(pf *mocks.MockpartialFileHandler, sectorSize abi.SectorSize, pfPath string, err error) { + pf.EXPECT().OpenPartialFile(abi.PaddedPieceSize(sectorSize), pfPath).Return(&partialfile.PartialFile{}, + 
err).Times(1) +} + +func mockCheckAllocation(pf *mocks.MockpartialFileHandler, offset, size abi.PaddedPieceSize, file *partialfile.PartialFile, + out bool, err error) { + pf.EXPECT().HasAllocated(file, storiface.UnpaddedByteIndex(offset.Unpadded()), + size.Unpadded()).Return(out, err).Times(1) +} + +func mockPfReader(pf *mocks.MockpartialFileHandler, file *partialfile.PartialFile, offset, size abi.PaddedPieceSize, + outFile *os.File, err error) { + pf.EXPECT().Reader(file, storiface.PaddedByteIndex(offset), size).Return(outFile, err) +} + +type mockHttpServer struct { + expectedSectorName string + expectedFileType string + expectedOffset string + expectedSize string + expectedSectorType string + + getAllocatedReturnCode int + + getSectorReturnCode int + getSectorBytes []byte +} + +func (m *mockHttpServer) ServeHTTP(w http.ResponseWriter, r *http.Request) { + mux := mux.NewRouter() + mux.HandleFunc("/remote/{type}/{id}", m.getSector).Methods("GET") + mux.HandleFunc("/remote/{type}/{id}/{spt}/allocated/{offset}/{size}", m.getAllocated).Methods("GET") + mux.ServeHTTP(w, r) +} + +func (m *mockHttpServer) getAllocated(w http.ResponseWriter, r *http.Request) { + vars := mux.Vars(r) + + if vars["id"] != m.expectedSectorName { + w.WriteHeader(http.StatusBadRequest) + return + } + + if vars["type"] != m.expectedFileType { + w.WriteHeader(http.StatusBadRequest) + return + } + + if vars["spt"] != m.expectedSectorType { + w.WriteHeader(http.StatusBadRequest) + return + } + + if vars["offset"] != m.expectedOffset { + w.WriteHeader(http.StatusBadRequest) + return + } + + if vars["size"] != m.expectedSize { + w.WriteHeader(http.StatusBadRequest) + return + } + + w.WriteHeader(m.getAllocatedReturnCode) +} + +func (m *mockHttpServer) getSector(w http.ResponseWriter, r *http.Request) { + vars := mux.Vars(r) + + if vars["id"] != m.expectedSectorName { + w.WriteHeader(http.StatusBadRequest) + return + } + + if vars["type"] != m.expectedFileType { + w.WriteHeader(http.StatusBadRequest) 
+ return + } + + w.WriteHeader(m.getSectorReturnCode) + _, _ = w.Write(m.getSectorBytes) +} diff --git a/extern/sector-storage/stores/util_unix.go b/extern/sector-storage/stores/util_unix.go index 2b057468d..943681b49 100644 --- a/extern/sector-storage/stores/util_unix.go +++ b/extern/sector-storage/stores/util_unix.go @@ -2,8 +2,10 @@ package stores import ( "bytes" + "os" "os/exec" "path/filepath" + "runtime" "strings" "github.com/mitchellh/go-homedir" @@ -33,7 +35,18 @@ func move(from, to string) error { // can do better var errOut bytes.Buffer - cmd := exec.Command("/usr/bin/env", "mv", "-t", toDir, from) // nolint + + var cmd *exec.Cmd + if runtime.GOOS == "darwin" { + if err := os.MkdirAll(toDir, 0777); err != nil { + return xerrors.Errorf("failed exec MkdirAll: %s", err) + } + + cmd = exec.Command("/usr/bin/env", "mv", from, toDir) // nolint + } else { + cmd = exec.Command("/usr/bin/env", "mv", "-t", toDir, from) // nolint + } + cmd.Stderr = &errOut if err := cmd.Run(); err != nil { return xerrors.Errorf("exec mv (stderr: %s): %w", strings.TrimSpace(errOut.String()), err) diff --git a/extern/sector-storage/storiface/ffi.go b/extern/sector-storage/storiface/ffi.go index f6b2cbdd3..2b6df667a 100644 --- a/extern/sector-storage/storiface/ffi.go +++ b/extern/sector-storage/storiface/ffi.go @@ -5,6 +5,7 @@ import ( "errors" "github.com/ipfs/go-cid" + "golang.org/x/xerrors" "github.com/filecoin-project/go-state-types/abi" ) @@ -17,6 +18,14 @@ func (i UnpaddedByteIndex) Padded() PaddedByteIndex { return PaddedByteIndex(abi.UnpaddedPieceSize(i).Padded()) } +func (i UnpaddedByteIndex) Valid() error { + if i%127 != 0 { + return xerrors.Errorf("unpadded byte index must be a multiple of 127") + } + + return nil +} + type PaddedByteIndex uint64 type RGetter func(ctx context.Context, id abi.SectorID) (cid.Cid, error) diff --git a/extern/sector-storage/storiface/filetype.go b/extern/sector-storage/storiface/filetype.go index 3f7c7455e..2e0999022 100644 --- 
a/extern/sector-storage/storiface/filetype.go +++ b/extern/sector-storage/storiface/filetype.go @@ -73,6 +73,24 @@ func (t SectorFileType) SealSpaceUse(ssize abi.SectorSize) (uint64, error) { return need, nil } +func (t SectorFileType) StoreSpaceUse(ssize abi.SectorSize) (uint64, error) { + var need uint64 + for _, pathType := range PathTypes { + if !t.Has(pathType) { + continue + } + + oh, ok := FsOverheadFinalized[pathType] + if !ok { + return 0, xerrors.Errorf("no finalized overhead info for %s", pathType) + } + + need += uint64(oh) * uint64(ssize) / FSOverheadDen + } + + return need, nil +} + func (t SectorFileType) All() [FileTypes]bool { var out [FileTypes]bool diff --git a/extern/sector-storage/storiface/worker.go b/extern/sector-storage/storiface/worker.go index 49d1de357..d1373f4c5 100644 --- a/extern/sector-storage/storiface/worker.go +++ b/extern/sector-storage/storiface/worker.go @@ -4,7 +4,6 @@ import ( "context" "errors" "fmt" - "io" "time" "github.com/google/uuid" @@ -19,7 +18,12 @@ import ( type WorkerInfo struct { Hostname string - Resources WorkerResources + // IgnoreResources indicates whether the worker's available resources should + // be used ignored (true) or used (false) for the purposes of scheduling and + // task assignment. Only supported on local workers. Used for testing. + // Default should be false (zero value, i.e. resources taken into account). 
+ IgnoreResources bool + Resources WorkerResources } type WorkerResources struct { @@ -87,7 +91,6 @@ type WorkerCalls interface { ReleaseUnsealed(ctx context.Context, sector storage.SectorRef, safeToFree []storage.Range) (CallID, error) MoveStorage(ctx context.Context, sector storage.SectorRef, types SectorFileType) (CallID, error) UnsealPiece(context.Context, storage.SectorRef, UnpaddedByteIndex, abi.UnpaddedPieceSize, abi.SealRandomness, cid.Cid) (CallID, error) - ReadPiece(context.Context, io.Writer, storage.SectorRef, UnpaddedByteIndex, abi.UnpaddedPieceSize) (CallID, error) Fetch(context.Context, storage.SectorRef, SectorFileType, PathType, AcquireMode) (CallID, error) } diff --git a/extern/sector-storage/worker_local.go b/extern/sector-storage/worker_local.go index abbad4d9c..3e63f8659 100644 --- a/extern/sector-storage/worker_local.go +++ b/extern/sector-storage/worker_local.go @@ -20,7 +20,7 @@ import ( ffi "github.com/filecoin-project/filecoin-ffi" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-statestore" - storage "github.com/filecoin-project/specs-storage/storage" + "github.com/filecoin-project/specs-storage/storage" "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper" "github.com/filecoin-project/lotus/extern/sector-storage/sealtasks" @@ -33,6 +33,11 @@ var pathTypes = []storiface.SectorFileType{storiface.FTUnsealed, storiface.FTSea type WorkerConfig struct { TaskTypes []sealtasks.TaskType NoSwap bool + + // IgnoreResourceFiltering enables task distribution to happen on this + // worker regardless of its currently available resources. Used in testing + // with the local worker. + IgnoreResourceFiltering bool } // used do provide custom proofs impl (mostly used in testing) @@ -46,6 +51,9 @@ type LocalWorker struct { executor ExecutorFunc noSwap bool + // see equivalent field on WorkerConfig. 
+ ignoreResources bool + ct *workerCallTracker acceptTasks map[sealtasks.TaskType]struct{} running sync.WaitGroup @@ -71,12 +79,12 @@ func newLocalWorker(executor ExecutorFunc, wcfg WorkerConfig, store stores.Store ct: &workerCallTracker{ st: cst, }, - acceptTasks: acceptTasks, - executor: executor, - noSwap: wcfg.NoSwap, - - session: uuid.New(), - closing: make(chan struct{}), + acceptTasks: acceptTasks, + executor: executor, + noSwap: wcfg.NoSwap, + ignoreResources: wcfg.IgnoreResourceFiltering, + session: uuid.New(), + closing: make(chan struct{}), } if w.executor == nil { @@ -161,7 +169,6 @@ const ( ReleaseUnsealed ReturnType = "ReleaseUnsealed" MoveStorage ReturnType = "MoveStorage" UnsealPiece ReturnType = "UnsealPiece" - ReadPiece ReturnType = "ReadPiece" Fetch ReturnType = "Fetch" ) @@ -209,7 +216,6 @@ var returnFunc = map[ReturnType]func(context.Context, storiface.CallID, storifac ReleaseUnsealed: rfunc(storiface.WorkerReturn.ReturnReleaseUnsealed), MoveStorage: rfunc(storiface.WorkerReturn.ReturnMoveStorage), UnsealPiece: rfunc(storiface.WorkerReturn.ReturnUnsealPiece), - ReadPiece: rfunc(storiface.WorkerReturn.ReturnReadPiece), Fetch: rfunc(storiface.WorkerReturn.ReturnFetch), } @@ -430,6 +436,7 @@ func (l *LocalWorker) UnsealPiece(ctx context.Context, sector storage.SectorRef, } return l.asyncCall(ctx, sector, UnsealPiece, func(ctx context.Context, ci storiface.CallID) (interface{}, error) { + log.Debugf("worker will unseal piece now, sector=%+v", sector.ID) if err = sb.UnsealPiece(ctx, sector, index, size, randomness, cid); err != nil { return nil, xerrors.Errorf("unsealing sector: %w", err) } @@ -442,21 +449,12 @@ func (l *LocalWorker) UnsealPiece(ctx context.Context, sector storage.SectorRef, return nil, xerrors.Errorf("removing source data: %w", err) } + log.Debugf("worker has unsealed piece, sector=%+v", sector.ID) + return nil, nil }) } -func (l *LocalWorker) ReadPiece(ctx context.Context, writer io.Writer, sector storage.SectorRef, index 
storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (storiface.CallID, error) { - sb, err := l.executor() - if err != nil { - return storiface.UndefCall, err - } - - return l.asyncCall(ctx, sector, ReadPiece, func(ctx context.Context, ci storiface.CallID) (interface{}, error) { - return sb.ReadPiece(ctx, writer, sector, index, size) - }) -} - func (l *LocalWorker) TaskTypes(context.Context) (map[sealtasks.TaskType]struct{}, error) { l.taskLk.Lock() defer l.taskLk.Unlock() @@ -511,7 +509,8 @@ func (l *LocalWorker) Info(context.Context) (storiface.WorkerInfo, error) { } return storiface.WorkerInfo{ - Hostname: hostname, + Hostname: hostname, + IgnoreResources: l.ignoreResources, Resources: storiface.WorkerResources{ MemPhysical: mem.Total, MemSwap: memSwap, diff --git a/extern/sector-storage/worker_tracked.go b/extern/sector-storage/worker_tracked.go index aeb3eea74..2160dd8e6 100644 --- a/extern/sector-storage/worker_tracked.go +++ b/extern/sector-storage/worker_tracked.go @@ -2,7 +2,6 @@ package sectorstorage import ( "context" - "io" "sync" "time" @@ -156,8 +155,4 @@ func (t *trackedWorker) UnsealPiece(ctx context.Context, id storage.SectorRef, i return t.tracker.track(ctx, t.wid, t.workerInfo, id, sealtasks.TTUnseal)(t.Worker.UnsealPiece(ctx, id, index, size, randomness, cid)) } -func (t *trackedWorker) ReadPiece(ctx context.Context, writer io.Writer, id storage.SectorRef, index storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (storiface.CallID, error) { - return t.tracker.track(ctx, t.wid, t.workerInfo, id, sealtasks.TTReadUnsealed)(t.Worker.ReadPiece(ctx, writer, id, index, size)) -} - var _ Worker = &trackedWorker{} diff --git a/extern/storage-sealing/cbor_gen.go b/extern/storage-sealing/cbor_gen.go index 9e12b8649..b71c2863c 100644 --- a/extern/storage-sealing/cbor_gen.go +++ b/extern/storage-sealing/cbor_gen.go @@ -8,7 +8,7 @@ import ( "sort" abi "github.com/filecoin-project/go-state-types/abi" - market 
"github.com/filecoin-project/specs-actors/actors/builtin/market" + api "github.com/filecoin-project/lotus/api" miner "github.com/filecoin-project/specs-actors/actors/builtin/miner" cid "github.com/ipfs/go-cid" cbg "github.com/whyrusleeping/cbor-gen" @@ -46,7 +46,7 @@ func (t *Piece) MarshalCBOR(w io.Writer) error { return err } - // t.DealInfo (sealing.DealInfo) (struct) + // t.DealInfo (api.PieceDealInfo) (struct) if len("DealInfo") > cbg.MaxLength { return xerrors.Errorf("Value in field \"DealInfo\" was too long") } @@ -107,7 +107,7 @@ func (t *Piece) UnmarshalCBOR(r io.Reader) error { } } - // t.DealInfo (sealing.DealInfo) (struct) + // t.DealInfo (api.PieceDealInfo) (struct) case "DealInfo": { @@ -120,7 +120,7 @@ func (t *Piece) UnmarshalCBOR(r io.Reader) error { if err := br.UnreadByte(); err != nil { return err } - t.DealInfo = new(DealInfo) + t.DealInfo = new(api.PieceDealInfo) if err := t.DealInfo.UnmarshalCBOR(br); err != nil { return xerrors.Errorf("unmarshaling t.DealInfo pointer: %w", err) } @@ -136,384 +136,6 @@ func (t *Piece) UnmarshalCBOR(r io.Reader) error { return nil } -func (t *DealInfo) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write([]byte{165}); err != nil { - return err - } - - scratch := make([]byte, 9) - - // t.PublishCid (cid.Cid) (struct) - if len("PublishCid") > cbg.MaxLength { - return xerrors.Errorf("Value in field \"PublishCid\" was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("PublishCid"))); err != nil { - return err - } - if _, err := io.WriteString(w, string("PublishCid")); err != nil { - return err - } - - if t.PublishCid == nil { - if _, err := w.Write(cbg.CborNull); err != nil { - return err - } - } else { - if err := cbg.WriteCidBuf(scratch, w, *t.PublishCid); err != nil { - return xerrors.Errorf("failed to write cid field t.PublishCid: %w", err) - } - } - - // t.DealID (abi.DealID) (uint64) - if 
len("DealID") > cbg.MaxLength { - return xerrors.Errorf("Value in field \"DealID\" was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("DealID"))); err != nil { - return err - } - if _, err := io.WriteString(w, string("DealID")); err != nil { - return err - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.DealID)); err != nil { - return err - } - - // t.DealProposal (market.DealProposal) (struct) - if len("DealProposal") > cbg.MaxLength { - return xerrors.Errorf("Value in field \"DealProposal\" was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("DealProposal"))); err != nil { - return err - } - if _, err := io.WriteString(w, string("DealProposal")); err != nil { - return err - } - - if err := t.DealProposal.MarshalCBOR(w); err != nil { - return err - } - - // t.DealSchedule (sealing.DealSchedule) (struct) - if len("DealSchedule") > cbg.MaxLength { - return xerrors.Errorf("Value in field \"DealSchedule\" was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("DealSchedule"))); err != nil { - return err - } - if _, err := io.WriteString(w, string("DealSchedule")); err != nil { - return err - } - - if err := t.DealSchedule.MarshalCBOR(w); err != nil { - return err - } - - // t.KeepUnsealed (bool) (bool) - if len("KeepUnsealed") > cbg.MaxLength { - return xerrors.Errorf("Value in field \"KeepUnsealed\" was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("KeepUnsealed"))); err != nil { - return err - } - if _, err := io.WriteString(w, string("KeepUnsealed")); err != nil { - return err - } - - if err := cbg.WriteBool(w, t.KeepUnsealed); err != nil { - return err - } - return nil -} - -func (t *DealInfo) UnmarshalCBOR(r io.Reader) error { - *t = DealInfo{} - - br := cbg.GetPeeker(r) - scratch := make([]byte, 8) - - maj, extra, err := 
cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajMap { - return fmt.Errorf("cbor input should be of type map") - } - - if extra > cbg.MaxLength { - return fmt.Errorf("DealInfo: map struct too large (%d)", extra) - } - - var name string - n := extra - - for i := uint64(0); i < n; i++ { - - { - sval, err := cbg.ReadStringBuf(br, scratch) - if err != nil { - return err - } - - name = string(sval) - } - - switch name { - // t.PublishCid (cid.Cid) (struct) - case "PublishCid": - - { - - b, err := br.ReadByte() - if err != nil { - return err - } - if b != cbg.CborNull[0] { - if err := br.UnreadByte(); err != nil { - return err - } - - c, err := cbg.ReadCid(br) - if err != nil { - return xerrors.Errorf("failed to read cid field t.PublishCid: %w", err) - } - - t.PublishCid = &c - } - - } - // t.DealID (abi.DealID) (uint64) - case "DealID": - - { - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.DealID = abi.DealID(extra) - - } - // t.DealProposal (market.DealProposal) (struct) - case "DealProposal": - - { - - b, err := br.ReadByte() - if err != nil { - return err - } - if b != cbg.CborNull[0] { - if err := br.UnreadByte(); err != nil { - return err - } - t.DealProposal = new(market.DealProposal) - if err := t.DealProposal.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.DealProposal pointer: %w", err) - } - } - - } - // t.DealSchedule (sealing.DealSchedule) (struct) - case "DealSchedule": - - { - - if err := t.DealSchedule.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.DealSchedule: %w", err) - } - - } - // t.KeepUnsealed (bool) (bool) - case "KeepUnsealed": - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajOther { - return fmt.Errorf("booleans must be major type 7") - } - switch extra { - case 
20: - t.KeepUnsealed = false - case 21: - t.KeepUnsealed = true - default: - return fmt.Errorf("booleans are either major type 7, value 20 or 21 (got %d)", extra) - } - - default: - // Field doesn't exist on this type, so ignore it - cbg.ScanForLinks(r, func(cid.Cid) {}) - } - } - - return nil -} -func (t *DealSchedule) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write([]byte{162}); err != nil { - return err - } - - scratch := make([]byte, 9) - - // t.StartEpoch (abi.ChainEpoch) (int64) - if len("StartEpoch") > cbg.MaxLength { - return xerrors.Errorf("Value in field \"StartEpoch\" was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("StartEpoch"))); err != nil { - return err - } - if _, err := io.WriteString(w, string("StartEpoch")); err != nil { - return err - } - - if t.StartEpoch >= 0 { - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.StartEpoch)); err != nil { - return err - } - } else { - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.StartEpoch-1)); err != nil { - return err - } - } - - // t.EndEpoch (abi.ChainEpoch) (int64) - if len("EndEpoch") > cbg.MaxLength { - return xerrors.Errorf("Value in field \"EndEpoch\" was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("EndEpoch"))); err != nil { - return err - } - if _, err := io.WriteString(w, string("EndEpoch")); err != nil { - return err - } - - if t.EndEpoch >= 0 { - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.EndEpoch)); err != nil { - return err - } - } else { - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.EndEpoch-1)); err != nil { - return err - } - } - return nil -} - -func (t *DealSchedule) UnmarshalCBOR(r io.Reader) error { - *t = DealSchedule{} - - br := cbg.GetPeeker(r) - scratch := make([]byte, 
8) - - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajMap { - return fmt.Errorf("cbor input should be of type map") - } - - if extra > cbg.MaxLength { - return fmt.Errorf("DealSchedule: map struct too large (%d)", extra) - } - - var name string - n := extra - - for i := uint64(0); i < n; i++ { - - { - sval, err := cbg.ReadStringBuf(br, scratch) - if err != nil { - return err - } - - name = string(sval) - } - - switch name { - // t.StartEpoch (abi.ChainEpoch) (int64) - case "StartEpoch": - { - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - var extraI int64 - if err != nil { - return err - } - switch maj { - case cbg.MajUnsignedInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 positive overflow") - } - case cbg.MajNegativeInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 negative oveflow") - } - extraI = -1 - extraI - default: - return fmt.Errorf("wrong type for int64 field: %d", maj) - } - - t.StartEpoch = abi.ChainEpoch(extraI) - } - // t.EndEpoch (abi.ChainEpoch) (int64) - case "EndEpoch": - { - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - var extraI int64 - if err != nil { - return err - } - switch maj { - case cbg.MajUnsignedInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 positive overflow") - } - case cbg.MajNegativeInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 negative oveflow") - } - extraI = -1 - extraI - default: - return fmt.Errorf("wrong type for int64 field: %d", maj) - } - - t.EndEpoch = abi.ChainEpoch(extraI) - } - - default: - // Field doesn't exist on this type, so ignore it - cbg.ScanForLinks(r, func(cid.Cid) {}) - } - } - - return nil -} func (t *SectorInfo) MarshalCBOR(w io.Writer) error { if t == nil { _, err := w.Write(cbg.CborNull) diff --git a/extern/storage-sealing/checks.go b/extern/storage-sealing/checks.go index 5ee39e58f..5ba23026d 100644 --- 
a/extern/storage-sealing/checks.go +++ b/extern/storage-sealing/checks.go @@ -62,7 +62,7 @@ func checkPieces(ctx context.Context, maddr address.Address, si SectorInfo, api } if proposal.PieceCID != p.Piece.PieceCID { - return &ErrInvalidDeals{xerrors.Errorf("piece %d (of %d) of sector %d refers deal %d with wrong PieceCID: %x != %x", i, len(si.Pieces), si.SectorNumber, p.DealInfo.DealID, p.Piece.PieceCID, proposal.PieceCID)} + return &ErrInvalidDeals{xerrors.Errorf("piece %d (of %d) of sector %d refers deal %d with wrong PieceCID: %s != %s", i, len(si.Pieces), si.SectorNumber, p.DealInfo.DealID, p.Piece.PieceCID, proposal.PieceCID)} } if p.Piece.Size != proposal.PieceSize { diff --git a/extern/storage-sealing/commit_batch.go b/extern/storage-sealing/commit_batch.go index 57379414f..e9ace820e 100644 --- a/extern/storage-sealing/commit_batch.go +++ b/extern/storage-sealing/commit_batch.go @@ -7,10 +7,6 @@ import ( "sync" "time" - "github.com/filecoin-project/go-state-types/network" - - "github.com/filecoin-project/lotus/chain/actors" - "github.com/ipfs/go-cid" "golang.org/x/xerrors" @@ -18,13 +14,16 @@ import ( "github.com/filecoin-project/go-bitfield" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/go-state-types/network" miner5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/miner" proof5 "github.com/filecoin-project/specs-actors/v5/actors/runtime/proof" "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/chain/actors" "github.com/filecoin-project/lotus/chain/actors/builtin/miner" "github.com/filecoin-project/lotus/chain/actors/policy" + "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper" "github.com/filecoin-project/lotus/extern/storage-sealing/sealiface" "github.com/filecoin-project/lotus/node/config" @@ -35,6 +34,8 @@ const arp = 
abi.RegisteredAggregationProof_SnarkPackV1 var aggFeeNum = big.NewInt(110) var aggFeeDen = big.NewInt(100) +//go:generate go run github.com/golang/mock/mockgen -destination=mocks/mock_commit_batcher.go -package=mocks . CommitBatcherApi + type CommitBatcherApi interface { SendMsg(ctx context.Context, from, to address.Address, method abi.MethodNum, value, maxFee abi.TokenAmount, params []byte) (cid.Cid, error) StateMinerInfo(context.Context, address.Address, TipSetToken) (miner.MinerInfo, error) @@ -44,12 +45,13 @@ type CommitBatcherApi interface { StateSectorPreCommitInfo(ctx context.Context, maddr address.Address, sectorNumber abi.SectorNumber, tok TipSetToken) (*miner.SectorPreCommitOnChainInfo, error) StateMinerInitialPledgeCollateral(context.Context, address.Address, miner.SectorPreCommitInfo, TipSetToken) (big.Int, error) StateNetworkVersion(ctx context.Context, tok TipSetToken) (network.Version, error) + StateMinerAvailableBalance(context.Context, address.Address, TipSetToken) (big.Int, error) } type AggregateInput struct { - spt abi.RegisteredSealProof - info proof5.AggregateSealVerifyInfo - proof []byte + Spt abi.RegisteredSealProof + Info proof5.AggregateSealVerifyInfo + Proof []byte } type CommitBatcher struct { @@ -223,7 +225,7 @@ func (b *CommitBatcher) maybeStartBatch(notif bool) ([]sealiface.CommitBatchRes, } if individual { - res, err = b.processIndividually() + res, err = b.processIndividually(cfg) } else { res, err = b.processBatch(cfg) } @@ -287,7 +289,7 @@ func (b *CommitBatcher) processBatch(cfg sealiface.Config) ([]sealiface.CommitBa collateral = big.Add(collateral, sc) params.SectorNumbers.Set(uint64(id)) - infos = append(infos, p.info) + infos = append(infos, p.Info) } sort.Slice(infos, func(i, j int) bool { @@ -295,7 +297,7 @@ func (b *CommitBatcher) processBatch(cfg sealiface.Config) ([]sealiface.CommitBa }) for _, info := range infos { - proofs = append(proofs, b.todo[info.Number].proof) + proofs = append(proofs, b.todo[info.Number].Proof) 
} mid, err := address.IDFromAddress(b.maddr) @@ -305,7 +307,7 @@ func (b *CommitBatcher) processBatch(cfg sealiface.Config) ([]sealiface.CommitBa params.AggregateProof, err = b.prover.AggregateSealProofs(proof5.AggregateSealVerifyProofAndInfos{ Miner: abi.ActorID(mid), - SealProof: b.todo[infos[0].Number].spt, + SealProof: b.todo[infos[0].Number].Spt, AggregateProof: arp, Infos: infos, }, proofs) @@ -339,6 +341,10 @@ func (b *CommitBatcher) processBatch(cfg sealiface.Config) ([]sealiface.CommitBa aggFee := big.Div(big.Mul(policy.AggregateNetworkFee(nv, len(infos), bf), aggFeeNum), aggFeeDen) needFunds := big.Add(collateral, aggFee) + needFunds, err = collateralSendAmount(b.mctx, b.api, b.maddr, cfg, needFunds) + if err != nil { + return []sealiface.CommitBatchRes{res}, err + } goodFunds := big.Add(maxFee, needFunds) @@ -359,12 +365,26 @@ func (b *CommitBatcher) processBatch(cfg sealiface.Config) ([]sealiface.CommitBa return []sealiface.CommitBatchRes{res}, nil } -func (b *CommitBatcher) processIndividually() ([]sealiface.CommitBatchRes, error) { +func (b *CommitBatcher) processIndividually(cfg sealiface.Config) ([]sealiface.CommitBatchRes, error) { mi, err := b.api.StateMinerInfo(b.mctx, b.maddr, nil) if err != nil { return nil, xerrors.Errorf("couldn't get miner info: %w", err) } + avail := types.TotalFilecoinInt + + if cfg.CollateralFromMinerBalance && !cfg.DisableCollateralFallback { + avail, err = b.api.StateMinerAvailableBalance(b.mctx, b.maddr, nil) + if err != nil { + return nil, xerrors.Errorf("getting available miner balance: %w", err) + } + + avail = big.Sub(avail, cfg.AvailableBalanceBuffer) + if avail.LessThan(big.Zero()) { + avail = big.Zero() + } + } + tok, _, err := b.api.ChainHead(b.mctx) if err != nil { return nil, err @@ -378,7 +398,7 @@ func (b *CommitBatcher) processIndividually() ([]sealiface.CommitBatchRes, error FailedSectors: map[abi.SectorNumber]string{}, } - mcid, err := b.processSingle(mi, sn, info, tok) + mcid, err := 
b.processSingle(cfg, mi, &avail, sn, info, tok) if err != nil { log.Errorf("process single error: %+v", err) // todo: return to user r.FailedSectors[sn] = err.Error() @@ -392,11 +412,11 @@ func (b *CommitBatcher) processIndividually() ([]sealiface.CommitBatchRes, error return res, nil } -func (b *CommitBatcher) processSingle(mi miner.MinerInfo, sn abi.SectorNumber, info AggregateInput, tok TipSetToken) (cid.Cid, error) { +func (b *CommitBatcher) processSingle(cfg sealiface.Config, mi miner.MinerInfo, avail *abi.TokenAmount, sn abi.SectorNumber, info AggregateInput, tok TipSetToken) (cid.Cid, error) { enc := new(bytes.Buffer) params := &miner.ProveCommitSectorParams{ SectorNumber: sn, - Proof: info.proof, + Proof: info.Proof, } if err := params.MarshalCBOR(enc); err != nil { @@ -408,6 +428,19 @@ func (b *CommitBatcher) processSingle(mi miner.MinerInfo, sn abi.SectorNumber, i return cid.Undef, err } + if cfg.CollateralFromMinerBalance { + c := big.Sub(collateral, *avail) + *avail = big.Sub(*avail, collateral) + collateral = c + + if collateral.LessThan(big.Zero()) { + collateral = big.Zero() + } + if (*avail).LessThan(big.Zero()) { + *avail = big.Zero() + } + } + goodFunds := big.Add(collateral, big.Int(b.feeCfg.MaxCommitGasFee)) from, _, err := b.addrSel(b.mctx, mi, api.CommitAddr, goodFunds, collateral) @@ -481,7 +514,7 @@ func (b *CommitBatcher) Pending(ctx context.Context) ([]abi.SectorID, error) { for _, s := range b.todo { res = append(res, abi.SectorID{ Miner: abi.ActorID(mid), - Number: s.info.Number, + Number: s.Info.Number, }) } diff --git a/extern/storage-sealing/commit_batch_test.go b/extern/storage-sealing/commit_batch_test.go new file mode 100644 index 000000000..aea6d455e --- /dev/null +++ b/extern/storage-sealing/commit_batch_test.go @@ -0,0 +1,383 @@ +package sealing_test + +import ( + "bytes" + "context" + "sort" + "sync" + "testing" + "time" + + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/require" + + 
"github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/go-state-types/network" + miner5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/miner" + proof5 "github.com/filecoin-project/specs-actors/v5/actors/runtime/proof" + + "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/chain/actors/builtin/miner" + "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper" + sealing "github.com/filecoin-project/lotus/extern/storage-sealing" + "github.com/filecoin-project/lotus/extern/storage-sealing/mocks" + "github.com/filecoin-project/lotus/extern/storage-sealing/sealiface" +) + +func TestCommitBatcher(t *testing.T) { + t0123, err := address.NewFromString("t0123") + require.NoError(t, err) + + ctx := context.Background() + + as := func(ctx context.Context, mi miner.MinerInfo, use api.AddrUse, goodFunds, minFunds abi.TokenAmount) (address.Address, abi.TokenAmount, error) { + return t0123, big.Zero(), nil + } + + maxBatch := miner5.MaxAggregatedSectors + minBatch := miner5.MinAggregatedSectors + + cfg := func() (sealiface.Config, error) { + return sealiface.Config{ + MaxWaitDealsSectors: 2, + MaxSealingSectors: 0, + MaxSealingSectorsForDeals: 0, + WaitDealsDelay: time.Hour * 6, + AlwaysKeepUnsealedCopy: true, + + BatchPreCommits: true, + MaxPreCommitBatch: miner5.PreCommitSectorBatchMaxSize, + PreCommitBatchWait: 24 * time.Hour, + PreCommitBatchSlack: 3 * time.Hour, + + AggregateCommits: true, + MinCommitBatch: minBatch, + MaxCommitBatch: maxBatch, + CommitBatchWait: 24 * time.Hour, + CommitBatchSlack: 1 * time.Hour, + + AggregateAboveBaseFee: types.BigMul(types.PicoFil, types.NewInt(150)), // 0.15 nFIL + + TerminateBatchMin: 1, + TerminateBatchMax: 100, + TerminateBatchWait: 5 * time.Minute, + }, nil + } + + type promise func(t *testing.T) + type action func(t 
*testing.T, s *mocks.MockCommitBatcherApi, pcb *sealing.CommitBatcher) promise + + actions := func(as ...action) action { + return func(t *testing.T, s *mocks.MockCommitBatcherApi, pcb *sealing.CommitBatcher) promise { + var ps []promise + for _, a := range as { + p := a(t, s, pcb) + if p != nil { + ps = append(ps, p) + } + } + + if len(ps) > 0 { + return func(t *testing.T) { + for _, p := range ps { + p(t) + } + } + } + return nil + } + } + + addSector := func(sn abi.SectorNumber) action { + return func(t *testing.T, s *mocks.MockCommitBatcherApi, pcb *sealing.CommitBatcher) promise { + var pcres sealiface.CommitBatchRes + var pcerr error + done := sync.Mutex{} + done.Lock() + + si := sealing.SectorInfo{ + SectorNumber: sn, + } + + s.EXPECT().ChainHead(gomock.Any()).Return(nil, abi.ChainEpoch(1), nil) + s.EXPECT().StateNetworkVersion(gomock.Any(), gomock.Any()).Return(network.Version13, nil) + s.EXPECT().StateSectorPreCommitInfo(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(&miner.SectorPreCommitOnChainInfo{ + PreCommitDeposit: big.Zero(), + }, nil) + + go func() { + defer done.Unlock() + pcres, pcerr = pcb.AddCommit(ctx, si, sealing.AggregateInput{ + Info: proof5.AggregateSealVerifyInfo{ + Number: sn, + }, + }) + }() + + return func(t *testing.T) { + done.Lock() + require.NoError(t, pcerr) + require.Empty(t, pcres.Error) + require.Contains(t, pcres.Sectors, si.SectorNumber) + } + } + } + + addSectors := func(sectors []abi.SectorNumber) action { + as := make([]action, len(sectors)) + for i, sector := range sectors { + as[i] = addSector(sector) + } + return actions(as...) 
+ } + + waitPending := func(n int) action { + return func(t *testing.T, s *mocks.MockCommitBatcherApi, pcb *sealing.CommitBatcher) promise { + require.Eventually(t, func() bool { + p, err := pcb.Pending(ctx) + require.NoError(t, err) + return len(p) == n + }, time.Second*5, 10*time.Millisecond) + + return nil + } + } + + expectSend := func(expect []abi.SectorNumber, aboveBalancer, failOnePCI bool) action { + return func(t *testing.T, s *mocks.MockCommitBatcherApi, pcb *sealing.CommitBatcher) promise { + s.EXPECT().StateMinerInfo(gomock.Any(), gomock.Any(), gomock.Any()).Return(miner.MinerInfo{Owner: t0123, Worker: t0123}, nil) + + ti := len(expect) + batch := false + if ti >= minBatch { + batch = true + ti = 1 + } + + basefee := types.PicoFil + if aboveBalancer { + basefee = types.NanoFil + } + + if batch { + s.EXPECT().ChainHead(gomock.Any()).Return(nil, abi.ChainEpoch(1), nil) + s.EXPECT().ChainBaseFee(gomock.Any(), gomock.Any()).Return(basefee, nil) + } + + if !aboveBalancer { + batch = false + ti = len(expect) + } + + s.EXPECT().ChainHead(gomock.Any()).Return(nil, abi.ChainEpoch(1), nil) + + pciC := len(expect) + if failOnePCI { + s.EXPECT().StateSectorPreCommitInfo(gomock.Any(), gomock.Any(), abi.SectorNumber(1), gomock.Any()).Return(nil, nil).Times(1) // not found + pciC = len(expect) - 1 + if !batch { + ti-- + } + } + s.EXPECT().StateSectorPreCommitInfo(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(&miner.SectorPreCommitOnChainInfo{ + PreCommitDeposit: big.Zero(), + }, nil).Times(pciC) + s.EXPECT().StateMinerInitialPledgeCollateral(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(big.Zero(), nil).Times(pciC) + + if batch { + s.EXPECT().StateNetworkVersion(gomock.Any(), gomock.Any()).Return(network.Version13, nil) + s.EXPECT().ChainBaseFee(gomock.Any(), gomock.Any()).Return(basefee, nil) + } + + s.EXPECT().SendMsg(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), funMatcher(func(i interface{}) 
bool { + b := i.([]byte) + if batch { + var params miner5.ProveCommitAggregateParams + require.NoError(t, params.UnmarshalCBOR(bytes.NewReader(b))) + for _, number := range expect { + set, err := params.SectorNumbers.IsSet(uint64(number)) + require.NoError(t, err) + require.True(t, set) + } + } else { + var params miner5.ProveCommitSectorParams + require.NoError(t, params.UnmarshalCBOR(bytes.NewReader(b))) + } + return true + })).Times(ti) + return nil + } + } + + flush := func(expect []abi.SectorNumber, aboveBalancer, failOnePCI bool) action { + return func(t *testing.T, s *mocks.MockCommitBatcherApi, pcb *sealing.CommitBatcher) promise { + _ = expectSend(expect, aboveBalancer, failOnePCI)(t, s, pcb) + + batch := len(expect) >= minBatch && aboveBalancer + + r, err := pcb.Flush(ctx) + require.NoError(t, err) + if batch { + require.Len(t, r, 1) + require.Empty(t, r[0].Error) + sort.Slice(r[0].Sectors, func(i, j int) bool { + return r[0].Sectors[i] < r[0].Sectors[j] + }) + require.Equal(t, expect, r[0].Sectors) + if !failOnePCI { + require.Len(t, r[0].FailedSectors, 0) + } else { + require.Len(t, r[0].FailedSectors, 1) + _, found := r[0].FailedSectors[1] + require.True(t, found) + } + } else { + require.Len(t, r, len(expect)) + for _, res := range r { + require.Len(t, res.Sectors, 1) + require.Empty(t, res.Error) + } + sort.Slice(r, func(i, j int) bool { + return r[i].Sectors[0] < r[j].Sectors[0] + }) + for i, res := range r { + require.Equal(t, abi.SectorNumber(i), res.Sectors[0]) + if failOnePCI && res.Sectors[0] == 1 { + require.Len(t, res.FailedSectors, 1) + _, found := res.FailedSectors[1] + require.True(t, found) + } else { + require.Empty(t, res.FailedSectors) + } + } + } + + return nil + } + } + + getSectors := func(n int) []abi.SectorNumber { + out := make([]abi.SectorNumber, n) + for i := range out { + out[i] = abi.SectorNumber(i) + } + return out + } + + tcs := map[string]struct { + actions []action + }{ + "addSingle-aboveBalancer": { + actions: []action{ 
+ addSector(0), + waitPending(1), + flush([]abi.SectorNumber{0}, true, false), + }, + }, + "addTwo-aboveBalancer": { + actions: []action{ + addSectors(getSectors(2)), + waitPending(2), + flush(getSectors(2), true, false), + }, + }, + "addAte-aboveBalancer": { + actions: []action{ + addSectors(getSectors(8)), + waitPending(8), + flush(getSectors(8), true, false), + }, + }, + "addMax-aboveBalancer": { + actions: []action{ + expectSend(getSectors(maxBatch), true, false), + addSectors(getSectors(maxBatch)), + }, + }, + "addSingle-belowBalancer": { + actions: []action{ + addSector(0), + waitPending(1), + flush([]abi.SectorNumber{0}, false, false), + }, + }, + "addTwo-belowBalancer": { + actions: []action{ + addSectors(getSectors(2)), + waitPending(2), + flush(getSectors(2), false, false), + }, + }, + "addAte-belowBalancer": { + actions: []action{ + addSectors(getSectors(8)), + waitPending(8), + flush(getSectors(8), false, false), + }, + }, + "addMax-belowBalancer": { + actions: []action{ + expectSend(getSectors(maxBatch), false, false), + addSectors(getSectors(maxBatch)), + }, + }, + + "addAte-aboveBalancer-failOne": { + actions: []action{ + addSectors(getSectors(8)), + waitPending(8), + flush(getSectors(8), true, true), + }, + }, + "addAte-belowBalancer-failOne": { + actions: []action{ + addSectors(getSectors(8)), + waitPending(8), + flush(getSectors(8), false, true), + }, + }, + } + + for name, tc := range tcs { + tc := tc + + t.Run(name, func(t *testing.T) { + // create go mock controller here + mockCtrl := gomock.NewController(t) + // when test is done, assert expectations on all mock objects. 
+ defer mockCtrl.Finish() + + // create them mocks + pcapi := mocks.NewMockCommitBatcherApi(mockCtrl) + + pcb := sealing.NewCommitBatcher(ctx, t0123, pcapi, as, fc, cfg, &fakeProver{}) + + var promises []promise + + for _, a := range tc.actions { + p := a(t, pcapi, pcb) + if p != nil { + promises = append(promises, p) + } + } + + for _, p := range promises { + p(t) + } + + err := pcb.Stop(ctx) + require.NoError(t, err) + }) + } +} + +type fakeProver struct{} + +func (f fakeProver) AggregateSealProofs(aggregateInfo proof5.AggregateSealVerifyProofAndInfos, proofs [][]byte) ([]byte, error) { + return []byte("Trust me, I'm a proof"), nil +} + +var _ ffiwrapper.Prover = &fakeProver{} diff --git a/extern/storage-sealing/currentdealinfo.go b/extern/storage-sealing/currentdealinfo.go index 44fa68b54..ed93512c2 100644 --- a/extern/storage-sealing/currentdealinfo.go +++ b/extern/storage-sealing/currentdealinfo.go @@ -69,6 +69,10 @@ func (mgr *CurrentDealInfoManager) dealIDFromPublishDealsMsg(ctx context.Context return dealID, nil, xerrors.Errorf("looking for publish deal message %s: search msg failed: %w", publishCid, err) } + if lookup == nil { + return dealID, nil, xerrors.Errorf("looking for publish deal message %s: not found", publishCid) + } + if lookup.Receipt.ExitCode != exitcode.Ok { return dealID, nil, xerrors.Errorf("looking for publish deal message %s: non-ok exit code: %s", publishCid, lookup.Receipt.ExitCode) } diff --git a/extern/storage-sealing/currentdealinfo_test.go b/extern/storage-sealing/currentdealinfo_test.go index ee51d8c75..b28dd461a 100644 --- a/extern/storage-sealing/currentdealinfo_test.go +++ b/extern/storage-sealing/currentdealinfo_test.go @@ -25,7 +25,7 @@ import ( "github.com/stretchr/testify/require" ) -var errNotFound = errors.New("Could not find") +var errNotFound = errors.New("could not find") func TestGetCurrentDealInfo(t *testing.T) { ctx := context.Background() @@ -180,6 +180,12 @@ func TestGetCurrentDealInfo(t *testing.T) { 
expectedDealID: zeroDealID, expectedError: xerrors.Errorf("looking for publish deal message %s: search msg failed: something went wrong", dummyCid), }, + "search message not found": { + publishCid: dummyCid, + targetProposal: &proposal, + expectedDealID: zeroDealID, + expectedError: xerrors.Errorf("looking for publish deal message %s: not found", dummyCid), + }, "return code not ok": { publishCid: dummyCid, searchMessageLookup: &MsgLookup{ diff --git a/extern/storage-sealing/fsm.go b/extern/storage-sealing/fsm.go index 4d864980b..d04aef790 100644 --- a/extern/storage-sealing/fsm.go +++ b/extern/storage-sealing/fsm.go @@ -157,7 +157,7 @@ var fsmPlanners = map[SectorState]func(events []statemachine.Event, state *Secto on(SectorSealPreCommit1Failed{}, SealPreCommit1Failed), ), CommitFinalizeFailed: planOne( - on(SectorRetryFinalize{}, CommitFinalizeFailed), + on(SectorRetryFinalize{}, CommitFinalize), ), CommitFailed: planOne( on(SectorSealPreCommit1Failed{}, SealPreCommit1Failed), @@ -461,15 +461,16 @@ func (m *Sealing) onUpdateSector(ctx context.Context, state *SectorInfo) error { if err != nil { return xerrors.Errorf("getting config: %w", err) } - sp, err := m.currentSealProof(ctx) - if err != nil { - return xerrors.Errorf("getting seal proof type: %w", err) - } shouldUpdateInput := m.stats.updateSector(cfg, m.minerSectorID(state.SectorNumber), state.State) // trigger more input processing when we've dipped below max sealing limits if shouldUpdateInput { + sp, err := m.currentSealProof(ctx) + if err != nil { + return xerrors.Errorf("getting seal proof type: %w", err) + } + go func() { m.inputLk.Lock() defer m.inputLk.Unlock() diff --git a/extern/storage-sealing/fsm_test.go b/extern/storage-sealing/fsm_test.go index 644ddedb4..1d2df2784 100644 --- a/extern/storage-sealing/fsm_test.go +++ b/extern/storage-sealing/fsm_test.go @@ -154,6 +154,45 @@ func TestHappyPathFinalizeEarly(t *testing.T) { } } +func TestCommitFinalizeFailed(t *testing.T) { + var notif []struct{ 
before, after SectorInfo } + ma, _ := address.NewIDAddress(55151) + m := test{ + s: &Sealing{ + maddr: ma, + stats: SectorStats{ + bySector: map[abi.SectorID]statSectorState{}, + }, + notifee: func(before, after SectorInfo) { + notif = append(notif, struct{ before, after SectorInfo }{before, after}) + }, + }, + t: t, + state: &SectorInfo{State: Committing}, + } + + m.planSingle(SectorProofReady{}) + require.Equal(m.t, m.state.State, CommitFinalize) + + m.planSingle(SectorFinalizeFailed{}) + require.Equal(m.t, m.state.State, CommitFinalizeFailed) + + m.planSingle(SectorRetryFinalize{}) + require.Equal(m.t, m.state.State, CommitFinalize) + + m.planSingle(SectorFinalized{}) + require.Equal(m.t, m.state.State, SubmitCommit) + + expected := []SectorState{Committing, CommitFinalize, CommitFinalizeFailed, CommitFinalize, SubmitCommit} + for i, n := range notif { + if n.before.State != expected[i] { + t.Fatalf("expected before state: %s, got: %s", expected[i], n.before.State) + } + if n.after.State != expected[i+1] { + t.Fatalf("expected after state: %s, got: %s", expected[i+1], n.after.State) + } + } +} func TestSeedRevert(t *testing.T) { ma, _ := address.NewIDAddress(55151) m := test{ @@ -277,3 +316,43 @@ func TestBrokenState(t *testing.T) { } } } + +func TestTicketExpired(t *testing.T) { + var notif []struct{ before, after SectorInfo } + ma, _ := address.NewIDAddress(55151) + m := test{ + s: &Sealing{ + maddr: ma, + stats: SectorStats{ + bySector: map[abi.SectorID]statSectorState{}, + }, + notifee: func(before, after SectorInfo) { + notif = append(notif, struct{ before, after SectorInfo }{before, after}) + }, + }, + t: t, + state: &SectorInfo{State: Packing}, + } + + m.planSingle(SectorPacked{}) + require.Equal(m.t, m.state.State, GetTicket) + + m.planSingle(SectorTicket{}) + require.Equal(m.t, m.state.State, PreCommit1) + + expired := checkTicketExpired(0, MaxTicketAge+1) + require.True(t, expired) + + m.planSingle(SectorOldTicket{}) + require.Equal(m.t, m.state.State, 
GetTicket) + + expected := []SectorState{Packing, GetTicket, PreCommit1, GetTicket} + for i, n := range notif { + if n.before.State != expected[i] { + t.Fatalf("expected before state: %s, got: %s", expected[i], n.before.State) + } + if n.after.State != expected[i+1] { + t.Fatalf("expected after state: %s, got: %s", expected[i+1], n.after.State) + } + } +} diff --git a/extern/storage-sealing/gen/main.go b/extern/storage-sealing/gen/main.go index 97c2bacd5..825ce8d28 100644 --- a/extern/storage-sealing/gen/main.go +++ b/extern/storage-sealing/gen/main.go @@ -12,8 +12,6 @@ import ( func main() { err := gen.WriteMapEncodersToFile("./cbor_gen.go", "sealing", sealing.Piece{}, - sealing.DealInfo{}, - sealing.DealSchedule{}, sealing.SectorInfo{}, sealing.Log{}, ) diff --git a/extern/storage-sealing/input.go b/extern/storage-sealing/input.go index 85a5c429f..1a0b7bf1e 100644 --- a/extern/storage-sealing/input.go +++ b/extern/storage-sealing/input.go @@ -14,6 +14,7 @@ import ( "github.com/filecoin-project/go-statemachine" "github.com/filecoin-project/specs-storage/storage" + "github.com/filecoin-project/lotus/api" sectorstorage "github.com/filecoin-project/lotus/extern/sector-storage" "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper" "github.com/filecoin-project/lotus/extern/storage-sealing/sealiface" @@ -236,34 +237,34 @@ func (m *Sealing) handleAddPieceFailed(ctx statemachine.Context, sector SectorIn return nil } -func (m *Sealing) AddPieceToAnySector(ctx context.Context, size abi.UnpaddedPieceSize, data storage.Data, deal DealInfo) (abi.SectorNumber, abi.PaddedPieceSize, error) { +func (m *Sealing) SectorAddPieceToAny(ctx context.Context, size abi.UnpaddedPieceSize, data storage.Data, deal api.PieceDealInfo) (api.SectorOffset, error) { log.Infof("Adding piece for deal %d (publish msg: %s)", deal.DealID, deal.PublishCid) if (padreader.PaddedSize(uint64(size))) != size { - return 0, 0, xerrors.Errorf("cannot allocate unpadded piece") + return 
api.SectorOffset{}, xerrors.Errorf("cannot allocate unpadded piece") } sp, err := m.currentSealProof(ctx) if err != nil { - return 0, 0, xerrors.Errorf("getting current seal proof type: %w", err) + return api.SectorOffset{}, xerrors.Errorf("getting current seal proof type: %w", err) } ssize, err := sp.SectorSize() if err != nil { - return 0, 0, err + return api.SectorOffset{}, err } if size > abi.PaddedPieceSize(ssize).Unpadded() { - return 0, 0, xerrors.Errorf("piece cannot fit into a sector") + return api.SectorOffset{}, xerrors.Errorf("piece cannot fit into a sector") } if _, err := deal.DealProposal.Cid(); err != nil { - return 0, 0, xerrors.Errorf("getting proposal CID: %w", err) + return api.SectorOffset{}, xerrors.Errorf("getting proposal CID: %w", err) } m.inputLk.Lock() if _, exist := m.pendingPieces[proposalCID(deal)]; exist { m.inputLk.Unlock() - return 0, 0, xerrors.Errorf("piece for deal %s already pending", proposalCID(deal)) + return api.SectorOffset{}, xerrors.Errorf("piece for deal %s already pending", proposalCID(deal)) } resCh := make(chan struct { @@ -295,7 +296,7 @@ func (m *Sealing) AddPieceToAnySector(ctx context.Context, size abi.UnpaddedPiec res := <-resCh - return res.sn, res.offset.Padded(), res.err + return api.SectorOffset{Sector: res.sn, Offset: res.offset.Padded()}, res.err } // called with m.inputLk @@ -454,7 +455,7 @@ func (m *Sealing) StartPacking(sid abi.SectorNumber) error { return m.sectors.Send(uint64(sid), SectorStartPacking{}) } -func proposalCID(deal DealInfo) cid.Cid { +func proposalCID(deal api.PieceDealInfo) cid.Cid { pc, err := deal.DealProposal.Cid() if err != nil { log.Errorf("DealProposal.Cid error: %+v", err) diff --git a/extern/storage-sealing/mocks/mock_commit_batcher.go b/extern/storage-sealing/mocks/mock_commit_batcher.go new file mode 100644 index 000000000..061121899 --- /dev/null +++ b/extern/storage-sealing/mocks/mock_commit_batcher.go @@ -0,0 +1,164 @@ +// Code generated by MockGen. DO NOT EDIT. 
+// Source: github.com/filecoin-project/lotus/extern/storage-sealing (interfaces: CommitBatcherApi) + +// Package mocks is a generated GoMock package. +package mocks + +import ( + context "context" + reflect "reflect" + + address "github.com/filecoin-project/go-address" + abi "github.com/filecoin-project/go-state-types/abi" + big "github.com/filecoin-project/go-state-types/big" + network "github.com/filecoin-project/go-state-types/network" + miner "github.com/filecoin-project/lotus/chain/actors/builtin/miner" + sealing "github.com/filecoin-project/lotus/extern/storage-sealing" + miner0 "github.com/filecoin-project/specs-actors/actors/builtin/miner" + gomock "github.com/golang/mock/gomock" + cid "github.com/ipfs/go-cid" +) + +// MockCommitBatcherApi is a mock of CommitBatcherApi interface. +type MockCommitBatcherApi struct { + ctrl *gomock.Controller + recorder *MockCommitBatcherApiMockRecorder +} + +// MockCommitBatcherApiMockRecorder is the mock recorder for MockCommitBatcherApi. +type MockCommitBatcherApiMockRecorder struct { + mock *MockCommitBatcherApi +} + +// NewMockCommitBatcherApi creates a new mock instance. +func NewMockCommitBatcherApi(ctrl *gomock.Controller) *MockCommitBatcherApi { + mock := &MockCommitBatcherApi{ctrl: ctrl} + mock.recorder = &MockCommitBatcherApiMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockCommitBatcherApi) EXPECT() *MockCommitBatcherApiMockRecorder { + return m.recorder +} + +// ChainBaseFee mocks base method. +func (m *MockCommitBatcherApi) ChainBaseFee(arg0 context.Context, arg1 sealing.TipSetToken) (big.Int, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ChainBaseFee", arg0, arg1) + ret0, _ := ret[0].(big.Int) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ChainBaseFee indicates an expected call of ChainBaseFee. 
+func (mr *MockCommitBatcherApiMockRecorder) ChainBaseFee(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainBaseFee", reflect.TypeOf((*MockCommitBatcherApi)(nil).ChainBaseFee), arg0, arg1) +} + +// ChainHead mocks base method. +func (m *MockCommitBatcherApi) ChainHead(arg0 context.Context) (sealing.TipSetToken, abi.ChainEpoch, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ChainHead", arg0) + ret0, _ := ret[0].(sealing.TipSetToken) + ret1, _ := ret[1].(abi.ChainEpoch) + ret2, _ := ret[2].(error) + return ret0, ret1, ret2 +} + +// ChainHead indicates an expected call of ChainHead. +func (mr *MockCommitBatcherApiMockRecorder) ChainHead(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainHead", reflect.TypeOf((*MockCommitBatcherApi)(nil).ChainHead), arg0) +} + +// SendMsg mocks base method. +func (m *MockCommitBatcherApi) SendMsg(arg0 context.Context, arg1, arg2 address.Address, arg3 abi.MethodNum, arg4, arg5 big.Int, arg6 []byte) (cid.Cid, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SendMsg", arg0, arg1, arg2, arg3, arg4, arg5, arg6) + ret0, _ := ret[0].(cid.Cid) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// SendMsg indicates an expected call of SendMsg. +func (mr *MockCommitBatcherApiMockRecorder) SendMsg(arg0, arg1, arg2, arg3, arg4, arg5, arg6 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendMsg", reflect.TypeOf((*MockCommitBatcherApi)(nil).SendMsg), arg0, arg1, arg2, arg3, arg4, arg5, arg6) +} + +// StateMinerAvailableBalance mocks base method. 
+func (m *MockCommitBatcherApi) StateMinerAvailableBalance(arg0 context.Context, arg1 address.Address, arg2 sealing.TipSetToken) (big.Int, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateMinerAvailableBalance", arg0, arg1, arg2) + ret0, _ := ret[0].(big.Int) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateMinerAvailableBalance indicates an expected call of StateMinerAvailableBalance. +func (mr *MockCommitBatcherApiMockRecorder) StateMinerAvailableBalance(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMinerAvailableBalance", reflect.TypeOf((*MockCommitBatcherApi)(nil).StateMinerAvailableBalance), arg0, arg1, arg2) +} + +// StateMinerInfo mocks base method. +func (m *MockCommitBatcherApi) StateMinerInfo(arg0 context.Context, arg1 address.Address, arg2 sealing.TipSetToken) (miner.MinerInfo, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateMinerInfo", arg0, arg1, arg2) + ret0, _ := ret[0].(miner.MinerInfo) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateMinerInfo indicates an expected call of StateMinerInfo. +func (mr *MockCommitBatcherApiMockRecorder) StateMinerInfo(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMinerInfo", reflect.TypeOf((*MockCommitBatcherApi)(nil).StateMinerInfo), arg0, arg1, arg2) +} + +// StateMinerInitialPledgeCollateral mocks base method. +func (m *MockCommitBatcherApi) StateMinerInitialPledgeCollateral(arg0 context.Context, arg1 address.Address, arg2 miner0.SectorPreCommitInfo, arg3 sealing.TipSetToken) (big.Int, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateMinerInitialPledgeCollateral", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(big.Int) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateMinerInitialPledgeCollateral indicates an expected call of StateMinerInitialPledgeCollateral. 
+func (mr *MockCommitBatcherApiMockRecorder) StateMinerInitialPledgeCollateral(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMinerInitialPledgeCollateral", reflect.TypeOf((*MockCommitBatcherApi)(nil).StateMinerInitialPledgeCollateral), arg0, arg1, arg2, arg3) +} + +// StateNetworkVersion mocks base method. +func (m *MockCommitBatcherApi) StateNetworkVersion(arg0 context.Context, arg1 sealing.TipSetToken) (network.Version, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateNetworkVersion", arg0, arg1) + ret0, _ := ret[0].(network.Version) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateNetworkVersion indicates an expected call of StateNetworkVersion. +func (mr *MockCommitBatcherApiMockRecorder) StateNetworkVersion(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateNetworkVersion", reflect.TypeOf((*MockCommitBatcherApi)(nil).StateNetworkVersion), arg0, arg1) +} + +// StateSectorPreCommitInfo mocks base method. +func (m *MockCommitBatcherApi) StateSectorPreCommitInfo(arg0 context.Context, arg1 address.Address, arg2 abi.SectorNumber, arg3 sealing.TipSetToken) (*miner.SectorPreCommitOnChainInfo, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateSectorPreCommitInfo", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(*miner.SectorPreCommitOnChainInfo) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateSectorPreCommitInfo indicates an expected call of StateSectorPreCommitInfo. 
+func (mr *MockCommitBatcherApiMockRecorder) StateSectorPreCommitInfo(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateSectorPreCommitInfo", reflect.TypeOf((*MockCommitBatcherApi)(nil).StateSectorPreCommitInfo), arg0, arg1, arg2, arg3) +} diff --git a/extern/storage-sealing/mocks/mock_precommit_batcher.go b/extern/storage-sealing/mocks/mock_precommit_batcher.go new file mode 100644 index 000000000..ed97229b4 --- /dev/null +++ b/extern/storage-sealing/mocks/mock_precommit_batcher.go @@ -0,0 +1,102 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/filecoin-project/lotus/extern/storage-sealing (interfaces: PreCommitBatcherApi) + +// Package mocks is a generated GoMock package. +package mocks + +import ( + context "context" + reflect "reflect" + + address "github.com/filecoin-project/go-address" + abi "github.com/filecoin-project/go-state-types/abi" + big "github.com/filecoin-project/go-state-types/big" + miner "github.com/filecoin-project/lotus/chain/actors/builtin/miner" + sealing "github.com/filecoin-project/lotus/extern/storage-sealing" + gomock "github.com/golang/mock/gomock" + cid "github.com/ipfs/go-cid" +) + +// MockPreCommitBatcherApi is a mock of PreCommitBatcherApi interface. +type MockPreCommitBatcherApi struct { + ctrl *gomock.Controller + recorder *MockPreCommitBatcherApiMockRecorder +} + +// MockPreCommitBatcherApiMockRecorder is the mock recorder for MockPreCommitBatcherApi. +type MockPreCommitBatcherApiMockRecorder struct { + mock *MockPreCommitBatcherApi +} + +// NewMockPreCommitBatcherApi creates a new mock instance. +func NewMockPreCommitBatcherApi(ctrl *gomock.Controller) *MockPreCommitBatcherApi { + mock := &MockPreCommitBatcherApi{ctrl: ctrl} + mock.recorder = &MockPreCommitBatcherApiMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. 
+func (m *MockPreCommitBatcherApi) EXPECT() *MockPreCommitBatcherApiMockRecorder { + return m.recorder +} + +// ChainHead mocks base method. +func (m *MockPreCommitBatcherApi) ChainHead(arg0 context.Context) (sealing.TipSetToken, abi.ChainEpoch, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ChainHead", arg0) + ret0, _ := ret[0].(sealing.TipSetToken) + ret1, _ := ret[1].(abi.ChainEpoch) + ret2, _ := ret[2].(error) + return ret0, ret1, ret2 +} + +// ChainHead indicates an expected call of ChainHead. +func (mr *MockPreCommitBatcherApiMockRecorder) ChainHead(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainHead", reflect.TypeOf((*MockPreCommitBatcherApi)(nil).ChainHead), arg0) +} + +// SendMsg mocks base method. +func (m *MockPreCommitBatcherApi) SendMsg(arg0 context.Context, arg1, arg2 address.Address, arg3 abi.MethodNum, arg4, arg5 big.Int, arg6 []byte) (cid.Cid, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SendMsg", arg0, arg1, arg2, arg3, arg4, arg5, arg6) + ret0, _ := ret[0].(cid.Cid) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// SendMsg indicates an expected call of SendMsg. +func (mr *MockPreCommitBatcherApiMockRecorder) SendMsg(arg0, arg1, arg2, arg3, arg4, arg5, arg6 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendMsg", reflect.TypeOf((*MockPreCommitBatcherApi)(nil).SendMsg), arg0, arg1, arg2, arg3, arg4, arg5, arg6) +} + +// StateMinerAvailableBalance mocks base method. +func (m *MockPreCommitBatcherApi) StateMinerAvailableBalance(arg0 context.Context, arg1 address.Address, arg2 sealing.TipSetToken) (big.Int, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateMinerAvailableBalance", arg0, arg1, arg2) + ret0, _ := ret[0].(big.Int) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateMinerAvailableBalance indicates an expected call of StateMinerAvailableBalance. 
+func (mr *MockPreCommitBatcherApiMockRecorder) StateMinerAvailableBalance(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMinerAvailableBalance", reflect.TypeOf((*MockPreCommitBatcherApi)(nil).StateMinerAvailableBalance), arg0, arg1, arg2) +} + +// StateMinerInfo mocks base method. +func (m *MockPreCommitBatcherApi) StateMinerInfo(arg0 context.Context, arg1 address.Address, arg2 sealing.TipSetToken) (miner.MinerInfo, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateMinerInfo", arg0, arg1, arg2) + ret0, _ := ret[0].(miner.MinerInfo) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateMinerInfo indicates an expected call of StateMinerInfo. +func (mr *MockPreCommitBatcherApiMockRecorder) StateMinerInfo(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMinerInfo", reflect.TypeOf((*MockPreCommitBatcherApi)(nil).StateMinerInfo), arg0, arg1, arg2) +} diff --git a/extern/storage-sealing/precommit_batch.go b/extern/storage-sealing/precommit_batch.go index bb50c1f51..719455b90 100644 --- a/extern/storage-sealing/precommit_batch.go +++ b/extern/storage-sealing/precommit_batch.go @@ -7,9 +7,6 @@ import ( "sync" "time" - "github.com/filecoin-project/lotus/build" - "github.com/filecoin-project/lotus/chain/actors/policy" - "github.com/ipfs/go-cid" "golang.org/x/xerrors" @@ -20,14 +17,19 @@ import ( miner5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/miner" "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/chain/actors/builtin/miner" + "github.com/filecoin-project/lotus/chain/actors/policy" "github.com/filecoin-project/lotus/extern/storage-sealing/sealiface" "github.com/filecoin-project/lotus/node/config" ) +//go:generate go run github.com/golang/mock/mockgen -destination=mocks/mock_precommit_batcher.go 
-package=mocks . PreCommitBatcherApi + type PreCommitBatcherApi interface { SendMsg(ctx context.Context, from, to address.Address, method abi.MethodNum, value, maxFee abi.TokenAmount, params []byte) (cid.Cid, error) StateMinerInfo(context.Context, address.Address, TipSetToken) (miner.MinerInfo, error) + StateMinerAvailableBalance(context.Context, address.Address, TipSetToken) (big.Int, error) ChainHead(ctx context.Context) (TipSetToken, abi.ChainEpoch, error) } @@ -224,6 +226,11 @@ func (b *PreCommitBatcher) processBatch(cfg sealiface.Config) ([]sealiface.PreCo deposit = big.Add(deposit, p.deposit) } + deposit, err := collateralSendAmount(b.mctx, b.api, b.maddr, cfg, deposit) + if err != nil { + return []sealiface.PreCommitBatchRes{res}, err + } + enc := new(bytes.Buffer) if err := params.MarshalCBOR(enc); err != nil { return []sealiface.PreCommitBatchRes{res}, xerrors.Errorf("couldn't serialize PreCommitSectorBatchParams: %w", err) @@ -249,7 +256,7 @@ func (b *PreCommitBatcher) processBatch(cfg sealiface.Config) ([]sealiface.PreCo res.Msg = &mcid - log.Infow("Sent ProveCommitAggregate message", "cid", mcid, "from", from, "sectors", len(b.todo)) + log.Infow("Sent PreCommitSectorBatch message", "cid", mcid, "from", from, "sectors", len(b.todo)) return []sealiface.PreCommitBatchRes{res}, nil } diff --git a/extern/storage-sealing/precommit_batch_test.go b/extern/storage-sealing/precommit_batch_test.go new file mode 100644 index 000000000..b6c35362e --- /dev/null +++ b/extern/storage-sealing/precommit_batch_test.go @@ -0,0 +1,257 @@ +package sealing_test + +import ( + "bytes" + "context" + "sort" + "sync" + "testing" + "time" + + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/require" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + miner5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/miner" + + "github.com/filecoin-project/lotus/api" + 
"github.com/filecoin-project/lotus/chain/actors/builtin/miner" + "github.com/filecoin-project/lotus/chain/types" + sealing "github.com/filecoin-project/lotus/extern/storage-sealing" + "github.com/filecoin-project/lotus/extern/storage-sealing/mocks" + "github.com/filecoin-project/lotus/extern/storage-sealing/sealiface" + "github.com/filecoin-project/lotus/node/config" + miner0 "github.com/filecoin-project/specs-actors/actors/builtin/miner" +) + +var fc = config.MinerFeeConfig{ + MaxPreCommitGasFee: types.FIL(types.FromFil(1)), + MaxCommitGasFee: types.FIL(types.FromFil(1)), + MaxTerminateGasFee: types.FIL(types.FromFil(1)), + MaxPreCommitBatchGasFee: config.BatchFeeConfig{Base: types.FIL(types.FromFil(3)), PerSector: types.FIL(types.FromFil(1))}, + MaxCommitBatchGasFee: config.BatchFeeConfig{Base: types.FIL(types.FromFil(3)), PerSector: types.FIL(types.FromFil(1))}, +} + +func TestPrecommitBatcher(t *testing.T) { + t0123, err := address.NewFromString("t0123") + require.NoError(t, err) + + ctx := context.Background() + + as := func(ctx context.Context, mi miner.MinerInfo, use api.AddrUse, goodFunds, minFunds abi.TokenAmount) (address.Address, abi.TokenAmount, error) { + return t0123, big.Zero(), nil + } + + maxBatch := miner5.PreCommitSectorBatchMaxSize + + cfg := func() (sealiface.Config, error) { + return sealiface.Config{ + MaxWaitDealsSectors: 2, + MaxSealingSectors: 0, + MaxSealingSectorsForDeals: 0, + WaitDealsDelay: time.Hour * 6, + AlwaysKeepUnsealedCopy: true, + + BatchPreCommits: true, + MaxPreCommitBatch: maxBatch, + PreCommitBatchWait: 24 * time.Hour, + PreCommitBatchSlack: 3 * time.Hour, + + AggregateCommits: true, + MinCommitBatch: miner5.MinAggregatedSectors, + MaxCommitBatch: miner5.MaxAggregatedSectors, + CommitBatchWait: 24 * time.Hour, + CommitBatchSlack: 1 * time.Hour, + + TerminateBatchMin: 1, + TerminateBatchMax: 100, + TerminateBatchWait: 5 * time.Minute, + }, nil + } + + type promise func(t *testing.T) + type action func(t *testing.T, s 
*mocks.MockPreCommitBatcherApi, pcb *sealing.PreCommitBatcher) promise + + actions := func(as ...action) action { + return func(t *testing.T, s *mocks.MockPreCommitBatcherApi, pcb *sealing.PreCommitBatcher) promise { + var ps []promise + for _, a := range as { + p := a(t, s, pcb) + if p != nil { + ps = append(ps, p) + } + } + + if len(ps) > 0 { + return func(t *testing.T) { + for _, p := range ps { + p(t) + } + } + } + return nil + } + } + + addSector := func(sn abi.SectorNumber) action { + return func(t *testing.T, s *mocks.MockPreCommitBatcherApi, pcb *sealing.PreCommitBatcher) promise { + var pcres sealiface.PreCommitBatchRes + var pcerr error + done := sync.Mutex{} + done.Lock() + + si := sealing.SectorInfo{ + SectorNumber: sn, + } + + s.EXPECT().ChainHead(gomock.Any()).Return(nil, abi.ChainEpoch(1), nil) + + go func() { + defer done.Unlock() + pcres, pcerr = pcb.AddPreCommit(ctx, si, big.Zero(), &miner0.SectorPreCommitInfo{ + SectorNumber: si.SectorNumber, + SealedCID: fakePieceCid(t), + DealIDs: nil, + Expiration: 0, + }) + }() + + return func(t *testing.T) { + done.Lock() + require.NoError(t, pcerr) + require.Empty(t, pcres.Error) + require.Contains(t, pcres.Sectors, si.SectorNumber) + } + } + } + + addSectors := func(sectors []abi.SectorNumber) action { + as := make([]action, len(sectors)) + for i, sector := range sectors { + as[i] = addSector(sector) + } + return actions(as...) 
+ } + + waitPending := func(n int) action { + return func(t *testing.T, s *mocks.MockPreCommitBatcherApi, pcb *sealing.PreCommitBatcher) promise { + require.Eventually(t, func() bool { + p, err := pcb.Pending(ctx) + require.NoError(t, err) + return len(p) == n + }, time.Second*5, 10*time.Millisecond) + + return nil + } + } + + expectSend := func(expect []abi.SectorNumber) action { + return func(t *testing.T, s *mocks.MockPreCommitBatcherApi, pcb *sealing.PreCommitBatcher) promise { + s.EXPECT().StateMinerInfo(gomock.Any(), gomock.Any(), gomock.Any()).Return(miner.MinerInfo{Owner: t0123, Worker: t0123}, nil) + s.EXPECT().SendMsg(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), funMatcher(func(i interface{}) bool { + b := i.([]byte) + var params miner5.PreCommitSectorBatchParams + require.NoError(t, params.UnmarshalCBOR(bytes.NewReader(b))) + for s, number := range expect { + require.Equal(t, number, params.Sectors[s].SectorNumber) + } + return true + })) + return nil + } + } + + flush := func(expect []abi.SectorNumber) action { + return func(t *testing.T, s *mocks.MockPreCommitBatcherApi, pcb *sealing.PreCommitBatcher) promise { + _ = expectSend(expect)(t, s, pcb) + + r, err := pcb.Flush(ctx) + require.NoError(t, err) + require.Len(t, r, 1) + require.Empty(t, r[0].Error) + sort.Slice(r[0].Sectors, func(i, j int) bool { + return r[0].Sectors[i] < r[0].Sectors[j] + }) + require.Equal(t, expect, r[0].Sectors) + + return nil + } + } + + getSectors := func(n int) []abi.SectorNumber { + out := make([]abi.SectorNumber, n) + for i := range out { + out[i] = abi.SectorNumber(i) + } + return out + } + + tcs := map[string]struct { + actions []action + }{ + "addSingle": { + actions: []action{ + addSector(0), + waitPending(1), + flush([]abi.SectorNumber{0}), + }, + }, + "addTwo": { + actions: []action{ + addSectors(getSectors(2)), + waitPending(2), + flush(getSectors(2)), + }, + }, + "addMax": { + actions: []action{ + 
expectSend(getSectors(maxBatch)), + addSectors(getSectors(maxBatch)), + }, + }, + } + + for name, tc := range tcs { + tc := tc + + t.Run(name, func(t *testing.T) { + // create go mock controller here + mockCtrl := gomock.NewController(t) + // when test is done, assert expectations on all mock objects. + defer mockCtrl.Finish() + + // create them mocks + pcapi := mocks.NewMockPreCommitBatcherApi(mockCtrl) + + pcb := sealing.NewPreCommitBatcher(ctx, t0123, pcapi, as, fc, cfg) + + var promises []promise + + for _, a := range tc.actions { + p := a(t, pcapi, pcb) + if p != nil { + promises = append(promises, p) + } + } + + for _, p := range promises { + p(t) + } + + err := pcb.Stop(ctx) + require.NoError(t, err) + }) + } +} + +type funMatcher func(interface{}) bool + +func (f funMatcher) Matches(v interface{}) bool { + return f(v) +} + +func (funMatcher) String() string { + return "fun" +} diff --git a/extern/storage-sealing/precommit_policy.go b/extern/storage-sealing/precommit_policy.go index 0b774b56f..a6add5693 100644 --- a/extern/storage-sealing/precommit_policy.go +++ b/extern/storage-sealing/precommit_policy.go @@ -40,7 +40,10 @@ type BasicPreCommitPolicy struct { duration abi.ChainEpoch } -// NewBasicPreCommitPolicy produces a BasicPreCommitPolicy +// NewBasicPreCommitPolicy produces a BasicPreCommitPolicy. +// +// The provided duration is used as the default sector expiry when the sector +// contains no deals. The proving boundary is used to adjust/align the sector's expiration. 
func NewBasicPreCommitPolicy(api Chain, duration abi.ChainEpoch, provingBoundary abi.ChainEpoch) BasicPreCommitPolicy { return BasicPreCommitPolicy{ api: api, diff --git a/extern/storage-sealing/precommit_policy_test.go b/extern/storage-sealing/precommit_policy_test.go index 52814167a..a6c17d3fd 100644 --- a/extern/storage-sealing/precommit_policy_test.go +++ b/extern/storage-sealing/precommit_policy_test.go @@ -5,6 +5,7 @@ import ( "testing" "github.com/filecoin-project/go-state-types/network" + api "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/build" "github.com/ipfs/go-cid" @@ -58,9 +59,9 @@ func TestBasicPolicyMostConstrictiveSchedule(t *testing.T) { Size: abi.PaddedPieceSize(1024), PieceCID: fakePieceCid(t), }, - DealInfo: &sealing.DealInfo{ + DealInfo: &api.PieceDealInfo{ DealID: abi.DealID(42), - DealSchedule: sealing.DealSchedule{ + DealSchedule: api.DealSchedule{ StartEpoch: abi.ChainEpoch(70), EndEpoch: abi.ChainEpoch(75), }, @@ -71,9 +72,9 @@ func TestBasicPolicyMostConstrictiveSchedule(t *testing.T) { Size: abi.PaddedPieceSize(1024), PieceCID: fakePieceCid(t), }, - DealInfo: &sealing.DealInfo{ + DealInfo: &api.PieceDealInfo{ DealID: abi.DealID(43), - DealSchedule: sealing.DealSchedule{ + DealSchedule: api.DealSchedule{ StartEpoch: abi.ChainEpoch(80), EndEpoch: abi.ChainEpoch(100), }, @@ -98,9 +99,9 @@ func TestBasicPolicyIgnoresExistingScheduleIfExpired(t *testing.T) { Size: abi.PaddedPieceSize(1024), PieceCID: fakePieceCid(t), }, - DealInfo: &sealing.DealInfo{ + DealInfo: &api.PieceDealInfo{ DealID: abi.DealID(44), - DealSchedule: sealing.DealSchedule{ + DealSchedule: api.DealSchedule{ StartEpoch: abi.ChainEpoch(1), EndEpoch: abi.ChainEpoch(10), }, @@ -125,9 +126,9 @@ func TestMissingDealIsIgnored(t *testing.T) { Size: abi.PaddedPieceSize(1024), PieceCID: fakePieceCid(t), }, - DealInfo: &sealing.DealInfo{ + DealInfo: &api.PieceDealInfo{ DealID: abi.DealID(44), - DealSchedule: sealing.DealSchedule{ + DealSchedule: 
api.DealSchedule{ StartEpoch: abi.ChainEpoch(1), EndEpoch: abi.ChainEpoch(10), }, diff --git a/extern/storage-sealing/sealiface/config.go b/extern/storage-sealing/sealiface/config.go index 0410b92c0..e33b36263 100644 --- a/extern/storage-sealing/sealiface/config.go +++ b/extern/storage-sealing/sealiface/config.go @@ -24,6 +24,10 @@ type Config struct { FinalizeEarly bool + CollateralFromMinerBalance bool + AvailableBalanceBuffer abi.TokenAmount + DisableCollateralFallback bool + BatchPreCommits bool MaxPreCommitBatch int PreCommitBatchWait time.Duration diff --git a/extern/storage-sealing/sealing.go b/extern/storage-sealing/sealing.go index 8a70704c4..3e40d10f3 100644 --- a/extern/storage-sealing/sealing.go +++ b/extern/storage-sealing/sealing.go @@ -59,6 +59,7 @@ type SealingAPI interface { StateMinerPreCommitDepositForPower(context.Context, address.Address, miner.SectorPreCommitInfo, TipSetToken) (big.Int, error) StateMinerInitialPledgeCollateral(context.Context, address.Address, miner.SectorPreCommitInfo, TipSetToken) (big.Int, error) StateMinerInfo(context.Context, address.Address, TipSetToken) (miner.MinerInfo, error) + StateMinerAvailableBalance(context.Context, address.Address, TipSetToken) (big.Int, error) StateMinerSectorAllocated(context.Context, address.Address, abi.SectorNumber, TipSetToken) (bool, error) StateMarketStorageDeal(context.Context, abi.DealID, TipSetToken) (*api.MarketDeal, error) StateMarketStorageDealProposal(context.Context, abi.DealID, TipSetToken) (market.DealProposal, error) @@ -124,7 +125,7 @@ type openSector struct { type pendingPiece struct { size abi.UnpaddedPieceSize - deal DealInfo + deal api.PieceDealInfo data storage.Data diff --git a/extern/storage-sealing/states_failed.go b/extern/storage-sealing/states_failed.go index 201c4456f..bd5f489b4 100644 --- a/extern/storage-sealing/states_failed.go +++ b/extern/storage-sealing/states_failed.go @@ -142,7 +142,7 @@ func (m *Sealing) handlePreCommitFailed(ctx statemachine.Context, 
sector SectorI } if pci.Info.SealedCID != *sector.CommR { - log.Warnf("sector %d is precommitted on chain, with different CommR: %x != %x", sector.SectorNumber, pci.Info.SealedCID, sector.CommR) + log.Warnf("sector %d is precommitted on chain, with different CommR: %s != %s", sector.SectorNumber, pci.Info.SealedCID, sector.CommR) return nil // TODO: remove when the actor allows re-precommit } @@ -354,7 +354,7 @@ func (m *Sealing) handleRecoverDealIDs(ctx statemachine.Context, sector SectorIn } if proposal.PieceCID != p.Piece.PieceCID { - log.Warnf("piece %d (of %d) of sector %d refers deal %d with wrong PieceCID: %x != %x", i, len(sector.Pieces), sector.SectorNumber, p.DealInfo.DealID, p.Piece.PieceCID, proposal.PieceCID) + log.Warnf("piece %d (of %d) of sector %d refers deal %d with wrong PieceCID: %s != %s", i, len(sector.Pieces), sector.SectorNumber, p.DealInfo.DealID, p.Piece.PieceCID, proposal.PieceCID) toFix = append(toFix, i) continue } diff --git a/extern/storage-sealing/states_proving.go b/extern/storage-sealing/states_proving.go index 212fd906f..5e613b20b 100644 --- a/extern/storage-sealing/states_proving.go +++ b/extern/storage-sealing/states_proving.go @@ -126,3 +126,22 @@ func (m *Sealing) handleRemoving(ctx statemachine.Context, sector SectorInfo) er return ctx.Send(SectorRemoved{}) } + +func (m *Sealing) handleProvingSector(ctx statemachine.Context, sector SectorInfo) error { + // TODO: track sector health / expiration + log.Infof("Proving sector %d", sector.SectorNumber) + + cfg, err := m.getConfig() + if err != nil { + return xerrors.Errorf("getting sealing config: %w", err) + } + + if err := m.sealer.ReleaseUnsealed(ctx.Context(), m.minerSector(sector.SectorType, sector.SectorNumber), sector.keepUnsealedRanges(true, cfg.AlwaysKeepUnsealedCopy)); err != nil { + log.Error(err) + } + + // TODO: Watch termination + // TODO: Auto-extend if set + + return nil +} diff --git a/extern/storage-sealing/states_sealing.go 
b/extern/storage-sealing/states_sealing.go index 59778f4ad..5334fc72e 100644 --- a/extern/storage-sealing/states_sealing.go +++ b/extern/storage-sealing/states_sealing.go @@ -101,8 +101,8 @@ func (m *Sealing) padSector(ctx context.Context, sectorID storage.SectorRef, exi return out, nil } -func checkTicketExpired(sector SectorInfo, epoch abi.ChainEpoch) bool { - return epoch-sector.TicketEpoch > MaxTicketAge // TODO: allow configuring expected seal durations +func checkTicketExpired(ticket, head abi.ChainEpoch) bool { + return head-ticket > MaxTicketAge // TODO: allow configuring expected seal durations } func checkProveCommitExpired(preCommitEpoch, msd abi.ChainEpoch, currEpoch abi.ChainEpoch) bool { @@ -206,7 +206,7 @@ func (m *Sealing) handlePreCommit1(ctx statemachine.Context, sector SectorInfo) return nil } - if checkTicketExpired(sector, height) { + if checkTicketExpired(sector.TicketEpoch, height) { pci, err := m.api.StateSectorPreCommitInfo(ctx.Context(), m.maddr, sector.SectorNumber, tok) if err != nil { log.Errorf("handlePreCommit1: StateSectorPreCommitInfo: api error, not proceeding: %+v", err) @@ -313,7 +313,7 @@ func (m *Sealing) preCommitParams(ctx statemachine.Context, sector SectorInfo) ( msd := policy.GetMaxProveCommitDuration(actors.VersionForNetwork(nv), sector.SectorType) - if minExpiration := height + msd + miner.MinSectorExpiration + 10; expiration < minExpiration { + if minExpiration := sector.TicketEpoch + policy.MaxPreCommitRandomnessLookback + msd + miner.MinSectorExpiration; expiration < minExpiration { expiration = minExpiration } // TODO: enforce a reasonable _maximum_ sector lifetime? 
@@ -357,8 +357,16 @@ func (m *Sealing) handlePreCommitting(ctx statemachine.Context, sector SectorInf } } - params, deposit, tok, err := m.preCommitParams(ctx, sector) - if params == nil || err != nil { + params, pcd, tok, err := m.preCommitParams(ctx, sector) + if err != nil { + return ctx.Send(SectorChainPreCommitFailed{xerrors.Errorf("preCommitParams: %w", err)}) + } + if params == nil { + return nil // event was sent in preCommitParams + } + + deposit, err := collateralSendAmount(ctx.Context(), m.api, m.maddr, cfg, pcd) + if err != nil { return err } @@ -389,7 +397,7 @@ func (m *Sealing) handlePreCommitting(ctx statemachine.Context, sector SectorInf return ctx.Send(SectorChainPreCommitFailed{xerrors.Errorf("pushing message to mpool: %w", err)}) } - return ctx.Send(SectorPreCommitted{Message: mcid, PreCommitDeposit: deposit, PreCommitInfo: *params}) + return ctx.Send(SectorPreCommitted{Message: mcid, PreCommitDeposit: pcd, PreCommitInfo: *params}) } func (m *Sealing) handleSubmitPreCommitBatch(ctx statemachine.Context, sector SectorInfo) error { @@ -398,9 +406,12 @@ func (m *Sealing) handleSubmitPreCommitBatch(ctx statemachine.Context, sector Se } params, deposit, _, err := m.preCommitParams(ctx, sector) - if params == nil || err != nil { + if err != nil { return ctx.Send(SectorChainPreCommitFailed{xerrors.Errorf("preCommitParams: %w", err)}) } + if params == nil { + return nil // event was sent in preCommitParams + } res, err := m.precommiter.AddPreCommit(ctx.Context(), sector, deposit, params) if err != nil { @@ -524,7 +535,7 @@ func (m *Sealing) handleCommitting(ctx statemachine.Context, sector SectorInfo) log.Info("scheduling seal proof computation...") - log.Infof("KOMIT %d %x(%d); %x(%d); %v; r:%x; d:%x", sector.SectorNumber, sector.TicketValue, sector.TicketEpoch, sector.SeedValue, sector.SeedEpoch, sector.pieceInfos(), sector.CommR, sector.CommD) + log.Infof("KOMIT %d %x(%d); %x(%d); %v; r:%s; d:%s", sector.SectorNumber, sector.TicketValue, 
sector.TicketEpoch, sector.SeedValue, sector.SeedEpoch, sector.pieceInfos(), sector.CommR, sector.CommD) if sector.CommD == nil || sector.CommR == nil { return ctx.Send(SectorCommitFailed{xerrors.Errorf("sector had nil commR or commD")}) @@ -628,6 +639,11 @@ func (m *Sealing) handleSubmitCommit(ctx statemachine.Context, sector SectorInfo collateral = big.Zero() } + collateral, err = collateralSendAmount(ctx.Context(), m.api, m.maddr, cfg, collateral) + if err != nil { + return err + } + goodFunds := big.Add(collateral, big.Int(m.feeCfg.MaxCommitGasFee)) from, _, err := m.addrSel(ctx.Context(), mi, api.CommitAddr, goodFunds, collateral) @@ -652,15 +668,15 @@ func (m *Sealing) handleSubmitCommitAggregate(ctx statemachine.Context, sector S } res, err := m.commiter.AddCommit(ctx.Context(), sector, AggregateInput{ - info: proof.AggregateSealVerifyInfo{ + Info: proof.AggregateSealVerifyInfo{ Number: sector.SectorNumber, Randomness: sector.TicketValue, InteractiveRandomness: sector.SeedValue, SealedCID: *sector.CommR, UnsealedCID: *sector.CommD, }, - proof: sector.Proof, // todo: this correct?? - spt: sector.SectorType, + Proof: sector.Proof, // todo: this correct?? 
+ Spt: sector.SectorType, }) if err != nil { return ctx.Send(SectorRetrySubmitCommit{}) @@ -739,22 +755,3 @@ func (m *Sealing) handleFinalizeSector(ctx statemachine.Context, sector SectorIn return ctx.Send(SectorFinalized{}) } - -func (m *Sealing) handleProvingSector(ctx statemachine.Context, sector SectorInfo) error { - // TODO: track sector health / expiration - log.Infof("Proving sector %d", sector.SectorNumber) - - cfg, err := m.getConfig() - if err != nil { - return xerrors.Errorf("getting sealing config: %w", err) - } - - if err := m.sealer.ReleaseUnsealed(ctx.Context(), m.minerSector(sector.SectorType, sector.SectorNumber), sector.keepUnsealedRanges(true, cfg.AlwaysKeepUnsealedCopy)); err != nil { - log.Error(err) - } - - // TODO: Watch termination - // TODO: Auto-extend if set - - return nil -} diff --git a/extern/storage-sealing/types.go b/extern/storage-sealing/types.go index 58c35cf36..c5aed505a 100644 --- a/extern/storage-sealing/types.go +++ b/extern/storage-sealing/types.go @@ -11,39 +11,22 @@ import ( "github.com/filecoin-project/go-state-types/exitcode" "github.com/filecoin-project/specs-storage/storage" + "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/chain/actors/builtin/miner" sectorstorage "github.com/filecoin-project/lotus/extern/sector-storage" "github.com/filecoin-project/lotus/extern/storage-sealing/sealiface" - "github.com/filecoin-project/specs-actors/v2/actors/builtin/market" ) // Piece is a tuple of piece and deal info type PieceWithDealInfo struct { Piece abi.PieceInfo - DealInfo DealInfo + DealInfo api.PieceDealInfo } // Piece is a tuple of piece info and optional deal type Piece struct { Piece abi.PieceInfo - DealInfo *DealInfo // nil for pieces which do not appear in deals (e.g. 
filler pieces) -} - -// DealInfo is a tuple of deal identity and its schedule -type DealInfo struct { - PublishCid *cid.Cid - DealID abi.DealID - DealProposal *market.DealProposal - DealSchedule DealSchedule - KeepUnsealed bool -} - -// DealSchedule communicates the time interval of a storage deal. The deal must -// appear in a sealed (proven) sector no later than StartEpoch, otherwise it -// is invalid. -type DealSchedule struct { - StartEpoch abi.ChainEpoch - EndEpoch abi.ChainEpoch + DealInfo *api.PieceDealInfo // nil for pieces which do not appear in deals (e.g. filler pieces) } type Log struct { diff --git a/extern/storage-sealing/types_test.go b/extern/storage-sealing/types_test.go index aa314c37a..68e2b1111 100644 --- a/extern/storage-sealing/types_test.go +++ b/extern/storage-sealing/types_test.go @@ -10,6 +10,7 @@ import ( cborutil "github.com/filecoin-project/go-cbor-util" "github.com/filecoin-project/go-state-types/abi" + api "github.com/filecoin-project/lotus/api" market2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/market" tutils "github.com/filecoin-project/specs-actors/v2/support/testing" ) @@ -22,9 +23,9 @@ func TestSectorInfoSerialization(t *testing.T) { t.Fatal(err) } - dealInfo := DealInfo{ + dealInfo := api.PieceDealInfo{ DealID: d, - DealSchedule: DealSchedule{ + DealSchedule: api.DealSchedule{ StartEpoch: 0, EndEpoch: 100, }, diff --git a/extern/storage-sealing/utils.go b/extern/storage-sealing/utils.go index dadef227d..3dc4c4d1e 100644 --- a/extern/storage-sealing/utils.go +++ b/extern/storage-sealing/utils.go @@ -1,9 +1,16 @@ package sealing import ( + "context" "math/bits" + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + + "github.com/filecoin-project/lotus/extern/storage-sealing/sealiface" ) func fillersFromRem(in abi.UnpaddedPieceSize) ([]abi.UnpaddedPieceSize, error) { @@ -55,3 +62,30 @@ func (m 
*Sealing) GetSectorInfo(sid abi.SectorNumber) (SectorInfo, error) { err := m.sectors.Get(uint64(sid)).Get(&out) return out, err } + +func collateralSendAmount(ctx context.Context, api interface { + StateMinerAvailableBalance(context.Context, address.Address, TipSetToken) (big.Int, error) +}, maddr address.Address, cfg sealiface.Config, collateral abi.TokenAmount) (abi.TokenAmount, error) { + if cfg.CollateralFromMinerBalance { + if cfg.DisableCollateralFallback { + return big.Zero(), nil + } + + avail, err := api.StateMinerAvailableBalance(ctx, maddr, nil) + if err != nil { + return big.Zero(), xerrors.Errorf("getting available miner balance: %w", err) + } + + avail = big.Sub(avail, cfg.AvailableBalanceBuffer) + if avail.LessThan(big.Zero()) { + avail = big.Zero() + } + + collateral = big.Sub(collateral, avail) + if collateral.LessThan(big.Zero()) { + collateral = big.Zero() + } + } + + return collateral, nil +} diff --git a/gateway/handler.go b/gateway/handler.go new file mode 100644 index 000000000..3273c66db --- /dev/null +++ b/gateway/handler.go @@ -0,0 +1,48 @@ +package gateway + +import ( + "net/http" + + "contrib.go.opencensus.io/exporter/prometheus" + "github.com/filecoin-project/go-jsonrpc" + "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/api/v0api" + "github.com/filecoin-project/lotus/api/v1api" + "github.com/filecoin-project/lotus/metrics" + "github.com/gorilla/mux" + promclient "github.com/prometheus/client_golang/prometheus" +) + +// Handler returns a gateway http.Handler, to be mounted as-is on the server. +func Handler(a api.Gateway, opts ...jsonrpc.ServerOption) (http.Handler, error) { + m := mux.NewRouter() + + serveRpc := func(path string, hnd interface{}) { + rpcServer := jsonrpc.NewServer(opts...) 
+ rpcServer.Register("Filecoin", hnd) + m.Handle(path, rpcServer) + } + + ma := metrics.MetricedGatewayAPI(a) + + serveRpc("/rpc/v1", ma) + serveRpc("/rpc/v0", api.Wrap(new(v1api.FullNodeStruct), new(v0api.WrapperV1Full), ma)) + + registry := promclient.DefaultRegisterer.(*promclient.Registry) + exporter, err := prometheus.NewExporter(prometheus.Options{ + Registry: registry, + Namespace: "lotus_gw", + }) + if err != nil { + return nil, err + } + m.Handle("/debug/metrics", exporter) + m.PathPrefix("/").Handler(http.DefaultServeMux) + + /*ah := &auth.Handler{ + Verify: nodeApi.AuthVerify, + Next: mux.ServeHTTP, + }*/ + + return m, nil +} diff --git a/gateway/node.go b/gateway/node.go new file mode 100644 index 000000000..3c7a67196 --- /dev/null +++ b/gateway/node.go @@ -0,0 +1,424 @@ +package gateway + +import ( + "context" + "fmt" + "time" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-bitfield" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/crypto" + "github.com/filecoin-project/go-state-types/dline" + "github.com/filecoin-project/go-state-types/network" + "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/chain/actors/builtin/miner" + "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/lib/sigs" + _ "github.com/filecoin-project/lotus/lib/sigs/bls" + _ "github.com/filecoin-project/lotus/lib/sigs/secp" + "github.com/filecoin-project/lotus/node/impl/full" + "github.com/ipfs/go-cid" +) + +const ( + DefaultLookbackCap = time.Hour * 24 + DefaultStateWaitLookbackLimit = abi.ChainEpoch(20) +) + +// TargetAPI defines the API methods that the Node depends on +// (to make it easy to mock for tests) +type TargetAPI interface { + Version(context.Context) (api.APIVersion, error) + ChainGetBlockMessages(context.Context, cid.Cid) (*api.BlockMessages, error) + ChainGetMessage(ctx context.Context, 
mc cid.Cid) (*types.Message, error) + ChainGetNode(ctx context.Context, p string) (*api.IpldObject, error) + ChainGetTipSet(ctx context.Context, tsk types.TipSetKey) (*types.TipSet, error) + ChainGetTipSetByHeight(ctx context.Context, h abi.ChainEpoch, tsk types.TipSetKey) (*types.TipSet, error) + ChainHasObj(context.Context, cid.Cid) (bool, error) + ChainHead(ctx context.Context) (*types.TipSet, error) + ChainNotify(context.Context) (<-chan []*api.HeadChange, error) + ChainReadObj(context.Context, cid.Cid) ([]byte, error) + GasEstimateMessageGas(ctx context.Context, msg *types.Message, spec *api.MessageSendSpec, tsk types.TipSetKey) (*types.Message, error) + MpoolPushUntrusted(ctx context.Context, sm *types.SignedMessage) (cid.Cid, error) + MsigGetAvailableBalance(ctx context.Context, addr address.Address, tsk types.TipSetKey) (types.BigInt, error) + MsigGetVested(ctx context.Context, addr address.Address, start types.TipSetKey, end types.TipSetKey) (types.BigInt, error) + MsigGetPending(ctx context.Context, addr address.Address, ts types.TipSetKey) ([]*api.MsigTransaction, error) + StateAccountKey(ctx context.Context, addr address.Address, tsk types.TipSetKey) (address.Address, error) + StateDealProviderCollateralBounds(ctx context.Context, size abi.PaddedPieceSize, verified bool, tsk types.TipSetKey) (api.DealCollateralBounds, error) + StateGetActor(ctx context.Context, actor address.Address, ts types.TipSetKey) (*types.Actor, error) + StateLookupID(ctx context.Context, addr address.Address, tsk types.TipSetKey) (address.Address, error) + StateListMiners(ctx context.Context, tsk types.TipSetKey) ([]address.Address, error) + StateMarketBalance(ctx context.Context, addr address.Address, tsk types.TipSetKey) (api.MarketBalance, error) + StateMarketStorageDeal(ctx context.Context, dealId abi.DealID, tsk types.TipSetKey) (*api.MarketDeal, error) + StateNetworkVersion(context.Context, types.TipSetKey) (network.Version, error) + StateSearchMsg(ctx context.Context, from 
types.TipSetKey, msg cid.Cid, limit abi.ChainEpoch, allowReplaced bool) (*api.MsgLookup, error) + StateWaitMsg(ctx context.Context, cid cid.Cid, confidence uint64, limit abi.ChainEpoch, allowReplaced bool) (*api.MsgLookup, error) + StateReadState(ctx context.Context, actor address.Address, tsk types.TipSetKey) (*api.ActorState, error) + StateMinerPower(context.Context, address.Address, types.TipSetKey) (*api.MinerPower, error) + StateMinerFaults(context.Context, address.Address, types.TipSetKey) (bitfield.BitField, error) + StateMinerRecoveries(context.Context, address.Address, types.TipSetKey) (bitfield.BitField, error) + StateMinerInfo(context.Context, address.Address, types.TipSetKey) (miner.MinerInfo, error) + StateMinerDeadlines(context.Context, address.Address, types.TipSetKey) ([]api.Deadline, error) + StateMinerAvailableBalance(context.Context, address.Address, types.TipSetKey) (types.BigInt, error) + StateMinerProvingDeadline(context.Context, address.Address, types.TipSetKey) (*dline.Info, error) + StateCirculatingSupply(context.Context, types.TipSetKey) (abi.TokenAmount, error) + StateSectorGetInfo(ctx context.Context, maddr address.Address, n abi.SectorNumber, tsk types.TipSetKey) (*miner.SectorOnChainInfo, error) + StateVerifiedClientStatus(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*abi.StoragePower, error) + StateVMCirculatingSupplyInternal(context.Context, types.TipSetKey) (api.CirculatingSupply, error) + WalletBalance(context.Context, address.Address) (types.BigInt, error) //perm:read +} + +var _ TargetAPI = *new(api.FullNode) // gateway depends on latest + +type Node struct { + target TargetAPI + lookbackCap time.Duration + stateWaitLookbackLimit abi.ChainEpoch + errLookback error +} + +var ( + _ api.Gateway = (*Node)(nil) + _ full.ChainModuleAPI = (*Node)(nil) + _ full.GasModuleAPI = (*Node)(nil) + _ full.MpoolModuleAPI = (*Node)(nil) + _ full.StateModuleAPI = (*Node)(nil) +) + +// NewNode creates a new gateway node. 
+func NewNode(api TargetAPI, lookbackCap time.Duration, stateWaitLookbackLimit abi.ChainEpoch) *Node { + return &Node{ + target: api, + lookbackCap: lookbackCap, + stateWaitLookbackLimit: stateWaitLookbackLimit, + errLookback: fmt.Errorf("lookbacks of more than %s are disallowed", lookbackCap), + } +} + +func (gw *Node) checkTipsetKey(ctx context.Context, tsk types.TipSetKey) error { + if tsk.IsEmpty() { + return nil + } + + ts, err := gw.target.ChainGetTipSet(ctx, tsk) + if err != nil { + return err + } + + return gw.checkTipset(ts) +} + +func (gw *Node) checkTipset(ts *types.TipSet) error { + at := time.Unix(int64(ts.Blocks()[0].Timestamp), 0) + if err := gw.checkTimestamp(at); err != nil { + return fmt.Errorf("bad tipset: %w", err) + } + return nil +} + +func (gw *Node) checkTipsetHeight(ts *types.TipSet, h abi.ChainEpoch) error { + tsBlock := ts.Blocks()[0] + heightDelta := time.Duration(uint64(tsBlock.Height-h)*build.BlockDelaySecs) * time.Second + timeAtHeight := time.Unix(int64(tsBlock.Timestamp), 0).Add(-heightDelta) + + if err := gw.checkTimestamp(timeAtHeight); err != nil { + return fmt.Errorf("bad tipset height: %w", err) + } + return nil +} + +func (gw *Node) checkTimestamp(at time.Time) error { + if time.Since(at) > gw.lookbackCap { + return gw.errLookback + } + return nil +} + +func (gw *Node) Version(ctx context.Context) (api.APIVersion, error) { + return gw.target.Version(ctx) +} + +func (gw *Node) ChainGetBlockMessages(ctx context.Context, c cid.Cid) (*api.BlockMessages, error) { + return gw.target.ChainGetBlockMessages(ctx, c) +} + +func (gw *Node) ChainHasObj(ctx context.Context, c cid.Cid) (bool, error) { + return gw.target.ChainHasObj(ctx, c) +} + +func (gw *Node) ChainHead(ctx context.Context) (*types.TipSet, error) { + // TODO: cache and invalidate cache when timestamp is up (or have internal ChainNotify) + + return gw.target.ChainHead(ctx) +} + +func (gw *Node) ChainGetMessage(ctx context.Context, mc cid.Cid) (*types.Message, error) { + 
return gw.target.ChainGetMessage(ctx, mc) +} + +func (gw *Node) ChainGetTipSet(ctx context.Context, tsk types.TipSetKey) (*types.TipSet, error) { + return gw.target.ChainGetTipSet(ctx, tsk) +} + +func (gw *Node) ChainGetTipSetByHeight(ctx context.Context, h abi.ChainEpoch, tsk types.TipSetKey) (*types.TipSet, error) { + var ts *types.TipSet + if tsk.IsEmpty() { + head, err := gw.target.ChainHead(ctx) + if err != nil { + return nil, err + } + ts = head + } else { + gts, err := gw.target.ChainGetTipSet(ctx, tsk) + if err != nil { + return nil, err + } + ts = gts + } + + // Check if the tipset key refers to gw tipset that's too far in the past + if err := gw.checkTipset(ts); err != nil { + return nil, err + } + + // Check if the height is too far in the past + if err := gw.checkTipsetHeight(ts, h); err != nil { + return nil, err + } + + return gw.target.ChainGetTipSetByHeight(ctx, h, tsk) +} + +func (gw *Node) ChainGetNode(ctx context.Context, p string) (*api.IpldObject, error) { + return gw.target.ChainGetNode(ctx, p) +} + +func (gw *Node) ChainNotify(ctx context.Context) (<-chan []*api.HeadChange, error) { + return gw.target.ChainNotify(ctx) +} + +func (gw *Node) ChainReadObj(ctx context.Context, c cid.Cid) ([]byte, error) { + return gw.target.ChainReadObj(ctx, c) +} + +func (gw *Node) GasEstimateMessageGas(ctx context.Context, msg *types.Message, spec *api.MessageSendSpec, tsk types.TipSetKey) (*types.Message, error) { + if err := gw.checkTipsetKey(ctx, tsk); err != nil { + return nil, err + } + + return gw.target.GasEstimateMessageGas(ctx, msg, spec, tsk) +} + +func (gw *Node) MpoolPush(ctx context.Context, sm *types.SignedMessage) (cid.Cid, error) { + // TODO: additional anti-spam checks + return gw.target.MpoolPushUntrusted(ctx, sm) +} + +func (gw *Node) MsigGetAvailableBalance(ctx context.Context, addr address.Address, tsk types.TipSetKey) (types.BigInt, error) { + if err := gw.checkTipsetKey(ctx, tsk); err != nil { + return types.NewInt(0), err + } + + return 
gw.target.MsigGetAvailableBalance(ctx, addr, tsk) +} + +func (gw *Node) MsigGetVested(ctx context.Context, addr address.Address, start types.TipSetKey, end types.TipSetKey) (types.BigInt, error) { + if err := gw.checkTipsetKey(ctx, start); err != nil { + return types.NewInt(0), err + } + if err := gw.checkTipsetKey(ctx, end); err != nil { + return types.NewInt(0), err + } + + return gw.target.MsigGetVested(ctx, addr, start, end) +} + +func (gw *Node) MsigGetPending(ctx context.Context, addr address.Address, tsk types.TipSetKey) ([]*api.MsigTransaction, error) { + if err := gw.checkTipsetKey(ctx, tsk); err != nil { + return nil, err + } + + return gw.target.MsigGetPending(ctx, addr, tsk) +} + +func (gw *Node) StateAccountKey(ctx context.Context, addr address.Address, tsk types.TipSetKey) (address.Address, error) { + if err := gw.checkTipsetKey(ctx, tsk); err != nil { + return address.Undef, err + } + + return gw.target.StateAccountKey(ctx, addr, tsk) +} + +func (gw *Node) StateDealProviderCollateralBounds(ctx context.Context, size abi.PaddedPieceSize, verified bool, tsk types.TipSetKey) (api.DealCollateralBounds, error) { + if err := gw.checkTipsetKey(ctx, tsk); err != nil { + return api.DealCollateralBounds{}, err + } + + return gw.target.StateDealProviderCollateralBounds(ctx, size, verified, tsk) +} + +func (gw *Node) StateGetActor(ctx context.Context, actor address.Address, tsk types.TipSetKey) (*types.Actor, error) { + if err := gw.checkTipsetKey(ctx, tsk); err != nil { + return nil, err + } + + return gw.target.StateGetActor(ctx, actor, tsk) +} + +func (gw *Node) StateListMiners(ctx context.Context, tsk types.TipSetKey) ([]address.Address, error) { + if err := gw.checkTipsetKey(ctx, tsk); err != nil { + return nil, err + } + + return gw.target.StateListMiners(ctx, tsk) +} + +func (gw *Node) StateLookupID(ctx context.Context, addr address.Address, tsk types.TipSetKey) (address.Address, error) { + if err := gw.checkTipsetKey(ctx, tsk); err != nil { + return 
address.Undef, err + } + + return gw.target.StateLookupID(ctx, addr, tsk) +} + +func (gw *Node) StateMarketBalance(ctx context.Context, addr address.Address, tsk types.TipSetKey) (api.MarketBalance, error) { + if err := gw.checkTipsetKey(ctx, tsk); err != nil { + return api.MarketBalance{}, err + } + + return gw.target.StateMarketBalance(ctx, addr, tsk) +} + +func (gw *Node) StateMarketStorageDeal(ctx context.Context, dealId abi.DealID, tsk types.TipSetKey) (*api.MarketDeal, error) { + if err := gw.checkTipsetKey(ctx, tsk); err != nil { + return nil, err + } + + return gw.target.StateMarketStorageDeal(ctx, dealId, tsk) +} + +func (gw *Node) StateNetworkVersion(ctx context.Context, tsk types.TipSetKey) (network.Version, error) { + if err := gw.checkTipsetKey(ctx, tsk); err != nil { + return network.VersionMax, err + } + + return gw.target.StateNetworkVersion(ctx, tsk) +} + +func (gw *Node) StateSearchMsg(ctx context.Context, from types.TipSetKey, msg cid.Cid, limit abi.ChainEpoch, allowReplaced bool) (*api.MsgLookup, error) { + if limit == api.LookbackNoLimit { + limit = gw.stateWaitLookbackLimit + } + if gw.stateWaitLookbackLimit != api.LookbackNoLimit && limit > gw.stateWaitLookbackLimit { + limit = gw.stateWaitLookbackLimit + } + if err := gw.checkTipsetKey(ctx, from); err != nil { + return nil, err + } + + return gw.target.StateSearchMsg(ctx, from, msg, limit, allowReplaced) +} + +func (gw *Node) StateWaitMsg(ctx context.Context, msg cid.Cid, confidence uint64, limit abi.ChainEpoch, allowReplaced bool) (*api.MsgLookup, error) { + if limit == api.LookbackNoLimit { + limit = gw.stateWaitLookbackLimit + } + if gw.stateWaitLookbackLimit != api.LookbackNoLimit && limit > gw.stateWaitLookbackLimit { + limit = gw.stateWaitLookbackLimit + } + + return gw.target.StateWaitMsg(ctx, msg, confidence, limit, allowReplaced) +} + +func (gw *Node) StateReadState(ctx context.Context, actor address.Address, tsk types.TipSetKey) (*api.ActorState, error) { + if err := 
gw.checkTipsetKey(ctx, tsk); err != nil { + return nil, err + } + return gw.target.StateReadState(ctx, actor, tsk) +} + +func (gw *Node) StateMinerPower(ctx context.Context, m address.Address, tsk types.TipSetKey) (*api.MinerPower, error) { + if err := gw.checkTipsetKey(ctx, tsk); err != nil { + return nil, err + } + return gw.target.StateMinerPower(ctx, m, tsk) +} + +func (gw *Node) StateMinerFaults(ctx context.Context, m address.Address, tsk types.TipSetKey) (bitfield.BitField, error) { + if err := gw.checkTipsetKey(ctx, tsk); err != nil { + return bitfield.BitField{}, err + } + return gw.target.StateMinerFaults(ctx, m, tsk) +} +func (gw *Node) StateMinerRecoveries(ctx context.Context, m address.Address, tsk types.TipSetKey) (bitfield.BitField, error) { + if err := gw.checkTipsetKey(ctx, tsk); err != nil { + return bitfield.BitField{}, err + } + return gw.target.StateMinerRecoveries(ctx, m, tsk) +} + +func (gw *Node) StateMinerInfo(ctx context.Context, m address.Address, tsk types.TipSetKey) (miner.MinerInfo, error) { + if err := gw.checkTipsetKey(ctx, tsk); err != nil { + return miner.MinerInfo{}, err + } + return gw.target.StateMinerInfo(ctx, m, tsk) +} + +func (gw *Node) StateMinerDeadlines(ctx context.Context, m address.Address, tsk types.TipSetKey) ([]api.Deadline, error) { + if err := gw.checkTipsetKey(ctx, tsk); err != nil { + return nil, err + } + return gw.target.StateMinerDeadlines(ctx, m, tsk) +} + +func (gw *Node) StateMinerAvailableBalance(ctx context.Context, m address.Address, tsk types.TipSetKey) (types.BigInt, error) { + if err := gw.checkTipsetKey(ctx, tsk); err != nil { + return types.BigInt{}, err + } + return gw.target.StateMinerAvailableBalance(ctx, m, tsk) +} + +func (gw *Node) StateMinerProvingDeadline(ctx context.Context, m address.Address, tsk types.TipSetKey) (*dline.Info, error) { + if err := gw.checkTipsetKey(ctx, tsk); err != nil { + return nil, err + } + return gw.target.StateMinerProvingDeadline(ctx, m, tsk) +} + +func (gw *Node) 
StateCirculatingSupply(ctx context.Context, tsk types.TipSetKey) (abi.TokenAmount, error) { + if err := gw.checkTipsetKey(ctx, tsk); err != nil { + return types.BigInt{}, err + } + return gw.target.StateCirculatingSupply(ctx, tsk) +} + +func (gw *Node) StateSectorGetInfo(ctx context.Context, maddr address.Address, n abi.SectorNumber, tsk types.TipSetKey) (*miner.SectorOnChainInfo, error) { + if err := gw.checkTipsetKey(ctx, tsk); err != nil { + return nil, err + } + return gw.target.StateSectorGetInfo(ctx, maddr, n, tsk) +} + +func (gw *Node) StateVerifiedClientStatus(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*abi.StoragePower, error) { + if err := gw.checkTipsetKey(ctx, tsk); err != nil { + return nil, err + } + return gw.target.StateVerifiedClientStatus(ctx, addr, tsk) +} + +func (gw *Node) StateVMCirculatingSupplyInternal(ctx context.Context, tsk types.TipSetKey) (api.CirculatingSupply, error) { + if err := gw.checkTipsetKey(ctx, tsk); err != nil { + return api.CirculatingSupply{}, err + } + return gw.target.StateVMCirculatingSupplyInternal(ctx, tsk) +} + +func (gw *Node) WalletVerify(ctx context.Context, k address.Address, msg []byte, sig *crypto.Signature) (bool, error) { + return sigs.Verify(sig, k, msg) == nil, nil +} + +func (gw *Node) WalletBalance(ctx context.Context, k address.Address) (types.BigInt, error) { + return gw.target.WalletBalance(ctx, k) +} diff --git a/cmd/lotus-gateway/api_test.go b/gateway/node_test.go similarity index 91% rename from cmd/lotus-gateway/api_test.go rename to gateway/node_test.go index 23d2cbf3a..68711cca6 100644 --- a/cmd/lotus-gateway/api_test.go +++ b/gateway/node_test.go @@ -1,4 +1,4 @@ -package main +package gateway import ( "context" @@ -6,26 +6,24 @@ import ( "testing" "time" - "github.com/filecoin-project/go-state-types/network" - "github.com/filecoin-project/lotus/chain/actors/builtin/miner" - - "github.com/filecoin-project/lotus/build" - + "github.com/ipfs/go-cid" 
"github.com/stretchr/testify/require" - "github.com/filecoin-project/lotus/chain/types/mock" - "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/network" + "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/chain/actors/builtin/miner" "github.com/filecoin-project/lotus/chain/types" - "github.com/ipfs/go-cid" + "github.com/filecoin-project/lotus/chain/types/mock" ) func TestGatewayAPIChainGetTipSetByHeight(t *testing.T) { ctx := context.Background() - lookbackTimestamp := uint64(time.Now().Unix()) - uint64(LookbackCap.Seconds()) + lookbackTimestamp := uint64(time.Now().Unix()) - uint64(DefaultLookbackCap.Seconds()) type args struct { h abi.ChainEpoch tskh abi.ChainEpoch @@ -91,7 +89,7 @@ func TestGatewayAPIChainGetTipSetByHeight(t *testing.T) { tt := tt t.Run(tt.name, func(t *testing.T) { mock := &mockGatewayDepsAPI{} - a := NewGatewayAPI(mock) + a := NewNode(mock, DefaultLookbackCap, DefaultStateWaitLookbackLimit) // Create tipsets from genesis up to tskh and return the highest ts := mock.createTipSets(tt.args.tskh, tt.args.genesisTS) @@ -111,7 +109,7 @@ type mockGatewayDepsAPI struct { lk sync.RWMutex tipsets []*types.TipSet - gatewayDepsAPI // satisfies all interface requirements but will panic if + TargetAPI // satisfies all interface requirements but will panic if // methods are called. 
easier than filling out with panic stubs IMO } @@ -235,3 +233,19 @@ func (m *mockGatewayDepsAPI) StateWaitMsgLimited(ctx context.Context, msg cid.Ci func (m *mockGatewayDepsAPI) StateReadState(ctx context.Context, act address.Address, ts types.TipSetKey) (*api.ActorState, error) { panic("implement me") } + +func (m *mockGatewayDepsAPI) Version(context.Context) (api.APIVersion, error) { + return api.APIVersion{ + APIVersion: api.FullAPIVersion1, + }, nil +} + +func TestGatewayVersion(t *testing.T) { + ctx := context.Background() + mock := &mockGatewayDepsAPI{} + a := NewNode(mock, DefaultLookbackCap, DefaultStateWaitLookbackLimit) + + v, err := a.Version(ctx) + require.NoError(t, err) + require.Equal(t, api.FullAPIVersion1, v.APIVersion) +} diff --git a/gen/main.go b/gen/main.go index 9548344fd..0018b241d 100644 --- a/gen/main.go +++ b/gen/main.go @@ -53,6 +53,8 @@ func main() { api.SealedRefs{}, api.SealTicket{}, api.SealSeed{}, + api.PieceDealInfo{}, + api.DealSchedule{}, ) if err != nil { fmt.Println(err) diff --git a/genesis/types.go b/genesis/types.go index db8d32a3b..d4c04113a 100644 --- a/genesis/types.go +++ b/genesis/types.go @@ -3,6 +3,8 @@ package genesis import ( "encoding/json" + "github.com/filecoin-project/go-state-types/network" + "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" "github.com/ipfs/go-cid" @@ -75,8 +77,9 @@ type Actor struct { } type Template struct { - Accounts []Actor - Miners []Miner + NetworkVersion network.Version + Accounts []Actor + Miners []Miner NetworkName string Timestamp uint64 `json:",omitempty"` diff --git a/go.mod b/go.mod index a29a108b0..198b862e6 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module github.com/filecoin-project/lotus -go 1.15 +go 1.16 require ( contrib.go.opencensus.io/exporter/jaeger v0.1.0 @@ -8,6 +8,7 @@ require ( github.com/BurntSushi/toml v0.3.1 github.com/GeertJohan/go.rice v1.0.0 github.com/Gurpartap/async v0.0.0-20180927173644-4f7f499dd9ee + 
github.com/Kubuxu/imtui v0.0.0-20210401140320-41663d68d0fa github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d // indirect github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d github.com/alecthomas/jsonschema v0.0.0-20200530073317-71f438968921 @@ -30,11 +31,11 @@ require ( github.com/filecoin-project/go-amt-ipld/v2 v2.1.1-0.20201006184820-924ee87a1349 // indirect github.com/filecoin-project/go-bitfield v0.2.4 github.com/filecoin-project/go-cbor-util v0.0.0-20191219014500-08c40a1e63a2 - github.com/filecoin-project/go-commp-utils v0.1.0 + github.com/filecoin-project/go-commp-utils v0.1.1-0.20210427191551-70bf140d31c7 github.com/filecoin-project/go-crypto v0.0.0-20191218222705-effae4ea9f03 - github.com/filecoin-project/go-data-transfer v1.4.3 + github.com/filecoin-project/go-data-transfer v1.6.0 github.com/filecoin-project/go-fil-commcid v0.0.0-20201016201715-d41df56b4f6a - github.com/filecoin-project/go-fil-markets v1.2.5 + github.com/filecoin-project/go-fil-markets v1.5.0 github.com/filecoin-project/go-jsonrpc v0.1.4-0.20210217175800-45ea43ac2bec github.com/filecoin-project/go-multistore v0.0.3 github.com/filecoin-project/go-padreader v0.0.0-20200903213702-ed5fae088b20 @@ -43,17 +44,18 @@ require ( github.com/filecoin-project/go-statemachine v0.0.0-20200925024713-05bd7c71fbfe github.com/filecoin-project/go-statestore v0.1.1 github.com/filecoin-project/go-storedcounter v0.0.0-20200421200003-1c99c62e8a5b - github.com/filecoin-project/specs-actors v0.9.13 - github.com/filecoin-project/specs-actors/v2 v2.3.5-0.20210114162132-5b58b773f4fb - github.com/filecoin-project/specs-actors/v3 v3.1.0 - github.com/filecoin-project/specs-actors/v4 v4.0.0 + github.com/filecoin-project/specs-actors v0.9.14 + github.com/filecoin-project/specs-actors/v2 v2.3.5 + github.com/filecoin-project/specs-actors/v3 v3.1.1 + github.com/filecoin-project/specs-actors/v4 v4.0.1 github.com/filecoin-project/specs-actors/v5 v5.0.1 github.com/filecoin-project/specs-storage 
v0.1.1-0.20201105051918-5188d9774506 github.com/filecoin-project/test-vectors/schema v0.0.5 github.com/gbrlsnchs/jwt/v3 v3.0.0-beta.1 + github.com/gdamore/tcell/v2 v2.2.0 github.com/go-kit/kit v0.10.0 github.com/go-ole/go-ole v1.2.4 // indirect - github.com/golang/mock v1.4.4 + github.com/golang/mock v1.5.0 github.com/google/uuid v1.1.2 github.com/gorilla/mux v1.7.4 github.com/gorilla/websocket v1.4.2 @@ -75,7 +77,7 @@ require ( github.com/ipfs/go-ds-pebble v0.0.2-0.20200921225637-ce220f8ac459 github.com/ipfs/go-filestore v1.0.0 github.com/ipfs/go-fs-lock v0.0.6 - github.com/ipfs/go-graphsync v0.6.0 + github.com/ipfs/go-graphsync v0.6.1 github.com/ipfs/go-ipfs-blockstore v1.0.3 github.com/ipfs/go-ipfs-chunker v0.0.5 github.com/ipfs/go-ipfs-ds-help v1.0.0 @@ -87,7 +89,7 @@ require ( github.com/ipfs/go-ipfs-util v0.0.2 github.com/ipfs/go-ipld-cbor v0.0.5 github.com/ipfs/go-ipld-format v0.2.0 - github.com/ipfs/go-log/v2 v2.1.3 + github.com/ipfs/go-log/v2 v2.3.0 github.com/ipfs/go-merkledag v0.3.2 github.com/ipfs/go-metrics-interface v0.0.1 github.com/ipfs/go-metrics-prometheus v0.0.2 @@ -100,37 +102,38 @@ require ( github.com/lib/pq v1.7.0 github.com/libp2p/go-buffer-pool v0.0.2 github.com/libp2p/go-eventbus v0.2.1 - github.com/libp2p/go-libp2p v0.12.0 + github.com/libp2p/go-libp2p v0.14.2 github.com/libp2p/go-libp2p-connmgr v0.2.4 - github.com/libp2p/go-libp2p-core v0.7.0 + github.com/libp2p/go-libp2p-core v0.8.5 github.com/libp2p/go-libp2p-discovery v0.5.0 github.com/libp2p/go-libp2p-kad-dht v0.11.0 - github.com/libp2p/go-libp2p-mplex v0.3.0 - github.com/libp2p/go-libp2p-noise v0.1.2 - github.com/libp2p/go-libp2p-peerstore v0.2.6 - github.com/libp2p/go-libp2p-pubsub v0.4.2-0.20210212194758-6c1addf493eb - github.com/libp2p/go-libp2p-quic-transport v0.9.0 + github.com/libp2p/go-libp2p-mplex v0.4.1 + github.com/libp2p/go-libp2p-noise v0.2.0 + github.com/libp2p/go-libp2p-peerstore v0.2.7 + github.com/libp2p/go-libp2p-pubsub v0.5.0 + 
github.com/libp2p/go-libp2p-quic-transport v0.10.0 github.com/libp2p/go-libp2p-record v0.1.3 github.com/libp2p/go-libp2p-routing-helpers v0.2.3 - github.com/libp2p/go-libp2p-swarm v0.3.1 + github.com/libp2p/go-libp2p-swarm v0.5.0 github.com/libp2p/go-libp2p-tls v0.1.3 - github.com/libp2p/go-libp2p-yamux v0.4.1 + github.com/libp2p/go-libp2p-yamux v0.5.4 github.com/libp2p/go-maddr-filter v0.1.0 github.com/mattn/go-colorable v0.1.6 // indirect + github.com/mattn/go-isatty v0.0.13 github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1 github.com/mitchellh/go-homedir v1.1.0 github.com/multiformats/go-base32 v0.0.3 github.com/multiformats/go-multiaddr v0.3.1 - github.com/multiformats/go-multiaddr-dns v0.2.0 + github.com/multiformats/go-multiaddr-dns v0.3.1 github.com/multiformats/go-multibase v0.0.3 github.com/multiformats/go-multihash v0.0.14 - github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e // indirect github.com/open-rpc/meta-schema v0.0.0-20201029221707-1b72ef2ea333 github.com/opentracing/opentracing-go v1.2.0 github.com/polydawn/refmt v0.0.0-20190809202753-05966cbd336a github.com/prometheus/client_golang v1.6.0 github.com/raulk/clock v1.1.0 github.com/raulk/go-watchdog v1.0.1 + github.com/streadway/quantile v0.0.0-20150917103942-b0c588724d25 github.com/stretchr/objx v0.2.0 // indirect github.com/stretchr/testify v1.7.0 github.com/syndtr/goleveldb v1.0.0 @@ -141,26 +144,25 @@ require ( github.com/whyrusleeping/multiaddr-filter v0.0.0-20160516205228-e903e4adabd7 github.com/whyrusleeping/pubsub v0.0.0-20190708150250-92bcb0691325 github.com/xorcare/golden v0.6.1-0.20191112154924-b87f686d7542 - go.etcd.io/bbolt v1.3.4 - go.opencensus.io v0.22.5 + go.opencensus.io v0.23.0 go.uber.org/dig v1.10.0 // indirect go.uber.org/fx v1.9.0 go.uber.org/multierr v1.6.0 go.uber.org/zap v1.16.0 - golang.org/x/net v0.0.0-20201022231255-08b38378de70 - golang.org/x/sync v0.0.0-20201207232520-09787c993a3a - golang.org/x/sys v0.0.0-20201119102817-f84b799fce68 + 
golang.org/x/net v0.0.0-20210423184538-5f58ad60dda6 + golang.org/x/sync v0.0.0-20210220032951-036812b2e83c + golang.org/x/sys v0.0.0-20210426080607-c94f62235c83 golang.org/x/time v0.0.0-20191024005414-555d28b269f0 + golang.org/x/tools v0.0.0-20210106214847-113979e3529a golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 - gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f // indirect gopkg.in/cheggaaa/pb.v1 v1.0.28 gotest.tools v2.2.0+incompatible honnef.co/go/tools v0.0.1-2020.1.3 // indirect ) -replace github.com/filecoin-project/lotus => ./ +replace github.com/libp2p/go-libp2p-yamux => github.com/libp2p/go-libp2p-yamux v0.5.1 -replace github.com/golangci/golangci-lint => github.com/golangci/golangci-lint v1.18.0 +replace github.com/filecoin-project/lotus => ./ replace github.com/filecoin-project/filecoin-ffi => ./extern/filecoin-ffi diff --git a/go.sum b/go.sum index 89af9d973..2624a0d3f 100644 --- a/go.sum +++ b/go.sum @@ -42,6 +42,8 @@ github.com/Gurpartap/async v0.0.0-20180927173644-4f7f499dd9ee h1:8doiS7ib3zi6/K1 github.com/Gurpartap/async v0.0.0-20180927173644-4f7f499dd9ee/go.mod h1:W0GbEAA4uFNYOGG2cJpmFJ04E6SD1NLELPYZB57/7AY= github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= github.com/Kubuxu/go-os-helper v0.0.1/go.mod h1:N8B+I7vPCT80IcP58r50u4+gEEcsZETFUpAzWW2ep1Y= +github.com/Kubuxu/imtui v0.0.0-20210401140320-41663d68d0fa h1:1PPxEyGdIGVkX/kqMvLJ95a1dGS1Sz7tpNEgehEYYt0= +github.com/Kubuxu/imtui v0.0.0-20210401140320-41663d68d0fa/go.mod h1:WUmMvh9wMtqj1Xhf1hf3kp9RvL+y6odtdYxpyZjb90U= github.com/Masterminds/glide v0.13.2/go.mod h1:STyF5vcenH/rUqTEv+/hBXlSTo7KYwg2oc2f4tzPWic= github.com/Masterminds/semver v1.4.2/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= github.com/Masterminds/vcs v1.13.0/go.mod h1:N09YCmOQr6RLxC6UNHzuVwAdodYbbnycGHSmwVJjcKA= @@ -96,6 +98,8 @@ github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24 
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bep/debounce v1.2.0 h1:wXds8Kq8qRfwAOpAxHrJDbCXgC5aHSzgQb/0gKsHQqo= +github.com/bep/debounce v1.2.0/go.mod h1:H8yggRPQKLUhUoqrJC1bO2xNya7vanpDl7xR3ISbCJ0= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g= github.com/briandowns/spinner v1.11.1/go.mod h1:QOuQk7x+EaDASo80FEXwlwiA+j/PPIcX3FScO+3/ZPQ= @@ -103,14 +107,18 @@ github.com/btcsuite/btcd v0.0.0-20190213025234-306aecffea32/go.mod h1:DrZx5ec/dm github.com/btcsuite/btcd v0.0.0-20190523000118-16327141da8c/go.mod h1:3J08xEfcugPacsc34/LKRU2yO7YmuT8yt28J8k2+rrI= github.com/btcsuite/btcd v0.0.0-20190605094302-a0d1e3e36d50/go.mod h1:3J08xEfcugPacsc34/LKRU2yO7YmuT8yt28J8k2+rrI= github.com/btcsuite/btcd v0.0.0-20190824003749-130ea5bddde3/go.mod h1:3J08xEfcugPacsc34/LKRU2yO7YmuT8yt28J8k2+rrI= -github.com/btcsuite/btcd v0.20.1-beta h1:Ik4hyJqN8Jfyv3S4AGBOmyouMsYE3EdYODkMbQjwPGw= github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ= +github.com/btcsuite/btcd v0.21.0-beta h1:At9hIZdJW0s9E/fAz28nrz6AmcNlSVucCH796ZteX1M= +github.com/btcsuite/btcd v0.21.0-beta/go.mod h1:ZSWyehm27aAuS9bvkATT+Xte3hjHZ+MRgMY/8NJ7K94= github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA= github.com/btcsuite/btcutil v0.0.0-20190207003914-4c204d697803/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg= github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg= +github.com/btcsuite/btcutil v1.0.2/go.mod h1:j9HUFwoQRsZL3V4n+qG+CUnEGHOarIxfC3Le2Yhbcts= 
github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd/go.mod h1:HHNXQzUsZCxOoE+CPiyCTO6x34Zs86zZUiwtpXoGdtg= github.com/btcsuite/goleveldb v0.0.0-20160330041536-7834afc9e8cd/go.mod h1:F+uVaaLLH7j4eDXPRvw78tMflu7Ie2bzYOH4Y8rRKBY= +github.com/btcsuite/goleveldb v1.0.0/go.mod h1:QiK9vBlgftBg6rWQIj6wFzbPfRjiykIEhBH4obrXJ/I= github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc= +github.com/btcsuite/snappy-go v1.0.0/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc= github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY= github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs= github.com/buger/goterm v0.0.0-20200322175922-2f3e71b85129 h1:gfAMKE626QEuKG3si0pdTRcr/YEbBoxY+3GOH3gWvl4= @@ -147,7 +155,6 @@ github.com/cockroachdb/pebble v0.0.0-20201001221639-879f3bfeef07 h1:Cb2pZUCFXlLA github.com/cockroachdb/pebble v0.0.0-20201001221639-879f3bfeef07/go.mod h1:hU7vhtrqonEphNF+xt8/lHdaBprxmV1h8BOGrd9XwmQ= github.com/cockroachdb/redact v0.0.0-20200622112456-cd282804bbd3 h1:2+dpIJzYMSbLi0587YXpi8tOJT52qCOI/1I0UNThc/I= github.com/cockroachdb/redact v0.0.0-20200622112456-cd282804bbd3/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg= -github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd h1:qMd81Ts1T2OTKmB4acZcyKaMtRnY5Y44NuXGX2GFJ1w= github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= github.com/codegangsta/cli v1.20.0/go.mod h1:/qJNoX69yVSKu5o4jLyXAENLRyk1uhi7zkbQ3slBdOA= github.com/containerd/cgroups v0.0.0-20201119153540-4cbc285b3327 h1:7grrpcfCtbZLsjtB0DgMuzs1umsJmpzaHMZ6cO6iAWw= @@ -183,8 +190,10 @@ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod 
h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davidlazar/go-crypto v0.0.0-20170701192655-dcfb0a7ac018/go.mod h1:rQYf4tfk5sSwFsnDg3qYaBxSjsD9S8+59vW0dKUgme4= -github.com/davidlazar/go-crypto v0.0.0-20190912175916-7055855a373f h1:BOaYiTvg8p9vBUXpklC22XSK/mifLF7lG9jtmYYi3Tc= github.com/davidlazar/go-crypto v0.0.0-20190912175916-7055855a373f/go.mod h1:rQYf4tfk5sSwFsnDg3qYaBxSjsD9S8+59vW0dKUgme4= +github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c h1:pFUpOrbxDR6AkioZ1ySsx5yxlDQZ8stG2b88gTPxgJU= +github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c/go.mod h1:6UhI8N9EjYm1c2odKpFpAYeR8dsBeM7PtzQhRgxRr9U= +github.com/decred/dcrd/lru v1.0.0/go.mod h1:mxKOwFd7lFjN2GZYsiz/ecgqR6kkYAl+0pz0tEMk218= github.com/detailyang/go-fallocate v0.0.0-20180908115635-432fa640bd2e h1:lj77EKYUpYXTd8CD/+QMIf8b6OIOTsfEBSXiAzuEHTU= github.com/detailyang/go-fallocate v0.0.0-20180908115635-432fa640bd2e/go.mod h1:3ZQK6DMPSz/QZ73jlWxBtUhNA8xZx7LzUFSq/OfP8vk= github.com/dgraph-io/badger v1.5.5-0.20190226225317-8115aed38f8f/go.mod h1:VZxzAIRPHRVNRKRo6AXrX9BJegn6il06VMTZVJYCIjQ= @@ -261,21 +270,21 @@ github.com/filecoin-project/go-bitfield v0.2.4/go.mod h1:CNl9WG8hgR5mttCnUErjcQj github.com/filecoin-project/go-cbor-util v0.0.0-20191219014500-08c40a1e63a2 h1:av5fw6wmm58FYMgJeoB/lK9XXrgdugYiTqkdxjTy9k8= github.com/filecoin-project/go-cbor-util v0.0.0-20191219014500-08c40a1e63a2/go.mod h1:pqTiPHobNkOVM5thSRsHYjyQfq7O5QSCMhvuu9JoDlg= github.com/filecoin-project/go-commp-utils v0.0.0-20201119054358-b88f7a96a434/go.mod h1:6s95K91mCyHY51RPWECZieD3SGWTqIFLf1mPOes9l5U= -github.com/filecoin-project/go-commp-utils v0.1.0 h1:PaDxoXYh1TXnnz5kA/xSObpAQwcJSUs4Szb72nuaNdk= -github.com/filecoin-project/go-commp-utils v0.1.0/go.mod h1:6s95K91mCyHY51RPWECZieD3SGWTqIFLf1mPOes9l5U= +github.com/filecoin-project/go-commp-utils v0.1.1-0.20210427191551-70bf140d31c7 h1:U9Z+76pHCKBmtdxFV7JFZJj7OVm12I6dEKwtMVbq5p0= +github.com/filecoin-project/go-commp-utils 
v0.1.1-0.20210427191551-70bf140d31c7/go.mod h1:6s95K91mCyHY51RPWECZieD3SGWTqIFLf1mPOes9l5U= github.com/filecoin-project/go-crypto v0.0.0-20191218222705-effae4ea9f03 h1:2pMXdBnCiXjfCYx/hLqFxccPoqsSveQFxVLvNxy9bus= github.com/filecoin-project/go-crypto v0.0.0-20191218222705-effae4ea9f03/go.mod h1:+viYnvGtUTgJRdy6oaeF4MTFKAfatX071MPDPBL11EQ= github.com/filecoin-project/go-data-transfer v1.0.1/go.mod h1:UxvfUAY9v3ub0a21BSK9u3pB2aq30Y0KMsG+w9/ysyo= -github.com/filecoin-project/go-data-transfer v1.4.3 h1:ECEw69NOfmEZ7XN1NSBvj3KTbbH2mIczQs+Z2w4bD7c= -github.com/filecoin-project/go-data-transfer v1.4.3/go.mod h1:n8kbDQXWrY1c4UgfMa9KERxNCWbOTDwdNhf2MpN9dpo= +github.com/filecoin-project/go-data-transfer v1.6.0 h1:DHIzEc23ydRCCBwtFet3MfgO8gMpZEnw60Y+s71oX6o= +github.com/filecoin-project/go-data-transfer v1.6.0/go.mod h1:E3WW4mCEYwU2y65swPEajSZoFWFmfXt7uwGduoACZQc= github.com/filecoin-project/go-ds-versioning v0.1.0 h1:y/X6UksYTsK8TLCI7rttCKEvl8btmWxyFMEeeWGUxIQ= github.com/filecoin-project/go-ds-versioning v0.1.0/go.mod h1:mp16rb4i2QPmxBnmanUx8i/XANp+PFCCJWiAb+VW4/s= github.com/filecoin-project/go-fil-commcid v0.0.0-20200716160307-8f644712406f/go.mod h1:Eaox7Hvus1JgPrL5+M3+h7aSPHc0cVqpSxA+TxIEpZQ= github.com/filecoin-project/go-fil-commcid v0.0.0-20201016201715-d41df56b4f6a h1:hyJ+pUm/4U4RdEZBlg6k8Ma4rDiuvqyGpoICXAxwsTg= github.com/filecoin-project/go-fil-commcid v0.0.0-20201016201715-d41df56b4f6a/go.mod h1:Eaox7Hvus1JgPrL5+M3+h7aSPHc0cVqpSxA+TxIEpZQ= github.com/filecoin-project/go-fil-markets v1.0.5-0.20201113164554-c5eba40d5335/go.mod h1:AJySOJC00JRWEZzRG2KsfUnqEf5ITXxeX09BE9N4f9c= -github.com/filecoin-project/go-fil-markets v1.2.5 h1:bQgtXbwxKyPxSEQoUI5EaTHJ0qfzyd5NosspuADCm6Y= -github.com/filecoin-project/go-fil-markets v1.2.5/go.mod h1:7JIqNBmFvOyBzk/EiPYnweVdQnWhshixb5B9b1653Ag= +github.com/filecoin-project/go-fil-markets v1.5.0 h1:3KEs01L8XFCEgujZ6ggFjr1XWjpjTQcmSSeo3I99I0k= +github.com/filecoin-project/go-fil-markets v1.5.0/go.mod 
h1:7be6zzFwaN8kxVeYZf/UUj/JilHC0ogPvWqE1TW8Ptk= github.com/filecoin-project/go-hamt-ipld v0.1.5 h1:uoXrKbCQZ49OHpsTCkrThPNelC4W3LPEk0OrS/ytIBM= github.com/filecoin-project/go-hamt-ipld v0.1.5/go.mod h1:6Is+ONR5Cd5R6XZoCse1CWaXZc0Hdb/JeX+EQCQzX24= github.com/filecoin-project/go-hamt-ipld/v2 v2.0.0 h1:b3UDemBYN2HNfk3KOXNuxgTTxlWi3xVvbQP0IT38fvM= @@ -307,16 +316,20 @@ github.com/filecoin-project/go-storedcounter v0.0.0-20200421200003-1c99c62e8a5b github.com/filecoin-project/go-storedcounter v0.0.0-20200421200003-1c99c62e8a5b/go.mod h1:Q0GQOBtKf1oE10eSXSlhN45kDBdGvEcVOqMiffqX+N8= github.com/filecoin-project/specs-actors v0.9.4/go.mod h1:BStZQzx5x7TmCkLv0Bpa07U6cPKol6fd3w9KjMPZ6Z4= github.com/filecoin-project/specs-actors v0.9.12/go.mod h1:TS1AW/7LbG+615j4NsjMK1qlpAwaFsG9w0V2tg2gSao= -github.com/filecoin-project/specs-actors v0.9.13 h1:rUEOQouefi9fuVY/2HOroROJlZbOzWYXXeIh41KF2M4= github.com/filecoin-project/specs-actors v0.9.13/go.mod h1:TS1AW/7LbG+615j4NsjMK1qlpAwaFsG9w0V2tg2gSao= +github.com/filecoin-project/specs-actors v0.9.14 h1:68PVstg2UB3ZsMLF+DKFTAs/YKsqhKWynkr0IqmVRQY= +github.com/filecoin-project/specs-actors v0.9.14/go.mod h1:TS1AW/7LbG+615j4NsjMK1qlpAwaFsG9w0V2tg2gSao= github.com/filecoin-project/specs-actors/v2 v2.0.1/go.mod h1:v2NZVYinNIKA9acEMBm5wWXxqv5+frFEbekBFemYghY= github.com/filecoin-project/specs-actors/v2 v2.3.2/go.mod h1:UuJQLoTx/HPvvWeqlIFmC/ywlOLHNe8SNQ3OunFbu2Y= -github.com/filecoin-project/specs-actors/v2 v2.3.5-0.20210114162132-5b58b773f4fb h1:orr/sMzrDZUPAveRE+paBdu1kScIUO5zm+HYeh+VlhA= github.com/filecoin-project/specs-actors/v2 v2.3.5-0.20210114162132-5b58b773f4fb/go.mod h1:LljnY2Mn2homxZsmokJZCpRuhOPxfXhvcek5gWkmqAc= -github.com/filecoin-project/specs-actors/v3 v3.1.0 h1:s4qiPw8pgypqBGAy853u/zdZJ7K9cTZdM1rTiSonHrg= +github.com/filecoin-project/specs-actors/v2 v2.3.5 h1:PbT4tPlSXZ8sRgajhb4D8AOEmiaaZ+jg6tc6BBv8VQc= +github.com/filecoin-project/specs-actors/v2 v2.3.5/go.mod h1:LljnY2Mn2homxZsmokJZCpRuhOPxfXhvcek5gWkmqAc= 
github.com/filecoin-project/specs-actors/v3 v3.1.0/go.mod h1:mpynccOLlIRy0QnR008BwYBwT9fen+sPR13MA1VmMww= -github.com/filecoin-project/specs-actors/v4 v4.0.0 h1:vMALksY5G3J5rj3q9rbcyB+f4Tk1xrLqSgdB3jOok4s= +github.com/filecoin-project/specs-actors/v3 v3.1.1 h1:BE8fsns1GnEOxt1DTE5LxBK2FThXtWmCChgcJoHTg0E= +github.com/filecoin-project/specs-actors/v3 v3.1.1/go.mod h1:mpynccOLlIRy0QnR008BwYBwT9fen+sPR13MA1VmMww= github.com/filecoin-project/specs-actors/v4 v4.0.0/go.mod h1:TkHXf/l7Wyw4ZejyXIPS2rK8bBO0rdwhTZyQQgaglng= +github.com/filecoin-project/specs-actors/v4 v4.0.1 h1:AiWrtvJZ63MHGe6rn7tPu4nSUY8bA1KDNszqJaD5+Fg= +github.com/filecoin-project/specs-actors/v4 v4.0.1/go.mod h1:TkHXf/l7Wyw4ZejyXIPS2rK8bBO0rdwhTZyQQgaglng= github.com/filecoin-project/specs-actors/v5 v5.0.0-20210512015452-4fe3889fff57/go.mod h1:283yBMMUSDB2abcjP/hhrwTkhb9h3sfM6KGrep/ZlBI= github.com/filecoin-project/specs-actors/v5 v5.0.1 h1:PrYm5AKdMlJ/55eRW5laWcnaX66gyyDYBWvH38kNAMo= github.com/filecoin-project/specs-actors/v5 v5.0.1/go.mod h1:74euMDIXorusOBs/QL/LNkYsXZdDpLJwojWw6T03pdE= @@ -325,8 +338,9 @@ github.com/filecoin-project/specs-storage v0.1.1-0.20201105051918-5188d9774506/g github.com/filecoin-project/test-vectors/schema v0.0.5 h1:w3zHQhzM4pYxJDl21avXjOKBLF8egrvwUwjpT8TquDg= github.com/filecoin-project/test-vectors/schema v0.0.5/go.mod h1:iQ9QXLpYWL3m7warwvK1JC/pTri8mnfEmKygNDqqY6E= github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= -github.com/flynn/noise v0.0.0-20180327030543-2492fe189ae6 h1:u/UEqS66A5ckRmS4yNpjmVH56sVtS/RfclBAYocb4as= github.com/flynn/noise v0.0.0-20180327030543-2492fe189ae6/go.mod h1:1i71OnUq3iUe1ma7Lr6yG6/rjvM3emb6yoL7xLFzcVQ= +github.com/flynn/noise v1.0.0 h1:DlTHqmzmvcEiKj+4RYo/imoswx/4r6iBlCMfVtrMXpQ= +github.com/flynn/noise v1.0.0/go.mod h1:xbMo+0i6+IGbYdJhF31t2eR1BIU0CYc12+BNAKwUTag= github.com/francoispqt/gojay v1.2.13 h1:d2m3sFjloqoIUQU3TsHBgj6qg/BVGlTBeHDUmyJnXKk= github.com/francoispqt/gojay 
v1.2.13/go.mod h1:ehT5mTG4ua4581f1++1WLG0vPdaA9HaiDsoyrBGkyDY= github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4= @@ -336,6 +350,10 @@ github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWo github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/gbrlsnchs/jwt/v3 v3.0.0-beta.1 h1:EzDjxMg43q1tA2c0MV3tNbaontnHLplHyFF6M5KiVP0= github.com/gbrlsnchs/jwt/v3 v3.0.0-beta.1/go.mod h1:0eHX/BVySxPc6SE2mZRoppGq7qcEagxdmQnA3dzork8= +github.com/gdamore/encoding v1.0.0 h1:+7OoQ1Bc6eTm5niUzBa0Ctsh6JbMW6Ra+YNuAtDBdko= +github.com/gdamore/encoding v1.0.0/go.mod h1:alR0ol34c49FCSBLjhosxzcPHQbf2trDkoo5dl+VrEg= +github.com/gdamore/tcell/v2 v2.2.0 h1:vSyEgKwraXPSOkvCk7IwOSyX+Pv3V2cV9CikJMXg4U4= +github.com/gdamore/tcell/v2 v2.2.0/go.mod h1:cTTuF84Dlj/RqmaCIV5p4w8uG1zWdk0SF6oBpwHp4fU= github.com/getsentry/raven-go v0.2.0 h1:no+xWJRb5ZI7eE8TWgIq1jLulQiIoLG0IfYxv5JYMGs= github.com/getsentry/raven-go v0.2.0/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ= github.com/ghemawat/stream v0.0.0-20171120220530-696b145b53b9/go.mod h1:106OIgooyS7OzLDOpUGgm9fA3bQENb/cFSyyBmMoJDs= @@ -387,8 +405,9 @@ github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7a github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= -github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls= github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/gogo/status v1.0.3/go.mod h1:SavQ51ycCLnc7dGyJxp8YAmudx8xqiVrRf+6IXRsugc= 
github.com/gogo/status v1.1.0 h1:+eIkrewn5q6b30y+g/BJINVVdi2xH7je5MPJ3ZPK3JA= github.com/gogo/status v1.1.0/go.mod h1:BFv9nrluPLmrS0EmGVvLaPNmRosr9KapBYd5/hpY1WM= @@ -405,8 +424,9 @@ github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfb github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.4 h1:l75CXGRSwbaYNpl/Z2X1XIIAMSCquvXgpVZDhwEIJsc= github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= +github.com/golang/mock v1.5.0 h1:jlYHihg//f7RRwuPfptm04yp4s7O6Kw8EZiVYIGcH0g= +github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.0/go.mod h1:Qd/q+1AKNOZr9uGQzbzCmRO6sUih6GTPZv6a1/R87v0= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -418,8 +438,9 @@ github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrU github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0= github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3 h1:JjCZWpVbqXDqFVmTfYWEVTMIYrL/NPdPSCHPJ0T/raM= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= 
github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.2-0.20190904063534-ff6b7dc882cf h1:gFVkHXmVAhEbxZVDln5V9GKrLaluNoFHDbrZwAWZgws= @@ -431,14 +452,16 @@ github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMyw github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.2 h1:X2ev0eStA3AbceY54o37/0PQ/UWqKEiiO2dKL5OPaFM= github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.3 h1:x95R7cp+rSeeqAMI2knLtQ0DKlaBhv2NrtrOvafPHRo= +github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gopacket v1.1.17/go.mod h1:UdDNZ1OO62aGYVnPhxT1U6aI7ukYtA/kB8vaU0diBUM= -github.com/google/gopacket v1.1.18 h1:lum7VRA9kdlvBi7/v2p7/zcbkduHaCH/SVVyurs7OpY= github.com/google/gopacket v1.1.18/go.mod h1:UdDNZ1OO62aGYVnPhxT1U6aI7ukYtA/kB8vaU0diBUM= +github.com/google/gopacket v1.1.19 h1:ves8RnFZPGiFnTS0uPQStjwru6uO6h+nlr9j6fL7kF8= +github.com/google/gopacket v1.1.19/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo/Vo+TKTo= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= @@ -603,8 +626,8 @@ github.com/ipfs/go-fs-lock 
v0.0.6/go.mod h1:OTR+Rj9sHiRubJh3dRhD15Juhd/+w6VPOY28 github.com/ipfs/go-graphsync v0.1.0/go.mod h1:jMXfqIEDFukLPZHqDPp8tJMbHO9Rmeb9CEGevngQbmE= github.com/ipfs/go-graphsync v0.4.2/go.mod h1:/VmbZTUdUMTbNkgzAiCEucIIAU3BkLE2cZrDCVUhyi0= github.com/ipfs/go-graphsync v0.4.3/go.mod h1:mPOwDYv128gf8gxPFgXnz4fNrSYPsWyqisJ7ych+XDY= -github.com/ipfs/go-graphsync v0.6.0 h1:x6UvDUGA7wjaKNqx5Vbo7FGT8aJ5ryYA0dMQ5jN3dF0= -github.com/ipfs/go-graphsync v0.6.0/go.mod h1:e2ZxnClqBBYAtd901g9vXMJzS47labjAtOzsWtOzKNk= +github.com/ipfs/go-graphsync v0.6.1 h1:i9wN7YkBXWwIsUjVQeuaDxFB59yWZrG1xL564Nz7aGE= +github.com/ipfs/go-graphsync v0.6.1/go.mod h1:e2ZxnClqBBYAtd901g9vXMJzS47labjAtOzsWtOzKNk= github.com/ipfs/go-hamt-ipld v0.1.1/go.mod h1:1EZCr2v0jlCnhpa+aZ0JZYp8Tt2w16+JJOAVz17YcDk= github.com/ipfs/go-ipfs-blockstore v0.0.1/go.mod h1:d3WClOmRQKFnJ0Jz/jj/zmksX0ma1gROTlovZKBmN08= github.com/ipfs/go-ipfs-blockstore v0.1.0/go.mod h1:5aD0AvHPi7mZc6Ci1WCAhiBQu2IsfTduLl+422H6Rqw= @@ -679,8 +702,11 @@ github.com/ipfs/go-log/v2 v2.0.5/go.mod h1:eZs4Xt4ZUJQFM3DlanGhy7TkwwawCZcSByscw github.com/ipfs/go-log/v2 v2.0.8/go.mod h1:eZs4Xt4ZUJQFM3DlanGhy7TkwwawCZcSByscwkWG+dw= github.com/ipfs/go-log/v2 v2.1.1/go.mod h1:2v2nsGfZsvvAJz13SyFzf9ObaqwHiHxsPLEHntrv9KM= github.com/ipfs/go-log/v2 v2.1.2-0.20200626104915-0016c0b4b3e4/go.mod h1:2v2nsGfZsvvAJz13SyFzf9ObaqwHiHxsPLEHntrv9KM= -github.com/ipfs/go-log/v2 v2.1.3 h1:1iS3IU7aXRlbgUpN8yTTpJ53NXYjAe37vcI5+5nYrzk= +github.com/ipfs/go-log/v2 v2.1.2/go.mod h1:2v2nsGfZsvvAJz13SyFzf9ObaqwHiHxsPLEHntrv9KM= github.com/ipfs/go-log/v2 v2.1.3/go.mod h1:/8d0SH3Su5Ooc31QlL1WysJhvyOTDCjcCZ9Axpmri6g= +github.com/ipfs/go-log/v2 v2.1.3/go.mod h1:/8d0SH3Su5Ooc31QlL1WysJhvyOTDCjcCZ9Axpmri6g= +github.com/ipfs/go-log/v2 v2.3.0 h1:31Re/cPqFHpsRHgyVwjWADPoF0otB1WrjTy8ZFYwEZU= +github.com/ipfs/go-log/v2 v2.3.0/go.mod h1:QqGoj30OTpnKaG/LKTGTxoP2mmQtjVMEnK72gynbe/g= github.com/ipfs/go-merkledag v0.0.3/go.mod h1:Oc5kIXLHokkE1hWGMBHw+oxehkAaTOqtEb7Zbh6BhLA= 
github.com/ipfs/go-merkledag v0.0.6/go.mod h1:QYPdnlvkOg7GnQRofu9XZimC5ZW5Wi3bKys/4GQQfto= github.com/ipfs/go-merkledag v0.2.3/go.mod h1:SQiXrtSts3KGNmgOzMICy5c0POOpUNQLvB3ClKnBAlk= @@ -776,7 +802,6 @@ github.com/jtolds/gls v4.2.1+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVY github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= -github.com/kabukky/httpscerts v0.0.0-20150320125433-617593d7dcb3 h1:Iy7Ifq2ysilWU4QlCx/97OoI4xT1IV7i8byT/EyIT/M= github.com/kabukky/httpscerts v0.0.0-20150320125433-617593d7dcb3/go.mod h1:BYpt4ufZiIGv2nXn4gMxnfKV306n3mWXgNu/d2TqdTU= github.com/kami-zh/go-capturer v0.0.0-20171211120116-e492ea43421d/go.mod h1:P2viExyCEfeWGU259JnaQ34Inuec4R38JCyBx2edgD0= github.com/kelseyhightower/envconfig v1.4.0 h1:Im6hONhd3pLkfDFsbRgu68RDNkGF1r3dvMUtDTo2cv8= @@ -787,6 +812,7 @@ github.com/kilic/bls12-381 v0.0.0-20200820230200-6b2c19996391 h1:51kHw7l/dUDdOdW github.com/kilic/bls12-381 v0.0.0-20200820230200-6b2c19996391/go.mod h1:XXfR6YFCRSrkEXbNlIyDsgXVNJWVUV30m/ebkVy9n6s= github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= @@ -817,8 +843,9 @@ github.com/libp2p/go-conn-security v0.0.1/go.mod h1:bGmu51N0KU9IEjX7kl2PQjgZa40J github.com/libp2p/go-conn-security-multistream v0.0.1/go.mod 
h1:nc9vud7inQ+d6SO0I/6dSWrdMnHnzZNHeyUQqrAJulE= github.com/libp2p/go-conn-security-multistream v0.0.2/go.mod h1:nc9vud7inQ+d6SO0I/6dSWrdMnHnzZNHeyUQqrAJulE= github.com/libp2p/go-conn-security-multistream v0.1.0/go.mod h1:aw6eD7LOsHEX7+2hJkDxw1MteijaVcI+/eP2/x3J1xc= -github.com/libp2p/go-conn-security-multistream v0.2.0 h1:uNiDjS58vrvJTg9jO6bySd1rMKejieG7v45ekqHbZ1M= github.com/libp2p/go-conn-security-multistream v0.2.0/go.mod h1:hZN4MjlNetKD3Rq5Jb/P5ohUnFLNzEAR4DLSzpn2QLU= +github.com/libp2p/go-conn-security-multistream v0.2.1 h1:ft6/POSK7F+vl/2qzegnHDaXFU0iWB4yVTYrioC6Zy0= +github.com/libp2p/go-conn-security-multistream v0.2.1/go.mod h1:cR1d8gA0Hr59Fj6NhaTpFhJZrjSYuNmhpT2r25zYR70= github.com/libp2p/go-eventbus v0.0.2/go.mod h1:Hr/yGlwxA/stuLnpMiu82lpNKpvRy3EaJxPu40XYOwk= github.com/libp2p/go-eventbus v0.1.0/go.mod h1:vROgu5cs5T7cv7POWlWxBaVLxfSegC5UGQf8A2eEmx4= github.com/libp2p/go-eventbus v0.2.1 h1:VanAdErQnpTioN2TowqNcOijf6YwhuODe4pPKSDpxGc= @@ -841,8 +868,9 @@ github.com/libp2p/go-libp2p v0.8.1/go.mod h1:QRNH9pwdbEBpx5DTJYg+qxcVaDMAz3Ee/qD github.com/libp2p/go-libp2p v0.8.3/go.mod h1:EsH1A+8yoWK+L4iKcbPYu6MPluZ+CHWI9El8cTaefiM= github.com/libp2p/go-libp2p v0.9.2/go.mod h1:cunHNLDVus66Ct9iXXcjKRLdmHdFdHVe1TAnbubJQqQ= github.com/libp2p/go-libp2p v0.10.0/go.mod h1:yBJNpb+mGJdgrwbKAKrhPU0u3ogyNFTfjJ6bdM+Q/G8= -github.com/libp2p/go-libp2p v0.12.0 h1:+xai9RQnQ9l5elFOKvp5wRyjyWisSwEx+6nU2+onpUA= github.com/libp2p/go-libp2p v0.12.0/go.mod h1:FpHZrfC1q7nA8jitvdjKBDF31hguaC676g/nT9PgQM0= +github.com/libp2p/go-libp2p v0.14.2 h1:qs0ABtjjNjS+RIXT1uM7sMJEvIc0pq2nKR0VQxFXhHI= +github.com/libp2p/go-libp2p v0.14.2/go.mod h1:0PQMADQEjCM2l8cSMYDpTgsb8gr6Zq7i4LUgq1mlW2E= github.com/libp2p/go-libp2p-asn-util v0.0.0-20200825225859-85005c6cf052 h1:BM7aaOF7RpmNn9+9g6uTjGJ0cTzWr5j9i9IKeun2M8U= github.com/libp2p/go-libp2p-asn-util v0.0.0-20200825225859-85005c6cf052/go.mod h1:nRMRTab+kZuk0LnKZpxhOVH/ndsdr2Nr//Zltc/vwgo= github.com/libp2p/go-libp2p-autonat v0.0.2/go.mod 
h1:fs71q5Xk+pdnKU014o2iq1RhMs9/PMaG5zXRFNnIIT4= @@ -853,8 +881,9 @@ github.com/libp2p/go-libp2p-autonat v0.2.0/go.mod h1:DX+9teU4pEEoZUqR1PiMlqliONQ github.com/libp2p/go-libp2p-autonat v0.2.1/go.mod h1:MWtAhV5Ko1l6QBsHQNSuM6b1sRkXrpk0/LqCr+vCVxI= github.com/libp2p/go-libp2p-autonat v0.2.2/go.mod h1:HsM62HkqZmHR2k1xgX34WuWDzk/nBwNHoeyyT4IWV6A= github.com/libp2p/go-libp2p-autonat v0.2.3/go.mod h1:2U6bNWCNsAG9LEbwccBDQbjzQ8Krdjge1jLTE9rdoMM= -github.com/libp2p/go-libp2p-autonat v0.4.0 h1:3y8XQbpr+ssX8QfZUHekjHCYK64sj6/4hnf/awD4+Ug= github.com/libp2p/go-libp2p-autonat v0.4.0/go.mod h1:YxaJlpr81FhdOv3W3BTconZPfhaYivRdf53g+S2wobk= +github.com/libp2p/go-libp2p-autonat v0.4.2 h1:YMp7StMi2dof+baaxkbxaizXjY1RPvU71CXfxExzcUU= +github.com/libp2p/go-libp2p-autonat v0.4.2/go.mod h1:YxaJlpr81FhdOv3W3BTconZPfhaYivRdf53g+S2wobk= github.com/libp2p/go-libp2p-autonat-svc v0.1.0/go.mod h1:fqi8Obl/z3R4PFVLm8xFtZ6PBL9MlV/xumymRFkKq5A= github.com/libp2p/go-libp2p-blankhost v0.0.1/go.mod h1:Ibpbw/7cPPYwFb7PACIWdvxxv0t0XCCI10t7czjAjTc= github.com/libp2p/go-libp2p-blankhost v0.1.1/go.mod h1:pf2fvdLJPsC1FsVrNP3DUUvMzUts2dsLLBEpo1vW1ro= @@ -901,8 +930,13 @@ github.com/libp2p/go-libp2p-core v0.5.6/go.mod h1:txwbVEhHEXikXn9gfC7/UDDw7rkxuX github.com/libp2p/go-libp2p-core v0.5.7/go.mod h1:txwbVEhHEXikXn9gfC7/UDDw7rkxuX0bJvM49Ykaswo= github.com/libp2p/go-libp2p-core v0.6.0/go.mod h1:txwbVEhHEXikXn9gfC7/UDDw7rkxuX0bJvM49Ykaswo= github.com/libp2p/go-libp2p-core v0.6.1/go.mod h1:FfewUH/YpvWbEB+ZY9AQRQ4TAD8sJBt/G1rVvhz5XT8= -github.com/libp2p/go-libp2p-core v0.7.0 h1:4a0TMjrWNTZlNvcqxZmrMRDi/NQWrhwO2pkTuLSQ/IQ= github.com/libp2p/go-libp2p-core v0.7.0/go.mod h1:FfewUH/YpvWbEB+ZY9AQRQ4TAD8sJBt/G1rVvhz5XT8= +github.com/libp2p/go-libp2p-core v0.8.0/go.mod h1:FfewUH/YpvWbEB+ZY9AQRQ4TAD8sJBt/G1rVvhz5XT8= +github.com/libp2p/go-libp2p-core v0.8.1/go.mod h1:FfewUH/YpvWbEB+ZY9AQRQ4TAD8sJBt/G1rVvhz5XT8= +github.com/libp2p/go-libp2p-core v0.8.2/go.mod h1:FfewUH/YpvWbEB+ZY9AQRQ4TAD8sJBt/G1rVvhz5XT8= 
+github.com/libp2p/go-libp2p-core v0.8.3/go.mod h1:FfewUH/YpvWbEB+ZY9AQRQ4TAD8sJBt/G1rVvhz5XT8= +github.com/libp2p/go-libp2p-core v0.8.5 h1:aEgbIcPGsKy6zYcC+5AJivYFedhYa4sW7mIpWpUaLKw= +github.com/libp2p/go-libp2p-core v0.8.5/go.mod h1:FfewUH/YpvWbEB+ZY9AQRQ4TAD8sJBt/G1rVvhz5XT8= github.com/libp2p/go-libp2p-crypto v0.0.1/go.mod h1:yJkNyDmO341d5wwXxDUGO0LykUVT72ImHNUqh5D/dBE= github.com/libp2p/go-libp2p-crypto v0.0.2/go.mod h1:eETI5OUfBnvARGOHrJz2eWNyTUxEGZnBxMcbUjfIj4I= github.com/libp2p/go-libp2p-crypto v0.1.0 h1:k9MFy+o2zGDNGsaoZl0MA3iZ75qXxr9OOoAZF+sD5OQ= @@ -937,8 +971,10 @@ github.com/libp2p/go-libp2p-mplex v0.2.0/go.mod h1:Ejl9IyjvXJ0T9iqUTE1jpYATQ9NM3 github.com/libp2p/go-libp2p-mplex v0.2.1/go.mod h1:SC99Rxs8Vuzrf/6WhmH41kNn13TiYdAWNYHrwImKLnE= github.com/libp2p/go-libp2p-mplex v0.2.2/go.mod h1:74S9eum0tVQdAfFiKxAyKzNdSuLqw5oadDq7+L/FELo= github.com/libp2p/go-libp2p-mplex v0.2.3/go.mod h1:CK3p2+9qH9x+7ER/gWWDYJ3QW5ZxWDkm+dVvjfuG3ek= -github.com/libp2p/go-libp2p-mplex v0.3.0 h1:CZyqqKP0BSGQyPLvpRQougbfXaaaJZdGgzhCpJNuNSk= github.com/libp2p/go-libp2p-mplex v0.3.0/go.mod h1:l9QWxRbbb5/hQMECEb908GbS9Sm2UAR2KFZKUJEynEs= +github.com/libp2p/go-libp2p-mplex v0.4.0/go.mod h1:yCyWJE2sc6TBTnFpjvLuEJgTSw/u+MamvzILKdX7asw= +github.com/libp2p/go-libp2p-mplex v0.4.1 h1:/pyhkP1nLwjG3OM+VuaNJkQT/Pqq73WzB3aDN3Fx1sc= +github.com/libp2p/go-libp2p-mplex v0.4.1/go.mod h1:cmy+3GfqfM1PceHTLL7zQzAAYaryDu6iPSC+CIb094g= github.com/libp2p/go-libp2p-nat v0.0.2/go.mod h1:QrjXQSD5Dj4IJOdEcjHRkWTSomyxRo6HnUkf/TfQpLQ= github.com/libp2p/go-libp2p-nat v0.0.4/go.mod h1:N9Js/zVtAXqaeT99cXgTV9e75KpnWCvVOiGzlcHmBbY= github.com/libp2p/go-libp2p-nat v0.0.5/go.mod h1:1qubaE5bTZMJE+E/uu2URroMbzdubFz1ChgiN79yKPE= @@ -950,8 +986,8 @@ github.com/libp2p/go-libp2p-netutil v0.0.1/go.mod h1:GdusFvujWZI9Vt0X5BKqwWWmZFx github.com/libp2p/go-libp2p-netutil v0.1.0 h1:zscYDNVEcGxyUpMd0JReUZTrpMfia8PmLKcKF72EAMQ= github.com/libp2p/go-libp2p-netutil v0.1.0/go.mod h1:3Qv/aDqtMLTUyQeundkKsA+YCThNdbQD54k3TqjpbFU= 
github.com/libp2p/go-libp2p-noise v0.1.1/go.mod h1:QDFLdKX7nluB7DEnlVPbz7xlLHdwHFA9HiohJRr3vwM= -github.com/libp2p/go-libp2p-noise v0.1.2 h1:IH9GRihQJTx56obm+GnpdPX4KeVIlvpXrP6xnJ0wxWk= -github.com/libp2p/go-libp2p-noise v0.1.2/go.mod h1:9B10b7ueo7TIxZHHcjcDCo5Hd6kfKT2m77by82SFRfE= +github.com/libp2p/go-libp2p-noise v0.2.0 h1:wmk5nhB9a2w2RxMOyvsoKjizgJOEaJdfAakr0jN8gds= +github.com/libp2p/go-libp2p-noise v0.2.0/go.mod h1:IEbYhBBzGyvdLBoxxULL/SGbJARhUeqlO8lVSREYu2Q= github.com/libp2p/go-libp2p-peer v0.0.1/go.mod h1:nXQvOBbwVqoP+T5Y5nCjeH4sP9IX/J0AMzcDUVruVoo= github.com/libp2p/go-libp2p-peer v0.1.1/go.mod h1:jkF12jGB4Gk/IOo+yomm+7oLWxF278F7UnrYUQ1Q8es= github.com/libp2p/go-libp2p-peer v0.2.0 h1:EQ8kMjaCUwt/Y5uLgjT8iY2qg0mGUT0N1zUjer50DsY= @@ -966,20 +1002,21 @@ github.com/libp2p/go-libp2p-peerstore v0.2.1/go.mod h1:NQxhNjWxf1d4w6PihR8btWIRj github.com/libp2p/go-libp2p-peerstore v0.2.2/go.mod h1:NQxhNjWxf1d4w6PihR8btWIRjwRLBr4TYKfNgrUkOPA= github.com/libp2p/go-libp2p-peerstore v0.2.3/go.mod h1:K8ljLdFn590GMttg/luh4caB/3g0vKuY01psze0upRw= github.com/libp2p/go-libp2p-peerstore v0.2.4/go.mod h1:ss/TWTgHZTMpsU/oKVVPQCGuDHItOpf2W8RxAi50P2s= -github.com/libp2p/go-libp2p-peerstore v0.2.6 h1:2ACefBX23iMdJU9Ke+dcXt3w86MIryes9v7In4+Qq3U= github.com/libp2p/go-libp2p-peerstore v0.2.6/go.mod h1:ss/TWTgHZTMpsU/oKVVPQCGuDHItOpf2W8RxAi50P2s= +github.com/libp2p/go-libp2p-peerstore v0.2.7 h1:83JoLxyR9OYTnNfB5vvFqvMUv/xDNa6NoPHnENhBsGw= +github.com/libp2p/go-libp2p-peerstore v0.2.7/go.mod h1:ss/TWTgHZTMpsU/oKVVPQCGuDHItOpf2W8RxAi50P2s= github.com/libp2p/go-libp2p-pnet v0.2.0 h1:J6htxttBipJujEjz1y0a5+eYoiPcFHhSYHH6na5f0/k= github.com/libp2p/go-libp2p-pnet v0.2.0/go.mod h1:Qqvq6JH/oMZGwqs3N1Fqhv8NVhrdYcO0BW4wssv21LA= github.com/libp2p/go-libp2p-protocol v0.0.1/go.mod h1:Af9n4PiruirSDjHycM1QuiMi/1VZNHYcK8cLgFJLZ4s= github.com/libp2p/go-libp2p-protocol v0.1.0/go.mod h1:KQPHpAabB57XQxGrXCNvbL6UEXfQqUgC/1adR2Xtflk= github.com/libp2p/go-libp2p-pubsub v0.1.1/go.mod 
h1:ZwlKzRSe1eGvSIdU5bD7+8RZN/Uzw0t1Bp9R1znpR/Q= github.com/libp2p/go-libp2p-pubsub v0.3.2-0.20200527132641-c0712c6e92cf/go.mod h1:TxPOBuo1FPdsTjFnv+FGZbNbWYsp74Culx+4ViQpato= -github.com/libp2p/go-libp2p-pubsub v0.4.2-0.20210212194758-6c1addf493eb h1:HExLcdXn8fgtXPciUw97O5NNhBn31dt6d9fVUD4cngo= -github.com/libp2p/go-libp2p-pubsub v0.4.2-0.20210212194758-6c1addf493eb/go.mod h1:izkeMLvz6Ht8yAISXjx60XUQZMq9ZMe5h2ih4dLIBIQ= +github.com/libp2p/go-libp2p-pubsub v0.5.0 h1:OzcIuCWyJpOrWH0PTOfvxTzqFur4tiXpY5jXC8OxjyE= +github.com/libp2p/go-libp2p-pubsub v0.5.0/go.mod h1:MKnrsQkFgPcrQs1KVmOXy6Uz2RDQ1xO7dQo/P0Ba+ig= github.com/libp2p/go-libp2p-quic-transport v0.1.1/go.mod h1:wqG/jzhF3Pu2NrhJEvE+IE0NTHNXslOPn9JQzyCAxzU= github.com/libp2p/go-libp2p-quic-transport v0.5.0/go.mod h1:IEcuC5MLxvZ5KuHKjRu+dr3LjCT1Be3rcD/4d8JrX8M= -github.com/libp2p/go-libp2p-quic-transport v0.9.0 h1:WPuq5nV/chmIZIzvrkC2ulSdAQ0P0BDvgvAhZFOZ59E= -github.com/libp2p/go-libp2p-quic-transport v0.9.0/go.mod h1:xyY+IgxL0qsW7Kiutab0+NlxM0/p9yRtrGTYsuMWf70= +github.com/libp2p/go-libp2p-quic-transport v0.10.0 h1:koDCbWD9CCHwcHZL3/WEvP2A+e/o5/W5L3QS/2SPMA0= +github.com/libp2p/go-libp2p-quic-transport v0.10.0/go.mod h1:RfJbZ8IqXIhxBRm5hqUEJqjiiY8xmEuq3HUDS993MkA= github.com/libp2p/go-libp2p-record v0.0.1/go.mod h1:grzqg263Rug/sRex85QrDOLntdFAymLDLm7lxMgU79Q= github.com/libp2p/go-libp2p-record v0.1.0/go.mod h1:ujNc8iuE5dlKWVy6wuL6dd58t0n7xI4hAIl8pE6wu5Q= github.com/libp2p/go-libp2p-record v0.1.1/go.mod h1:VRgKajOyMVgP/F0L5g3kH7SVskp17vFi2xheb5uMJtg= @@ -1006,8 +1043,10 @@ github.com/libp2p/go-libp2p-swarm v0.2.4/go.mod h1:/xIpHFPPh3wmSthtxdGbkHZ0OET1h github.com/libp2p/go-libp2p-swarm v0.2.7/go.mod h1:ZSJ0Q+oq/B1JgfPHJAT2HTall+xYRNYp1xs4S2FBWKA= github.com/libp2p/go-libp2p-swarm v0.2.8/go.mod h1:JQKMGSth4SMqonruY0a8yjlPVIkb0mdNSwckW7OYziM= github.com/libp2p/go-libp2p-swarm v0.3.0/go.mod h1:hdv95GWCTmzkgeJpP+GK/9D9puJegb7H57B5hWQR5Kk= -github.com/libp2p/go-libp2p-swarm v0.3.1 
h1:UTobu+oQHGdXTOGpZ4RefuVqYoJXcT0EBtSR74m2LkI= github.com/libp2p/go-libp2p-swarm v0.3.1/go.mod h1:hdv95GWCTmzkgeJpP+GK/9D9puJegb7H57B5hWQR5Kk= +github.com/libp2p/go-libp2p-swarm v0.4.3/go.mod h1:mmxP1pGBSc1Arw4F5DIjcpjFAmsRzA1KADuMtMuCT4g= +github.com/libp2p/go-libp2p-swarm v0.5.0 h1:HIK0z3Eqoo8ugmN8YqWAhD2RORgR+3iNXYG4U2PFd1E= +github.com/libp2p/go-libp2p-swarm v0.5.0/go.mod h1:sU9i6BoHE0Ve5SKz3y9WfKrh8dUat6JknzUehFx8xW4= github.com/libp2p/go-libp2p-testing v0.0.1/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E= github.com/libp2p/go-libp2p-testing v0.0.2/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E= github.com/libp2p/go-libp2p-testing v0.0.3/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E= @@ -1015,8 +1054,9 @@ github.com/libp2p/go-libp2p-testing v0.0.4/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MB github.com/libp2p/go-libp2p-testing v0.1.0/go.mod h1:xaZWMJrPUM5GlDBxCeGUi7kI4eqnjVyavGroI2nxEM0= github.com/libp2p/go-libp2p-testing v0.1.1/go.mod h1:xaZWMJrPUM5GlDBxCeGUi7kI4eqnjVyavGroI2nxEM0= github.com/libp2p/go-libp2p-testing v0.1.2-0.20200422005655-8775583591d8/go.mod h1:Qy8sAncLKpwXtS2dSnDOP8ktexIAHKu+J+pnZOFZLTc= -github.com/libp2p/go-libp2p-testing v0.3.0 h1:ZiBYstPamsi7y6NJZebRudUzsYmVkt998hltyLqf8+g= github.com/libp2p/go-libp2p-testing v0.3.0/go.mod h1:efZkql4UZ7OVsEfaxNHZPzIehtsBXMrXnCfJIgDti5g= +github.com/libp2p/go-libp2p-testing v0.4.0 h1:PrwHRi0IGqOwVQWR3xzgigSlhlLfxgfXgkHxr77EghQ= +github.com/libp2p/go-libp2p-testing v0.4.0/go.mod h1:Q+PFXYoiYFN5CAEG2w3gLPEzotlKsNSbKQ/lImlOWF0= github.com/libp2p/go-libp2p-tls v0.1.3 h1:twKMhMu44jQO+HgQK9X8NHO5HkeJu2QbhLzLJpa8oNM= github.com/libp2p/go-libp2p-tls v0.1.3/go.mod h1:wZfuewxOndz5RTnCAxFliGjvYSDA40sKitV4c50uI1M= github.com/libp2p/go-libp2p-transport v0.0.1/go.mod h1:UzbUs9X+PHOSw7S3ZmeOxfnwaQY5vGDzZmKPod3N3tk= @@ -1026,19 +1066,11 @@ github.com/libp2p/go-libp2p-transport-upgrader v0.0.1/go.mod h1:NJpUAgQab/8K6K0m github.com/libp2p/go-libp2p-transport-upgrader v0.0.4/go.mod 
h1:RGq+tupk+oj7PzL2kn/m1w6YXxcIAYJYeI90h6BGgUc= github.com/libp2p/go-libp2p-transport-upgrader v0.1.1/go.mod h1:IEtA6or8JUbsV07qPW4r01GnTenLW4oi3lOPbUMGJJA= github.com/libp2p/go-libp2p-transport-upgrader v0.2.0/go.mod h1:mQcrHj4asu6ArfSoMuyojOdjx73Q47cYD7s5+gZOlns= -github.com/libp2p/go-libp2p-transport-upgrader v0.3.0 h1:q3ULhsknEQ34eVDhv4YwKS8iet69ffs9+Fir6a7weN4= github.com/libp2p/go-libp2p-transport-upgrader v0.3.0/go.mod h1:i+SKzbRnvXdVbU3D1dwydnTmKRPXiAR/fyvi1dXuL4o= -github.com/libp2p/go-libp2p-yamux v0.1.2/go.mod h1:xUoV/RmYkg6BW/qGxA9XJyg+HzXFYkeXbnhjmnYzKp8= -github.com/libp2p/go-libp2p-yamux v0.1.3/go.mod h1:VGSQVrqkh6y4nm0189qqxMtvyBft44MOYYPpYKXiVt4= -github.com/libp2p/go-libp2p-yamux v0.2.0/go.mod h1:Db2gU+XfLpm6E4rG5uGCFX6uXA8MEXOxFcRoXUODaK8= -github.com/libp2p/go-libp2p-yamux v0.2.1/go.mod h1:1FBXiHDk1VyRM1C0aez2bCfHQ4vMZKkAQzZbkSQt5fI= -github.com/libp2p/go-libp2p-yamux v0.2.2/go.mod h1:lIohaR0pT6mOt0AZ0L2dFze9hds9Req3OfS+B+dv4qw= -github.com/libp2p/go-libp2p-yamux v0.2.5/go.mod h1:Zpgj6arbyQrmZ3wxSZxfBmbdnWtbZ48OpsfmQVTErwA= -github.com/libp2p/go-libp2p-yamux v0.2.7/go.mod h1:X28ENrBMU/nm4I3Nx4sZ4dgjZ6VhLEn0XhIoZ5viCwU= -github.com/libp2p/go-libp2p-yamux v0.2.8/go.mod h1:/t6tDqeuZf0INZMTgd0WxIRbtK2EzI2h7HbFm9eAKI4= -github.com/libp2p/go-libp2p-yamux v0.4.0/go.mod h1:+DWDjtFMzoAwYLVkNZftoucn7PelNoy5nm3tZ3/Zw30= -github.com/libp2p/go-libp2p-yamux v0.4.1 h1:TJxRVPY9SjH7TNrNC80l1OJMBiWhs1qpKmeB+1Ug3xU= -github.com/libp2p/go-libp2p-yamux v0.4.1/go.mod h1:FA/NjRYRVNjqOzpGuGqcruH7jAU2mYIjtKBicVOL3dc= +github.com/libp2p/go-libp2p-transport-upgrader v0.4.2 h1:4JsnbfJzgZeRS9AWN7B9dPqn/LY/HoQTlO9gtdJTIYM= +github.com/libp2p/go-libp2p-transport-upgrader v0.4.2/go.mod h1:NR8ne1VwfreD5VIWIU62Agt/J18ekORFU/j1i2y8zvk= +github.com/libp2p/go-libp2p-yamux v0.5.1 h1:sX4WQPHMhRxJE5UZTfjEuBvlQWXB5Bo3A2JK9ZJ9EM0= +github.com/libp2p/go-libp2p-yamux v0.5.1/go.mod h1:dowuvDu8CRWmr0iqySMiSxK+W0iL5cMVO9S94Y6gkv4= github.com/libp2p/go-maddr-filter v0.0.1/go.mod 
h1:6eT12kSQMA9x2pvFQa+xesMKUBlj9VImZbj3B9FBH/Q= github.com/libp2p/go-maddr-filter v0.0.4/go.mod h1:6eT12kSQMA9x2pvFQa+xesMKUBlj9VImZbj3B9FBH/Q= github.com/libp2p/go-maddr-filter v0.0.5/go.mod h1:Jk+36PMfIqCJhAnaASRH83bdAvfDRp/w6ENFaC9bG+M= @@ -1050,8 +1082,9 @@ github.com/libp2p/go-mplex v0.0.4/go.mod h1:pK5yMLmOoBR1pNCqDlA2GQrdAVTMkqFalaTW github.com/libp2p/go-mplex v0.1.0/go.mod h1:SXgmdki2kwCUlCCbfGLEgHjC4pFqhTp0ZoV6aiKgxDU= github.com/libp2p/go-mplex v0.1.1/go.mod h1:Xgz2RDCi3co0LeZfgjm4OgUF15+sVR8SRcu3SFXI1lk= github.com/libp2p/go-mplex v0.1.2/go.mod h1:Xgz2RDCi3co0LeZfgjm4OgUF15+sVR8SRcu3SFXI1lk= -github.com/libp2p/go-mplex v0.2.0 h1:Ov/D+8oBlbRkjBs1R1Iua8hJ8cUfbdiW8EOdZuxcgaI= github.com/libp2p/go-mplex v0.2.0/go.mod h1:0Oy/A9PQlwBytDRp4wSkFnzHYDKcpLot35JQ6msjvYQ= +github.com/libp2p/go-mplex v0.3.0 h1:U1T+vmCYJaEoDJPV1aq31N56hS+lJgb397GsylNSgrU= +github.com/libp2p/go-mplex v0.3.0/go.mod h1:0Oy/A9PQlwBytDRp4wSkFnzHYDKcpLot35JQ6msjvYQ= github.com/libp2p/go-msgio v0.0.1/go.mod h1:63lBBgOTDKQL6EWazRMCwXsEeEeK9O2Cd+0+6OOuipQ= github.com/libp2p/go-msgio v0.0.2/go.mod h1:63lBBgOTDKQL6EWazRMCwXsEeEeK9O2Cd+0+6OOuipQ= github.com/libp2p/go-msgio v0.0.3/go.mod h1:63lBBgOTDKQL6EWazRMCwXsEeEeK9O2Cd+0+6OOuipQ= @@ -1063,8 +1096,9 @@ github.com/libp2p/go-nat v0.0.4/go.mod h1:Nmw50VAvKuk38jUBcmNh6p9lUJLoODbJRvYAa/ github.com/libp2p/go-nat v0.0.5 h1:qxnwkco8RLKqVh1NmjQ+tJ8p8khNLFxuElYG/TwqW4Q= github.com/libp2p/go-nat v0.0.5/go.mod h1:B7NxsVNPZmRLvMOwiEO1scOSyjA56zxYAGv1yQgRkEU= github.com/libp2p/go-netroute v0.1.2/go.mod h1:jZLDV+1PE8y5XxBySEBgbuVAXbhtuHSdmLPL2n9MKbk= -github.com/libp2p/go-netroute v0.1.3 h1:1ngWRx61us/EpaKkdqkMjKk/ufr/JlIFYQAxV2XX8Ig= github.com/libp2p/go-netroute v0.1.3/go.mod h1:jZLDV+1PE8y5XxBySEBgbuVAXbhtuHSdmLPL2n9MKbk= +github.com/libp2p/go-netroute v0.1.6 h1:ruPJStbYyXVYGQ81uzEDzuvbYRLKRrLvTYd33yomC38= +github.com/libp2p/go-netroute v0.1.6/go.mod h1:AqhkMh0VuWmfgtxKPp3Oc1LdU5QSWS7wl0QLhSZqXxQ= github.com/libp2p/go-openssl v0.0.2/go.mod 
h1:v8Zw2ijCSWBQi8Pq5GAixw6DbFfa9u6VIYDXnvOXkc0= github.com/libp2p/go-openssl v0.0.3/go.mod h1:unDrJpgy3oFr+rqXsarWifmJuNnJR4chtO1HmaZjggc= github.com/libp2p/go-openssl v0.0.4/go.mod h1:unDrJpgy3oFr+rqXsarWifmJuNnJR4chtO1HmaZjggc= @@ -1080,8 +1114,9 @@ github.com/libp2p/go-reuseport-transport v0.0.3/go.mod h1:Spv+MPft1exxARzP2Sruj2 github.com/libp2p/go-reuseport-transport v0.0.4 h1:OZGz0RB620QDGpv300n1zaOcKGGAoGVf8h9txtt/1uM= github.com/libp2p/go-reuseport-transport v0.0.4/go.mod h1:trPa7r/7TJK/d+0hdBLOCGvpQQVOU74OXbNCIMkufGw= github.com/libp2p/go-sockaddr v0.0.2/go.mod h1:syPvOmNs24S3dFVGJA1/mrqdeijPxLV2Le3BRLKd68k= -github.com/libp2p/go-sockaddr v0.1.0 h1:Y4s3/jNoryVRKEBrkJ576F17CPOaMIzUeCsg7dlTDj0= github.com/libp2p/go-sockaddr v0.1.0/go.mod h1:syPvOmNs24S3dFVGJA1/mrqdeijPxLV2Le3BRLKd68k= +github.com/libp2p/go-sockaddr v0.1.1 h1:yD80l2ZOdGksnOyHrhxDdTDFrf7Oy+v3FMVArIRgZxQ= +github.com/libp2p/go-sockaddr v0.1.1/go.mod h1:syPvOmNs24S3dFVGJA1/mrqdeijPxLV2Le3BRLKd68k= github.com/libp2p/go-stream-muxer v0.0.1/go.mod h1:bAo8x7YkSpadMTbtTaxGVHWUQsR/l5MEaHbKaliuT14= github.com/libp2p/go-stream-muxer v0.1.0/go.mod h1:8JAVsjeRBCWwPoZeH0W1imLOcriqXJyFvB0mR4A04sQ= github.com/libp2p/go-stream-muxer-multistream v0.1.1/go.mod h1:zmGdfkQ1AzOECIAcccoL8L//laqawOsO03zX8Sa+eGw= @@ -1103,25 +1138,22 @@ github.com/libp2p/go-ws-transport v0.1.0/go.mod h1:rjw1MG1LU9YDC6gzmwObkPd/Sqwhw github.com/libp2p/go-ws-transport v0.1.2/go.mod h1:dsh2Ld8F+XNmzpkaAijmg5Is+e9l6/1tK/6VFOdN69Y= github.com/libp2p/go-ws-transport v0.2.0/go.mod h1:9BHJz/4Q5A9ludYWKoGCFC5gUElzlHoKzu0yY9p/klM= github.com/libp2p/go-ws-transport v0.3.0/go.mod h1:bpgTJmRZAvVHrgHybCVyqoBmyLQ1fiZuEaBYusP5zsk= -github.com/libp2p/go-ws-transport v0.3.1 h1:ZX5rWB8nhRRJVaPO6tmkGI/Xx8XNboYX20PW5hXIscw= github.com/libp2p/go-ws-transport v0.3.1/go.mod h1:bpgTJmRZAvVHrgHybCVyqoBmyLQ1fiZuEaBYusP5zsk= -github.com/libp2p/go-yamux v1.2.1/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow= -github.com/libp2p/go-yamux v1.2.2/go.mod 
h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow= +github.com/libp2p/go-ws-transport v0.4.0 h1:9tvtQ9xbws6cA5LvqdE6Ne3vcmGB4f1z9SByggk4s0k= +github.com/libp2p/go-ws-transport v0.4.0/go.mod h1:EcIEKqf/7GDjth6ksuS/6p7R49V4CBY6/E7R/iyhYUA= github.com/libp2p/go-yamux v1.2.3/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow= -github.com/libp2p/go-yamux v1.3.0/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow= -github.com/libp2p/go-yamux v1.3.3/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow= -github.com/libp2p/go-yamux v1.3.5/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow= +github.com/libp2p/go-yamux v1.3.6 h1:O5qcBXRcfqecvQ/My9NqDNHB3/5t58yuJYqthcKhhgE= github.com/libp2p/go-yamux v1.3.6/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow= -github.com/libp2p/go-yamux v1.3.7/go.mod h1:fr7aVgmdNGJK+N1g+b6DW6VxzbRCjCOejR/hkmpooHE= -github.com/libp2p/go-yamux v1.4.0/go.mod h1:fr7aVgmdNGJK+N1g+b6DW6VxzbRCjCOejR/hkmpooHE= -github.com/libp2p/go-yamux v1.4.1 h1:P1Fe9vF4th5JOxxgQvfbOHkrGqIZniTLf+ddhZp8YTI= -github.com/libp2p/go-yamux v1.4.1/go.mod h1:fr7aVgmdNGJK+N1g+b6DW6VxzbRCjCOejR/hkmpooHE= +github.com/libp2p/go-yamux/v2 v2.0.0 h1:vSGhAy5u6iHBq11ZDcyHH4Blcf9xlBhT4WQDoOE90LU= +github.com/libp2p/go-yamux/v2 v2.0.0/go.mod h1:NVWira5+sVUIU6tu1JWvaRn1dRnG+cawOJiflsAM+7U= github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= github.com/lucas-clemente/quic-go v0.11.2/go.mod h1:PpMmPfPKO9nKJ/psF49ESTAGQSdfXxlg1otPbEB2nOw= github.com/lucas-clemente/quic-go v0.16.0/go.mod h1:I0+fcNTdb9eS1ZcjQZbDVPGchJ86chcIxPALn9lEJqE= -github.com/lucas-clemente/quic-go v0.18.1 h1:DMR7guC0NtVS8zNZR3IO7NARZvZygkSC56GGtC6cyys= -github.com/lucas-clemente/quic-go v0.18.1/go.mod h1:yXttHsSNxQi8AWijC/vLP+OJczXqzHSOcJrM5ITUlCg= +github.com/lucas-clemente/quic-go v0.19.3 
h1:eCDQqvGBB+kCTkA0XrAFtNe81FMa0/fn4QSoeAbmiF4= +github.com/lucas-clemente/quic-go v0.19.3/go.mod h1:ADXpNbTQjq1hIzCpB+y/k5iz4n4z4IwqoLb94Kh5Hu8= +github.com/lucasb-eyer/go-colorful v1.0.3 h1:QIbQXiugsb+q10B+MI+7DI1oQLdmnep86tWFlaaUAac= +github.com/lucasb-eyer/go-colorful v1.0.3/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i7ruzyGqttikkLy0= github.com/lufia/iostat v1.1.0/go.mod h1:rEPNA0xXgjHQjuI5Cy05sLlS2oRcSlWHRLrvh/AQ+Pg= github.com/lunixbochs/vtclean v1.0.0/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI= github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= @@ -1134,13 +1166,13 @@ github.com/mailru/easyjson v0.7.1/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7 github.com/mailru/easyjson v0.7.6 h1:8yTIVnZgCoiM1TgqoeTl+LfU5Jg6/xL3QhGQnimLYnA= github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/marten-seemann/qpack v0.1.0/go.mod h1:LFt1NU/Ptjip0C2CPkhimBz5CGE3WGDAUWqna+CNTrI= -github.com/marten-seemann/qpack v0.2.0/go.mod h1:F7Gl5L1jIgN1D11ucXefiuJS9UMVP2opoCp2jDKb7wc= +github.com/marten-seemann/qpack v0.2.1/go.mod h1:F7Gl5L1jIgN1D11ucXefiuJS9UMVP2opoCp2jDKb7wc= github.com/marten-seemann/qtls v0.2.3/go.mod h1:xzjG7avBwGGbdZ8dTGxlBnLArsVKLvwmjgmPuiQEcYk= github.com/marten-seemann/qtls v0.9.1/go.mod h1:T1MmAdDPyISzxlK6kjRr0pcZFBVd1OZbBb/j3cvzHhk= github.com/marten-seemann/qtls v0.10.0 h1:ECsuYUKalRL240rRD4Ri33ISb7kAQ3qGDlrrl55b2pc= github.com/marten-seemann/qtls v0.10.0/go.mod h1:UvMd1oaYDACI99/oZUYLzMCkBXQVT0aGm99sJhbT8hs= -github.com/marten-seemann/qtls-go1-15 v0.1.0 h1:i/YPXVxz8q9umso/5y474CNcHmTpA+5DH+mFPjx6PZg= -github.com/marten-seemann/qtls-go1-15 v0.1.0/go.mod h1:GyFwywLKkRt+6mfU99csTEY1joMZz5vmB1WNZH3P81I= +github.com/marten-seemann/qtls-go1-15 v0.1.1 h1:LIH6K34bPVttyXnUWixk0bzH6/N07VxbSabxn5A5gZQ= +github.com/marten-seemann/qtls-go1-15 v0.1.1/go.mod h1:GyFwywLKkRt+6mfU99csTEY1joMZz5vmB1WNZH3P81I= github.com/mattn/go-colorable v0.0.9/go.mod 
h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ= github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= @@ -1152,11 +1184,13 @@ github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNx github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= -github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mattn/go-isatty v0.0.13 h1:qdl+GuBjcsKKDco5BsxPJlId98mSWNKqYA+Co0SC1yA= +github.com/mattn/go-isatty v0.0.13/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= -github.com/mattn/go-runewidth v0.0.7 h1:Ei8KR0497xHyKJPAv59M1dkC+rOZCMBJ+t3fZ+twI54= github.com/mattn/go-runewidth v0.0.7/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= +github.com/mattn/go-runewidth v0.0.10 h1:CoZ3S2P7pvtP45xOtBw+/mDL2z0RKI576gSkzRRpdGg= +github.com/mattn/go-runewidth v0.0.10/go.mod h1:RAqKPSqVFrSLVXbA8x7dzmKdmGzieGRCM46jaSJTDAk= github.com/mattn/go-xmlrpc v0.0.3/go.mod h1:mqc2dz7tP5x5BKlCahN/n+hs7OSZKJkS9JsHNBRlrxA= github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= @@ -1173,6 +1207,8 @@ github.com/miekg/dns v1.1.4/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nr github.com/miekg/dns v1.1.12/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/miekg/dns v1.1.28/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM= 
github.com/miekg/dns v1.1.31/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM= +github.com/miekg/dns v1.1.41 h1:WMszZWJG0XmzbK9FEmzH2TVcqYzFesusSIB41b8KHxY= +github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1 h1:lYpkrQH5ajf0OXOcUbGjvZxxijuBwbbmlSxLiuofa+g= github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1/go.mod h1:pD8RvIylQ358TN4wwqatJ8rNavkEINozVn9DtGI3dfQ= github.com/minio/sha256-simd v0.0.0-20190131020904-2d45a736cd16/go.mod h1:2FMWW+8GMoPweT6+pI63m9YE3Lmw4J71hV56Chs1E/U= @@ -1219,8 +1255,9 @@ github.com/multiformats/go-multiaddr-dns v0.0.1/go.mod h1:9kWcqw/Pj6FwxAwW38n/94 github.com/multiformats/go-multiaddr-dns v0.0.2/go.mod h1:9kWcqw/Pj6FwxAwW38n/9403szc57zJPs45fmnznu3Q= github.com/multiformats/go-multiaddr-dns v0.0.3/go.mod h1:9kWcqw/Pj6FwxAwW38n/9403szc57zJPs45fmnznu3Q= github.com/multiformats/go-multiaddr-dns v0.1.0/go.mod h1:01k2RAqtoXIuPa3DCavAE9/6jc6nM0H3EgZyfUhN2oY= -github.com/multiformats/go-multiaddr-dns v0.2.0 h1:YWJoIDwLePniH7OU5hBnDZV6SWuvJqJ0YtN6pLeH9zA= github.com/multiformats/go-multiaddr-dns v0.2.0/go.mod h1:TJ5pr5bBO7Y1B18djPuRsVkduhQH2YqYSbxWJzYGdK0= +github.com/multiformats/go-multiaddr-dns v0.3.1 h1:QgQgR+LQVt3NPTjbrLLpsaT2ufAA2y0Mkk+QRVJbW3A= +github.com/multiformats/go-multiaddr-dns v0.3.1/go.mod h1:G/245BRQ6FJGmryJCrOuTdB37AMA5AMOVuO6NY3JwTk= github.com/multiformats/go-multiaddr-fmt v0.0.1/go.mod h1:aBYjqL4T/7j4Qx+R73XSv/8JsgnRFlf0w2KGLCmXl3Q= github.com/multiformats/go-multiaddr-fmt v0.1.0 h1:WLEFClPycPkp4fnIzoFoV9FVd49/eQsuaL3/CWe167E= github.com/multiformats/go-multiaddr-fmt v0.1.0/go.mod h1:hGtDIW4PU4BqJ50gW2quDuPVjyWNZxToGUh/HwTZYJo= @@ -1250,8 +1287,10 @@ github.com/multiformats/go-multistream v0.0.1/go.mod h1:fJTiDfXJVmItycydCnNx4+wS github.com/multiformats/go-multistream v0.0.4/go.mod h1:fJTiDfXJVmItycydCnNx4+wSzZ5NwG2FEVAI30fiovg= github.com/multiformats/go-multistream v0.1.0/go.mod 
h1:fJTiDfXJVmItycydCnNx4+wSzZ5NwG2FEVAI30fiovg= github.com/multiformats/go-multistream v0.1.1/go.mod h1:KmHZ40hzVxiaiwlj3MEbYgK9JFk2/9UktWZAF54Du38= -github.com/multiformats/go-multistream v0.2.0 h1:6AuNmQVKUkRnddw2YiDjt5Elit40SFxMJkVnhmETXtU= github.com/multiformats/go-multistream v0.2.0/go.mod h1:5GZPQZbkWOLOn3J2y4Y99vVW7vOfsAflxARk3x14o6k= +github.com/multiformats/go-multistream v0.2.1/go.mod h1:5GZPQZbkWOLOn3J2y4Y99vVW7vOfsAflxARk3x14o6k= +github.com/multiformats/go-multistream v0.2.2 h1:TCYu1BHTDr1F/Qm75qwYISQdzGcRdC21nFgQW7l7GBo= +github.com/multiformats/go-multistream v0.2.2/go.mod h1:UIcnm7Zuo8HKG+HkWgfQsGL+/MIEhyTqbODbIUwSXKs= github.com/multiformats/go-varint v0.0.1/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= github.com/multiformats/go-varint v0.0.2/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= github.com/multiformats/go-varint v0.0.5/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= @@ -1268,8 +1307,6 @@ github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OS github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo= github.com/neelance/sourcemap v0.0.0-20151028013722-8c68805598ab/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM= github.com/ngdinhtoan/glide-cleanup v0.2.0/go.mod h1:UQzsmiDOb8YV3nOsCxK/c9zPpCZVNoHScRE3EO9pVMM= -github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= -github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/nikkolasg/hexjson v0.0.0-20181101101858-78e39397e00c h1:5bFTChQxSKNwy8ALwOebjekYExl9HTT9urdawqC95tA= github.com/nikkolasg/hexjson v0.0.0-20181101101858-78e39397e00c/go.mod h1:7qN3Y0BvzRUf4LofcoJplQL10lsFDb4PYlePTVwrP28= github.com/nkovacs/streamquote v0.0.0-20170412213628-49af9bddb229 h1:E2B8qYyeSgv5MXpmzZXRNp8IAQ4vjxIjhpAf5hv/tAg= @@ -1287,6 +1324,7 @@ github.com/onsi/ginkgo 
v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0 github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= github.com/onsi/ginkgo v1.14.0 h1:2mOpI4JVVPBN+WQRa0WKH2eXR+Ey+uK4n7Zj0aYpIQA= github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= +github.com/onsi/gomega v1.4.1/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= @@ -1381,6 +1419,8 @@ github.com/raulk/go-watchdog v1.0.1/go.mod h1:lzSbAl5sh4rtI8tYHU01BWIDzgzqaQLj6R github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0 h1:OdAsTTz6OkFY5QxjkYwrChwuRruF69c169dPK26NUlk= github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= +github.com/rivo/uniseg v0.1.0 h1:+2KBaVoUmb9XzDsrx/Ct0W/EYOSFf/nWTauy++DprtY= +github.com/rivo/uniseg v0.1.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= @@ -1463,6 +1503,8 @@ github.com/src-d/envconfig v1.0.0/go.mod h1:Q9YQZ7BKITldTBnoxsE5gOeB5y66RyPXeue/ github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod 
h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI= +github.com/streadway/quantile v0.0.0-20150917103942-b0c588724d25 h1:7z3LSn867ex6VSaahyKadf4WtSsJIgne6A1WLOAGM8A= +github.com/streadway/quantile v0.0.0-20150917103942-b0c588724d25/go.mod h1:lbP8tGiBjZ5YWIc2fzuRpTaz0b/53vT6PEs3QuAWzuU= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.2.0 h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48= @@ -1569,6 +1611,7 @@ github.com/xorcare/golden v0.6.0/go.mod h1:7T39/ZMvaSEZlBPoYfVFmsBLmUl3uz9IuzWj/ github.com/xorcare/golden v0.6.1-0.20191112154924-b87f686d7542 h1:oWgZJmC1DorFZDpfMfWg7xk29yEOZiXmo/wZl+utTI8= github.com/xorcare/golden v0.6.1-0.20191112154924-b87f686d7542/go.mod h1:7T39/ZMvaSEZlBPoYfVFmsBLmUl3uz9IuzWj/U6FtvQ= github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/zondax/hid v0.9.0 h1:eiT3P6vNxAEVxXMw66eZUAAnU2zD33JBkfG/EnfAKl8= @@ -1578,7 +1621,6 @@ github.com/zondax/ledger-go v0.12.1/go.mod h1:KatxXrVDzgWwbssUWsF5+cOJHXPvzQ09YS go.dedis.ch/fixbuf v1.0.3 h1:hGcV9Cd/znUxlusJ64eAlExS+5cJDIyTyEG+otu5wQs= go.dedis.ch/fixbuf v1.0.3/go.mod h1:yzJMt34Wa5xD37V5RTdmp38cz3QhMagdGoem9anUalw= go.dedis.ch/kyber/v3 v3.0.4/go.mod h1:OzvaEnPvKlyrWyp3kGXlFdp7ap1VC6RkZDTaPikqhsQ= -go.dedis.ch/kyber/v3 v3.0.9 h1:i0ZbOQocHUjfFasBiUql5zVeC7u/vahFd96DFA8UOWk= go.dedis.ch/kyber/v3 v3.0.9/go.mod h1:rhNjUUg6ahf8HEg5HUvVBYoWY4boAafX8tYxX+PS+qg= go.dedis.ch/protobuf v1.0.5/go.mod h1:eIV4wicvi6JK0q/QnfIEGeSFNG0ZeB24kzut5+HaRLo= go.dedis.ch/protobuf v1.0.7/go.mod 
h1:pv5ysfkDX/EawiPqcW3ikOxsL5t+BqnV6xHSmE79KI4= @@ -1597,8 +1639,8 @@ go.opencensus.io v0.22.1/go.mod h1:Ap50jQcDJrx6rB6VgeeFPtuPIf3wMRvRfrfYDO6+BmA= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.5 h1:dntmOdLpSpHlVqbW5Eay97DelsZHe+55D+xC6i0dDS0= -go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= +go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= +go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= @@ -1651,16 +1693,19 @@ golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200115085410-6d4e4cb37c7d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200117160349-530e935923ad/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200128174031-69ecbb4d6d5d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200221231518-2aa609cf4a9d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200423211502-4bdfaf469ed5/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37/go.mod 
h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200602180216-279210d13fed/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200604202706-70a84ac30bf9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a h1:vclmkQCjlDX5OydZ9wv8rBCcS0QyQY66Mpf/7BZbInM= golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2 h1:It14KIkyBFYkHkwZ7k45minvA9aorojkyjGk9KJ5B/w= +golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/exp v0.0.0-20181106170214-d68db9428509/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -1683,8 +1728,9 @@ golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHl golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= -golang.org/x/lint v0.0.0-20200130185559-910be7a94367 h1:0IiAsCRByjO2QjX7ZPkw5oU9x+n1YqRL802rjC0c3Aw= golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b h1:Wh+f8QHJXR411sJR8/vRBTZ7YapZaRvUcLFFJhusH0k= +golang.org/x/lint 
v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= @@ -1695,6 +1741,7 @@ golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0 h1:RM4zey1++hCTbCVQfnWeKs9/IEsaBLA8vTkd0WVtmH4= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20180524181706-dfa909b99c79/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180719180050-a680a1efc54d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -1730,6 +1777,7 @@ golang.org/x/net v0.0.0-20191007182048-72f939374954/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200519113804-d87ec0cfa476/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net 
v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= @@ -1737,8 +1785,11 @@ golang.org/x/net v0.0.0-20200602114024-627f9648deb9/go.mod h1:qpuaurCH72eLCgpAm/ golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201022231255-08b38378de70 h1:Z6x4N9mAi4oF0TbHweCsH618MO6OI6UFgV0FP5n0wBY= golang.org/x/net v0.0.0-20201022231255-08b38378de70/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210423184538-5f58ad60dda6 h1:0PC75Fz/kyMGhL0e1QnypqK2kQMqKt9csD1GnMJR+Zk= +golang.org/x/net v0.0.0-20210423184538-5f58ad60dda6/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -1756,8 +1807,8 @@ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201207232520-09787c993a3a 
h1:DcqTD9SDLc+1P/r1EmRBwnVsrOwW+kk2vWf9n+1sGhs= -golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180202135801-37707fdb30a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180810173357-98c5dad5d1a0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1824,14 +1875,21 @@ golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200812155832-6a926be9bd1d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200926100807-9d91bd62050c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201119102817-f84b799fce68 h1:nxC68pudNYkKU6jWhgrqdreuFiOQWj1Fs7T3VrH4Pjw= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210426080607-c94f62235c83 h1:kHSDPqCtsHZOg0nVylfTo20DDhE9gG4Y0jn7hKQ0QAM= +golang.org/x/sys v0.0.0-20210426080607-c94f62235c83/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20201210144234-2321bbc49cbf h1:MZ2shdL+ZM/XzY3ZGOnh4Nlpnxz5GSOhOmtHo3iPU6M= +golang.org/x/term 
v0.0.0-20201210144234-2321bbc49cbf/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= -golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -1875,10 +1933,12 @@ golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapK golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200216192241-b320d3a0f5a2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200711155855-7342f9734a7d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200827010519-17fd2f27a9e3/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20201112185108-eeaa07dd7696 h1:Bfazo+enXJET5SbHeh95NtxabJF6fJ9r/jpfRJgd3j4= golang.org/x/tools 
v0.0.0-20201112185108-eeaa07dd7696/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a h1:CB3a9Nez8M13wwlr/E2YtwoU+qYHKfC+JrDa45RXXoQ= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -1946,8 +2006,9 @@ google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8 google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.28.1/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= -google.golang.org/grpc v1.31.1 h1:SfXqXS5hkufcdZ/mHtYCh53P2b+92WQq/DZcKLgsFRs= google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.33.2 h1:EQyQC3sa8M+p6Ulc8yy9SWSS2GVwyRc83gAbG8lrl4o= +google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -1963,8 +2024,8 @@ gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLks gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU= -gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= gopkg.in/cheggaaa/pb.v1 v1.0.28 h1:n1tBJnnK2r7g9OW2btFH91V92STTUevLXYFb8gy9EMk= gopkg.in/cheggaaa/pb.v1 v1.0.28/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= diff --git a/itests/api_test.go b/itests/api_test.go new file mode 100644 index 000000000..ba77701a2 --- /dev/null +++ b/itests/api_test.go @@ -0,0 +1,200 @@ +package itests + +import ( + "context" + "strings" + "testing" + "time" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/go-state-types/exitcode" + lapi "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/itests/kit" + "github.com/stretchr/testify/require" +) + +func TestAPI(t *testing.T) { + t.Run("direct", func(t *testing.T) { + runAPITest(t) + }) + t.Run("rpc", func(t *testing.T) { + runAPITest(t, kit.ThroughRPC()) + }) +} + +type apiSuite struct { + opts []interface{} +} + +// runAPITest is the entry point to API test suite +func runAPITest(t *testing.T, opts ...interface{}) { + ts := apiSuite{opts: opts} + + t.Run("version", ts.testVersion) + t.Run("id", ts.testID) + t.Run("testConnectTwo", ts.testConnectTwo) + t.Run("testMining", ts.testMining) + t.Run("testMiningReal", ts.testMiningReal) + 
t.Run("testSearchMsg", ts.testSearchMsg) + t.Run("testNonGenesisMiner", ts.testNonGenesisMiner) +} + +func (ts *apiSuite) testVersion(t *testing.T) { + lapi.RunningNodeType = lapi.NodeFull + t.Cleanup(func() { + lapi.RunningNodeType = lapi.NodeUnknown + }) + + full, _, _ := kit.EnsembleMinimal(t, ts.opts...) + + v, err := full.Version(context.Background()) + require.NoError(t, err) + + versions := strings.Split(v.Version, "+") + require.NotZero(t, len(versions), "empty version") + require.Equal(t, versions[0], build.BuildVersion) +} + +func (ts *apiSuite) testID(t *testing.T) { + ctx := context.Background() + + full, _, _ := kit.EnsembleMinimal(t, ts.opts...) + + id, err := full.ID(ctx) + if err != nil { + t.Fatal(err) + } + require.Regexp(t, "^12", id.Pretty()) +} + +func (ts *apiSuite) testConnectTwo(t *testing.T) { + ctx := context.Background() + + one, two, _, ens := kit.EnsembleTwoOne(t, ts.opts...) + + p, err := one.NetPeers(ctx) + require.NoError(t, err) + require.Empty(t, p, "node one has peers") + + p, err = two.NetPeers(ctx) + require.NoError(t, err) + require.Empty(t, p, "node two has peers") + + ens.InterconnectAll() + + peers, err := one.NetPeers(ctx) + require.NoError(t, err) + require.Lenf(t, peers, 2, "node one doesn't have 2 peers") + + peers, err = two.NetPeers(ctx) + require.NoError(t, err) + require.Lenf(t, peers, 2, "node two doesn't have 2 peers") +} + +func (ts *apiSuite) testSearchMsg(t *testing.T) { + ctx := context.Background() + + full, _, ens := kit.EnsembleMinimal(t, ts.opts...) 
+ + senderAddr, err := full.WalletDefaultAddress(ctx) + require.NoError(t, err) + + msg := &types.Message{ + From: senderAddr, + To: senderAddr, + Value: big.Zero(), + } + + ens.BeginMining(100 * time.Millisecond) + + sm, err := full.MpoolPushMessage(ctx, msg, nil) + require.NoError(t, err) + + res, err := full.StateWaitMsg(ctx, sm.Cid(), 1, lapi.LookbackNoLimit, true) + require.NoError(t, err) + + require.Equal(t, exitcode.Ok, res.Receipt.ExitCode, "message not successful") + + searchRes, err := full.StateSearchMsg(ctx, types.EmptyTSK, sm.Cid(), lapi.LookbackNoLimit, true) + require.NoError(t, err) + require.NotNil(t, searchRes) + + require.Equalf(t, res.TipSet, searchRes.TipSet, "search ts: %s, different from wait ts: %s", searchRes.TipSet, res.TipSet) +} + +func (ts *apiSuite) testMining(t *testing.T) { + ctx := context.Background() + + full, miner, _ := kit.EnsembleMinimal(t, ts.opts...) + + newHeads, err := full.ChainNotify(ctx) + require.NoError(t, err) + initHead := (<-newHeads)[0] + baseHeight := initHead.Val.Height() + + h1, err := full.ChainHead(ctx) + require.NoError(t, err) + require.Equal(t, int64(h1.Height()), int64(baseHeight)) + + bm := kit.NewBlockMiner(t, miner) + bm.MineUntilBlock(ctx, full, nil) + require.NoError(t, err) + + <-newHeads + + h2, err := full.ChainHead(ctx) + require.NoError(t, err) + require.Greater(t, int64(h2.Height()), int64(h1.Height())) + + bm.MineUntilBlock(ctx, full, nil) + require.NoError(t, err) + + <-newHeads + + h3, err := full.ChainHead(ctx) + require.NoError(t, err) + require.Greater(t, int64(h3.Height()), int64(h2.Height())) +} + +func (ts *apiSuite) testMiningReal(t *testing.T) { + build.InsecurePoStValidation = false + defer func() { + build.InsecurePoStValidation = true + }() + + ts.testMining(t) +} + +func (ts *apiSuite) testNonGenesisMiner(t *testing.T) { + ctx := context.Background() + + full, genesisMiner, ens := kit.EnsembleMinimal(t, append(ts.opts, kit.MockProofs())...) 
+ ens.InterconnectAll().BeginMining(4 * time.Millisecond) + + time.Sleep(1 * time.Second) + + gaa, err := genesisMiner.ActorAddress(ctx) + require.NoError(t, err) + + _, err = full.StateMinerInfo(ctx, gaa, types.EmptyTSK) + require.NoError(t, err) + + var newMiner kit.TestMiner + ens.Miner(&newMiner, full, + kit.OwnerAddr(full.DefaultKey), + kit.ProofType(abi.RegisteredSealProof_StackedDrg2KiBV1), // we're using v0 actors with old proofs. + kit.WithAllSubsystems(), + ).Start().InterconnectAll() + + ta, err := newMiner.ActorAddress(ctx) + require.NoError(t, err) + + tid, err := address.IDFromAddress(ta) + require.NoError(t, err) + + require.Equal(t, uint64(1001), tid) +} diff --git a/itests/batch_deal_test.go b/itests/batch_deal_test.go new file mode 100644 index 000000000..3881c917a --- /dev/null +++ b/itests/batch_deal_test.go @@ -0,0 +1,129 @@ +package itests + +import ( + "context" + "fmt" + "sort" + "testing" + "time" + + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/lotus/extern/storage-sealing/sealiface" + "github.com/filecoin-project/lotus/itests/kit" + "github.com/filecoin-project/lotus/markets/storageadapter" + "github.com/filecoin-project/lotus/node" + "github.com/filecoin-project/lotus/node/modules/dtypes" + "github.com/stretchr/testify/require" +) + +func TestBatchDealInput(t *testing.T) { + kit.QuietMiningLogs() + + var ( + blockTime = 10 * time.Millisecond + + // For these tests where the block time is artificially short, just use + // a deal start epoch that is guaranteed to be far enough in the future + // so that the deal starts sealing in time + dealStartEpoch = abi.ChainEpoch(2 << 12) + ) + + run := func(piece, deals, expectSectors int) func(t *testing.T) { + return func(t *testing.T) { + ctx := context.Background() + + publishPeriod := 10 * time.Second + maxDealsPerMsg := uint64(deals) + + // Set max deals per publish deals message to maxDealsPerMsg + opts := 
kit.ConstructorOpts(node.Options( + node.Override( + new(*storageadapter.DealPublisher), + storageadapter.NewDealPublisher(nil, storageadapter.PublishMsgConfig{ + Period: publishPeriod, + MaxDealsPerMsg: maxDealsPerMsg, + })), + node.Override(new(dtypes.GetSealingConfigFunc), func() (dtypes.GetSealingConfigFunc, error) { + return func() (sealiface.Config, error) { + return sealiface.Config{ + MaxWaitDealsSectors: 2, + MaxSealingSectors: 1, + MaxSealingSectorsForDeals: 3, + AlwaysKeepUnsealedCopy: true, + WaitDealsDelay: time.Hour, + }, nil + }, nil + }), + )) + client, miner, ens := kit.EnsembleMinimal(t, kit.MockProofs(), opts) + ens.InterconnectAll().BeginMining(blockTime) + dh := kit.NewDealHarness(t, client, miner, miner) + + err := miner.MarketSetAsk(ctx, big.Zero(), big.Zero(), 200, 128, 32<<30) + require.NoError(t, err) + + checkNoPadding := func() { + sl, err := miner.SectorsList(ctx) + require.NoError(t, err) + + sort.Slice(sl, func(i, j int) bool { + return sl[i] < sl[j] + }) + + for _, snum := range sl { + si, err := miner.SectorsStatus(ctx, snum, false) + require.NoError(t, err) + + // fmt.Printf("S %d: %+v %s\n", snum, si.Deals, si.State) + + for _, deal := range si.Deals { + if deal == 0 { + fmt.Printf("sector %d had a padding piece!\n", snum) + } + } + } + } + + // Starts a deal and waits until it's published + runDealTillSeal := func(rseed int) { + res, _, _, err := kit.CreateImportFile(ctx, client, rseed, piece) + require.NoError(t, err) + + deal := dh.StartDeal(ctx, res.Root, false, dealStartEpoch) + dh.WaitDealSealed(ctx, deal, false, true, checkNoPadding) + } + + // Run maxDealsPerMsg deals in parallel + done := make(chan struct{}, maxDealsPerMsg) + for rseed := 0; rseed < int(maxDealsPerMsg); rseed++ { + rseed := rseed + go func() { + runDealTillSeal(rseed) + done <- struct{}{} + }() + } + + // Wait for maxDealsPerMsg of the deals to be published + for i := 0; i < int(maxDealsPerMsg); i++ { + <-done + } + + checkNoPadding() + + sl, err := 
miner.SectorsList(ctx) + require.NoError(t, err) + require.Equal(t, len(sl), expectSectors) + } + } + + t.Run("4-p1600B", run(1600, 4, 4)) + t.Run("4-p513B", run(513, 4, 2)) + if !testing.Short() { + t.Run("32-p257B", run(257, 32, 8)) + t.Run("32-p10B", run(10, 32, 2)) + + // fixme: this appears to break data-transfer / markets in some really creative ways + // t.Run("128-p10B", run(10, 128, 8)) + } +} diff --git a/itests/ccupgrade_test.go b/itests/ccupgrade_test.go new file mode 100644 index 000000000..dfd0144f2 --- /dev/null +++ b/itests/ccupgrade_test.go @@ -0,0 +1,105 @@ +package itests + +import ( + "context" + "fmt" + "testing" + "time" + + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/itests/kit" + + "github.com/stretchr/testify/require" +) + +func TestCCUpgrade(t *testing.T) { + kit.QuietMiningLogs() + + for _, height := range []abi.ChainEpoch{ + -1, // before + 162, // while sealing + 530, // after upgrade deal + 5000, // after + } { + height := height // make linters happy by copying + t.Run(fmt.Sprintf("upgrade-%d", height), func(t *testing.T) { + runTestCCUpgrade(t, height) + }) + } +} + +func runTestCCUpgrade(t *testing.T, upgradeHeight abi.ChainEpoch) { + ctx := context.Background() + blockTime := 5 * time.Millisecond + + opts := kit.ConstructorOpts(kit.LatestActorsAt(upgradeHeight)) + client, miner, ens := kit.EnsembleMinimal(t, kit.MockProofs(), opts) + ens.InterconnectAll().BeginMining(blockTime) + + maddr, err := miner.ActorAddress(ctx) + if err != nil { + t.Fatal(err) + } + + CC := abi.SectorNumber(kit.DefaultPresealsPerBootstrapMiner + 1) + Upgraded := CC + 1 + + miner.PledgeSectors(ctx, 1, 0, nil) + + sl, err := miner.SectorsList(ctx) + require.NoError(t, err) + require.Len(t, sl, 1, "expected 1 sector") + require.Equal(t, CC, sl[0], "unexpected sector number") + + { + si, err := client.StateSectorGetInfo(ctx, maddr, CC, types.EmptyTSK) + 
require.NoError(t, err) + require.Less(t, 50000, int(si.Expiration)) + } + + err = miner.SectorMarkForUpgrade(ctx, sl[0]) + require.NoError(t, err) + + dh := kit.NewDealHarness(t, client, miner, miner) + deal, res, inPath := dh.MakeOnlineDeal(ctx, kit.MakeFullDealParams{ + Rseed: 6, + SuspendUntilCryptoeconStable: true, + }) + outPath := dh.PerformRetrieval(context.Background(), deal, res.Root, false) + kit.AssertFilesEqual(t, inPath, outPath) + + // Validate upgrade + + { + exp, err := client.StateSectorExpiration(ctx, maddr, CC, types.EmptyTSK) + if err != nil { + require.Contains(t, err.Error(), "failed to find sector 3") // already cleaned up + } else { + require.NoError(t, err) + require.NotNil(t, exp) + require.Greater(t, 50000, int(exp.OnTime)) + } + } + { + exp, err := client.StateSectorExpiration(ctx, maddr, Upgraded, types.EmptyTSK) + require.NoError(t, err) + require.Less(t, 50000, int(exp.OnTime)) + } + + dlInfo, err := client.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK) + require.NoError(t, err) + + // Sector should expire. + for { + // Wait for the sector to expire. + status, err := miner.SectorsStatus(ctx, CC, true) + require.NoError(t, err) + if status.OnTime == 0 && status.Early == 0 { + break + } + t.Log("waiting for sector to expire") + // wait one deadline per loop. + time.Sleep(time.Duration(dlInfo.WPoStChallengeWindow) * blockTime) + } +} diff --git a/itests/cli_test.go b/itests/cli_test.go new file mode 100644 index 000000000..0bd1ec3b4 --- /dev/null +++ b/itests/cli_test.go @@ -0,0 +1,21 @@ +package itests + +import ( + "os" + "testing" + "time" + + "github.com/filecoin-project/lotus/cli" + "github.com/filecoin-project/lotus/itests/kit" +) + +// TestClient does a basic test to exercise the client CLI commands. 
+func TestClient(t *testing.T) { + _ = os.Setenv("BELLMAN_NO_GPU", "1") + kit.QuietMiningLogs() + + blockTime := 5 * time.Millisecond + client, _, ens := kit.EnsembleMinimal(t, kit.MockProofs(), kit.ThroughRPC()) + ens.InterconnectAll().BeginMining(blockTime) + kit.RunClientTest(t, cli.Commands, client) +} diff --git a/api/test/deadlines.go b/itests/deadlines_test.go similarity index 66% rename from api/test/deadlines.go rename to itests/deadlines_test.go index 3eb50402c..19b0a10dc 100644 --- a/api/test/deadlines.go +++ b/itests/deadlines_test.go @@ -1,26 +1,18 @@ -package test +package itests import ( "bytes" "context" - "fmt" "testing" "time" - "github.com/filecoin-project/lotus/api" - - "github.com/stretchr/testify/require" - "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-bitfield" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/big" "github.com/filecoin-project/go-state-types/exitcode" "github.com/filecoin-project/go-state-types/network" - miner2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/miner" - "github.com/ipfs/go-cid" - cbor "github.com/ipfs/go-ipld-cbor" - + "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/blockstore" "github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/chain/actors" @@ -28,7 +20,12 @@ import ( "github.com/filecoin-project/lotus/chain/actors/builtin/miner" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/extern/sector-storage/mock" + "github.com/filecoin-project/lotus/itests/kit" "github.com/filecoin-project/lotus/node/impl" + miner2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/miner" + "github.com/ipfs/go-cid" + cbor "github.com/ipfs/go-ipld-cbor" + "github.com/stretchr/testify/require" ) // TestDeadlineToggling: @@ -54,30 +51,43 @@ import ( // * goes through another PP // * asserts that miner B loses power // * asserts that miner D loses power, is 
inactive -func TestDeadlineToggling(t *testing.T, b APIBuilder, blocktime time.Duration) { - var upgradeH abi.ChainEpoch = 4000 - var provingPeriod abi.ChainEpoch = 2880 +func TestDeadlineToggling(t *testing.T) { + kit.Expensive(t) - const sectorsC, sectorsD, sectersB = 10, 9, 8 + kit.QuietMiningLogs() + + const sectorsC, sectorsD, sectorsB = 10, 9, 8 + + var ( + upgradeH abi.ChainEpoch = 4000 + provingPeriod abi.ChainEpoch = 2880 + blocktime = 2 * time.Millisecond + ) ctx, cancel := context.WithCancel(context.Background()) defer cancel() - n, sn := b(t, []FullNodeOpts{FullNodeWithV4ActorsAt(upgradeH)}, OneMiner) + var ( + client kit.TestFullNode + minerA kit.TestMiner + minerB kit.TestMiner + minerC kit.TestMiner + minerD kit.TestMiner + minerE kit.TestMiner + ) + opts := []kit.NodeOpt{kit.ConstructorOpts(kit.NetworkUpgradeAt(network.Version12, upgradeH))} + opts = append(opts, kit.WithAllSubsystems()) + ens := kit.NewEnsemble(t, kit.MockProofs()). + FullNode(&client, opts...). + Miner(&minerA, &client, opts...). + Start(). + InterconnectAll() + ens.BeginMining(blocktime) - client := n[0].FullNode.(*impl.FullNodeAPI) - minerA := sn[0] - - { - addrinfo, err := client.NetAddrsListen(ctx) - if err != nil { - t.Fatal(err) - } - - if err := minerA.NetConnect(ctx, addrinfo); err != nil { - t.Fatal(err) - } - } + opts = append(opts, kit.OwnerAddr(client.DefaultKey)) + ens.Miner(&minerB, &client, opts...). + Miner(&minerC, &client, opts...). + Start() defaultFrom, err := client.WalletDefaultAddress(ctx) require.NoError(t, err) @@ -87,28 +97,6 @@ func TestDeadlineToggling(t *testing.T, b APIBuilder, blocktime time.Duration) { build.Clock.Sleep(time.Second) - done := make(chan struct{}) - go func() { - defer close(done) - for ctx.Err() == nil { - build.Clock.Sleep(blocktime) - if err := minerA.MineOne(ctx, MineNext); err != nil { - if ctx.Err() != nil { - // context was canceled, ignore the error. 
- return - } - t.Error(err) - } - } - }() - defer func() { - cancel() - <-done - }() - - minerB := n[0].Stb(ctx, t, TestSpt, defaultFrom) - minerC := n[0].Stb(ctx, t, TestSpt, defaultFrom) - maddrB, err := minerB.ActorAddress(ctx) require.NoError(t, err) maddrC, err := minerC.ActorAddress(ctx) @@ -119,20 +107,20 @@ func TestDeadlineToggling(t *testing.T, b APIBuilder, blocktime time.Duration) { // pledge sectors on C, go through a PP, check for power { - pledgeSectors(t, ctx, minerC, sectorsC, 0, nil) + minerC.PledgeSectors(ctx, sectorsC, 0, nil) di, err := client.StateMinerProvingDeadline(ctx, maddrC, types.EmptyTSK) require.NoError(t, err) - fmt.Printf("Running one proving period (miner C)\n") - fmt.Printf("End for head.Height > %d\n", di.PeriodStart+di.WPoStProvingPeriod*2) + t.Log("Running one proving period (miner C)") + t.Logf("End for head.Height > %d", di.PeriodStart+di.WPoStProvingPeriod*2) for { head, err := client.ChainHead(ctx) require.NoError(t, err) if head.Height() > di.PeriodStart+provingPeriod*2 { - fmt.Printf("Now head.Height = %d\n", head.Height()) + t.Logf("Now head.Height = %d", head.Height()) break } build.Clock.Sleep(blocktime) @@ -153,13 +141,13 @@ func TestDeadlineToggling(t *testing.T, b APIBuilder, blocktime time.Duration) { require.NoError(t, err) if head.Height() > upgradeH+provingPeriod { - fmt.Printf("Now head.Height = %d\n", head.Height()) + t.Logf("Now head.Height = %d", head.Height()) break } build.Clock.Sleep(blocktime) } - checkMiner := func(ma address.Address, power abi.StoragePower, active bool, tsk types.TipSetKey) { + checkMiner := func(ma address.Address, power abi.StoragePower, active, activeIfCron bool, tsk types.TipSetKey) { p, err := client.StateMinerPower(ctx, ma, tsk) require.NoError(t, err) @@ -174,6 +162,22 @@ func TestDeadlineToggling(t *testing.T, b APIBuilder, blocktime time.Duration) { act, err := mst.DeadlineCronActive() require.NoError(t, err) + + if tsk != types.EmptyTSK { + ts, err := 
client.ChainGetTipSet(ctx, tsk) + require.NoError(t, err) + di, err := mst.DeadlineInfo(ts.Height()) + require.NoError(t, err) + + // cron happened on the same epoch some other condition would have happened + if di.Open == ts.Height() { + act, err := mst.DeadlineCronActive() + require.NoError(t, err) + require.Equal(t, activeIfCron, act) + return + } + } + require.Equal(t, active, act) } @@ -181,15 +185,16 @@ func TestDeadlineToggling(t *testing.T, b APIBuilder, blocktime time.Duration) { { uts, err := client.ChainGetTipSetByHeight(ctx, upgradeH+2, types.EmptyTSK) require.NoError(t, err) - checkMiner(maddrB, types.NewInt(0), true, uts.Key()) + checkMiner(maddrB, types.NewInt(0), true, true, uts.Key()) } nv, err := client.StateNetworkVersion(ctx, types.EmptyTSK) require.NoError(t, err) require.GreaterOrEqual(t, nv, network.Version12) - minerD := n[0].Stb(ctx, t, TestSpt, defaultFrom) - minerE := n[0].Stb(ctx, t, TestSpt, defaultFrom) + ens.Miner(&minerD, &client, opts...). + Miner(&minerE, &client, opts...). 
+ Start() maddrD, err := minerD.ActorAddress(ctx) require.NoError(t, err) @@ -197,19 +202,19 @@ func TestDeadlineToggling(t *testing.T, b APIBuilder, blocktime time.Duration) { require.NoError(t, err) // first round of miner checks - checkMiner(maddrA, types.NewInt(uint64(ssz)*GenesisPreseals), true, types.EmptyTSK) - checkMiner(maddrC, types.NewInt(uint64(ssz)*sectorsC), true, types.EmptyTSK) + checkMiner(maddrA, types.NewInt(uint64(ssz)*kit.DefaultPresealsPerBootstrapMiner), true, true, types.EmptyTSK) + checkMiner(maddrC, types.NewInt(uint64(ssz)*sectorsC), true, true, types.EmptyTSK) - checkMiner(maddrB, types.NewInt(0), false, types.EmptyTSK) - checkMiner(maddrD, types.NewInt(0), false, types.EmptyTSK) - checkMiner(maddrE, types.NewInt(0), false, types.EmptyTSK) + checkMiner(maddrB, types.NewInt(0), false, false, types.EmptyTSK) + checkMiner(maddrD, types.NewInt(0), false, false, types.EmptyTSK) + checkMiner(maddrE, types.NewInt(0), false, false, types.EmptyTSK) // pledge sectors on minerB/minerD, stop post on minerC - pledgeSectors(t, ctx, minerB, sectersB, 0, nil) - checkMiner(maddrB, types.NewInt(0), true, types.EmptyTSK) + minerB.PledgeSectors(ctx, sectorsB, 0, nil) + checkMiner(maddrB, types.NewInt(0), true, true, types.EmptyTSK) - pledgeSectors(t, ctx, minerD, sectorsD, 0, nil) - checkMiner(maddrD, types.NewInt(0), true, types.EmptyTSK) + minerD.PledgeSectors(ctx, sectorsD, 0, nil) + checkMiner(maddrD, types.NewInt(0), true, true, types.EmptyTSK) minerC.StorageMiner.(*impl.StorageMinerAPI).IStorageMgr.(*mock.SectorMgr).Fail() @@ -224,7 +229,7 @@ func TestDeadlineToggling(t *testing.T, b APIBuilder, blocktime time.Duration) { params := &miner.SectorPreCommitInfo{ Expiration: 2880 * 300, SectorNumber: 22, - SealProof: TestSpt, + SealProof: kit.TestSpt, SealedCID: cr, SealRandEpoch: head.Height() - 200, @@ -253,13 +258,13 @@ func TestDeadlineToggling(t *testing.T, b APIBuilder, blocktime time.Duration) { require.NoError(t, err) if head.Height() > 
upgradeH+provingPeriod+(provingPeriod/2) { - fmt.Printf("Now head.Height = %d\n", head.Height()) + t.Logf("Now head.Height = %d", head.Height()) break } build.Clock.Sleep(blocktime) } - checkMiner(maddrE, types.NewInt(0), true, types.EmptyTSK) + checkMiner(maddrE, types.NewInt(0), true, true, types.EmptyTSK) // go through rest of the PP for { @@ -267,18 +272,18 @@ func TestDeadlineToggling(t *testing.T, b APIBuilder, blocktime time.Duration) { require.NoError(t, err) if head.Height() > upgradeH+(provingPeriod*3) { - fmt.Printf("Now head.Height = %d\n", head.Height()) + t.Logf("Now head.Height = %d", head.Height()) break } build.Clock.Sleep(blocktime) } // second round of miner checks - checkMiner(maddrA, types.NewInt(uint64(ssz)*GenesisPreseals), true, types.EmptyTSK) - checkMiner(maddrC, types.NewInt(0), true, types.EmptyTSK) - checkMiner(maddrB, types.NewInt(uint64(ssz)*sectersB), true, types.EmptyTSK) - checkMiner(maddrD, types.NewInt(uint64(ssz)*sectorsD), true, types.EmptyTSK) - checkMiner(maddrE, types.NewInt(0), false, types.EmptyTSK) + checkMiner(maddrA, types.NewInt(uint64(ssz)*kit.DefaultPresealsPerBootstrapMiner), true, true, types.EmptyTSK) + checkMiner(maddrC, types.NewInt(0), true, true, types.EmptyTSK) + checkMiner(maddrB, types.NewInt(uint64(ssz)*sectorsB), true, true, types.EmptyTSK) + checkMiner(maddrD, types.NewInt(uint64(ssz)*sectorsD), true, true, types.EmptyTSK) + checkMiner(maddrE, types.NewInt(0), false, false, types.EmptyTSK) // disable post on minerB minerB.StorageMiner.(*impl.StorageMinerAPI).IStorageMgr.(*mock.SectorMgr).Fail() @@ -323,13 +328,14 @@ func TestDeadlineToggling(t *testing.T, b APIBuilder, blocktime time.Duration) { }, nil) require.NoError(t, err) - fmt.Println("sent termination message:", smsg.Cid()) + t.Log("sent termination message:", smsg.Cid()) r, err := client.StateWaitMsg(ctx, smsg.Cid(), 2, api.LookbackNoLimit, true) require.NoError(t, err) require.Equal(t, exitcode.Ok, r.Receipt.ExitCode) - checkMiner(maddrD, 
types.NewInt(0), true, r.TipSet) + // assert inactive if the message landed in the tipset we run cron in + checkMiner(maddrD, types.NewInt(0), true, false, r.TipSet) } // go through another PP @@ -338,15 +344,14 @@ func TestDeadlineToggling(t *testing.T, b APIBuilder, blocktime time.Duration) { require.NoError(t, err) if head.Height() > upgradeH+(provingPeriod*5) { - fmt.Printf("Now head.Height = %d\n", head.Height()) + t.Logf("Now head.Height = %d", head.Height()) break } build.Clock.Sleep(blocktime) } - // third round of miner checks - checkMiner(maddrA, types.NewInt(uint64(ssz)*GenesisPreseals), true, types.EmptyTSK) - checkMiner(maddrC, types.NewInt(0), true, types.EmptyTSK) - checkMiner(maddrB, types.NewInt(0), true, types.EmptyTSK) - checkMiner(maddrD, types.NewInt(0), false, types.EmptyTSK) + checkMiner(maddrA, types.NewInt(uint64(ssz)*kit.DefaultPresealsPerBootstrapMiner), true, true, types.EmptyTSK) + checkMiner(maddrC, types.NewInt(0), true, true, types.EmptyTSK) + checkMiner(maddrB, types.NewInt(0), true, true, types.EmptyTSK) + checkMiner(maddrD, types.NewInt(0), false, false, types.EmptyTSK) } diff --git a/itests/deals_concurrent_test.go b/itests/deals_concurrent_test.go new file mode 100644 index 000000000..d7932b896 --- /dev/null +++ b/itests/deals_concurrent_test.go @@ -0,0 +1,176 @@ +package itests + +import ( + "context" + "fmt" + "sync" + "testing" + "time" + + "github.com/stretchr/testify/require" + + datatransfer "github.com/filecoin-project/go-data-transfer" + "github.com/filecoin-project/go-state-types/abi" + + "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/chain/actors/policy" + "github.com/filecoin-project/lotus/itests/kit" + "github.com/filecoin-project/lotus/node" + "github.com/filecoin-project/lotus/node/modules" + "github.com/filecoin-project/lotus/node/modules/dtypes" + "github.com/filecoin-project/lotus/node/repo" +) + +// TestDealWithMarketAndMinerNode is running concurrently a number of storage and 
retrieval deals towards a miner +architecture where the `mining/sealing/proving` node is a separate process from the `markets` node +func TestDealWithMarketAndMinerNode(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode") + } + + kit.QuietMiningLogs() + + oldDelay := policy.GetPreCommitChallengeDelay() + policy.SetPreCommitChallengeDelay(5) + t.Cleanup(func() { + policy.SetPreCommitChallengeDelay(oldDelay) + }) + + // For these tests where the block time is artificially short, just use + // a deal start epoch that is guaranteed to be far enough in the future + // so that the deal starts sealing in time + startEpoch := abi.ChainEpoch(2 << 12) + + runTest := func(t *testing.T, n int, fastRetrieval bool, carExport bool) { + api.RunningNodeType = api.NodeMiner // TODO(anteva): fix me + + client, main, market, _ := kit.EnsembleWithMinerAndMarketNodes(t, kit.ThroughRPC()) + + dh := kit.NewDealHarness(t, client, main, market) + + dh.RunConcurrentDeals(kit.RunConcurrentDealsOpts{ + N: n, + FastRetrieval: fastRetrieval, + CarExport: carExport, + StartEpoch: startEpoch, + }) + } + + // TODO: add 2, 4, 8, more when this graphsync issue is fixed: https://github.com/ipfs/go-graphsync/issues/175# + cycles := []int{1} + for _, n := range cycles { + n := n + ns := fmt.Sprintf("%d", n) + t.Run(ns+"-fastretrieval-CAR", func(t *testing.T) { runTest(t, n, true, true) }) + t.Run(ns+"-fastretrieval-NoCAR", func(t *testing.T) { runTest(t, n, true, false) }) + t.Run(ns+"-stdretrieval-CAR", func(t *testing.T) { runTest(t, n, false, true) }) + t.Run(ns+"-stdretrieval-NoCAR", func(t *testing.T) { runTest(t, n, false, false) }) + } +} + +func TestDealCyclesConcurrent(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode") + } + + kit.QuietMiningLogs() + + blockTime := 10 * time.Millisecond + + // For these tests where the block time is artificially short, just use + // a deal start epoch that is guaranteed to be far enough in the future + // 
so that the deal starts sealing in time + startEpoch := abi.ChainEpoch(2 << 12) + + runTest := func(t *testing.T, n int, fastRetrieval bool, carExport bool) { + client, miner, ens := kit.EnsembleMinimal(t, kit.MockProofs()) + ens.InterconnectAll().BeginMining(blockTime) + dh := kit.NewDealHarness(t, client, miner, miner) + + dh.RunConcurrentDeals(kit.RunConcurrentDealsOpts{ + N: n, + FastRetrieval: fastRetrieval, + CarExport: carExport, + StartEpoch: startEpoch, + }) + } + + // TODO: add 2, 4, 8, more when this graphsync issue is fixed: https://github.com/ipfs/go-graphsync/issues/175# + cycles := []int{1} + for _, n := range cycles { + n := n + ns := fmt.Sprintf("%d", n) + t.Run(ns+"-fastretrieval-CAR", func(t *testing.T) { runTest(t, n, true, true) }) + t.Run(ns+"-fastretrieval-NoCAR", func(t *testing.T) { runTest(t, n, true, false) }) + t.Run(ns+"-stdretrieval-CAR", func(t *testing.T) { runTest(t, n, false, true) }) + t.Run(ns+"-stdretrieval-NoCAR", func(t *testing.T) { runTest(t, n, false, false) }) + } +} + +func TestSimultenousTransferLimit(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode") + } + + kit.QuietMiningLogs() + + blockTime := 10 * time.Millisecond + + // For these tests where the block time is artificially short, just use + // a deal start epoch that is guaranteed to be far enough in the future + // so that the deal starts sealing in time + startEpoch := abi.ChainEpoch(2 << 12) + + runTest := func(t *testing.T) { + client, miner, ens := kit.EnsembleMinimal(t, kit.MockProofs(), kit.ConstructorOpts( + node.ApplyIf(node.IsType(repo.StorageMiner), node.Override(new(dtypes.StagingGraphsync), modules.StagingGraphsync(2))), + )) + ens.InterconnectAll().BeginMining(blockTime) + dh := kit.NewDealHarness(t, client, miner, miner) + + ctx, cancel := context.WithCancel(context.Background()) + + du, err := miner.MarketDataTransferUpdates(ctx) + require.NoError(t, err) + + var maxOngoing int + var wg sync.WaitGroup + wg.Add(1) + go 
func() { + defer wg.Done() + + ongoing := map[datatransfer.TransferID]struct{}{} + + for { + select { + case u := <-du: + t.Logf("%d - %s", u.TransferID, datatransfer.Statuses[u.Status]) + if u.Status == datatransfer.Ongoing { + ongoing[u.TransferID] = struct{}{} + } else { + delete(ongoing, u.TransferID) + } + + if len(ongoing) > maxOngoing { + maxOngoing = len(ongoing) + } + case <-ctx.Done(): + return + } + } + }() + + dh.RunConcurrentDeals(kit.RunConcurrentDealsOpts{ + N: 1, // TODO: set to 20 after https://github.com/ipfs/go-graphsync/issues/175 is fixed + FastRetrieval: true, + StartEpoch: startEpoch, + }) + + cancel() + wg.Wait() + + require.LessOrEqual(t, maxOngoing, 2) + } + + runTest(t) +} diff --git a/itests/deals_offline_test.go b/itests/deals_offline_test.go new file mode 100644 index 000000000..ceae46fdf --- /dev/null +++ b/itests/deals_offline_test.go @@ -0,0 +1,101 @@ +package itests + +import ( + "context" + "path/filepath" + "testing" + "time" + + "github.com/filecoin-project/go-fil-markets/storagemarket" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/itests/kit" + "github.com/stretchr/testify/require" +) + +func TestOfflineDealFlow(t *testing.T) { + blocktime := 10 * time.Millisecond + + // For these tests where the block time is artificially short, just use + // a deal start epoch that is guaranteed to be far enough in the future + // so that the deal starts sealing in time + startEpoch := abi.ChainEpoch(2 << 12) + + runTest := func(t *testing.T, fastRet bool) { + ctx := context.Background() + client, miner, ens := kit.EnsembleMinimal(t, kit.MockProofs()) + ens.InterconnectAll().BeginMining(blocktime) + + dh := kit.NewDealHarness(t, client, miner, miner) + + // Create a random file and import on the client. 
+ res, inFile := client.CreateImportFile(ctx, 1, 0) + + // Get the piece size and commP + rootCid := res.Root + pieceInfo, err := client.ClientDealPieceCID(ctx, rootCid) + require.NoError(t, err) + t.Log("FILE CID:", rootCid) + + // Create a storage deal with the miner + maddr, err := miner.ActorAddress(ctx) + require.NoError(t, err) + + addr, err := client.WalletDefaultAddress(ctx) + require.NoError(t, err) + + // Manual storage deal (offline deal) + dataRef := &storagemarket.DataRef{ + TransferType: storagemarket.TTManual, + Root: rootCid, + PieceCid: &pieceInfo.PieceCID, + PieceSize: pieceInfo.PieceSize.Unpadded(), + } + + proposalCid, err := client.ClientStartDeal(ctx, &api.StartDealParams{ + Data: dataRef, + Wallet: addr, + Miner: maddr, + EpochPrice: types.NewInt(1000000), + DealStartEpoch: startEpoch, + MinBlocksDuration: uint64(build.MinDealDuration), + FastRetrieval: fastRet, + }) + require.NoError(t, err) + + // Wait for the deal to reach StorageDealCheckForAcceptance on the client + cd, err := client.ClientGetDealInfo(ctx, *proposalCid) + require.NoError(t, err) + require.Eventually(t, func() bool { + cd, _ := client.ClientGetDealInfo(ctx, *proposalCid) + return cd.State == storagemarket.StorageDealCheckForAcceptance + }, 30*time.Second, 1*time.Second, "actual deal status is %s", storagemarket.DealStates[cd.State]) + + // Create a CAR file from the raw file + carFileDir := t.TempDir() + carFilePath := filepath.Join(carFileDir, "out.car") + err = client.ClientGenCar(ctx, api.FileRef{Path: inFile}, carFilePath) + require.NoError(t, err) + + // Import the CAR file on the miner - this is the equivalent to + // transferring the file across the wire in a normal (non-offline) deal + err = miner.DealsImportData(ctx, *proposalCid, carFilePath) + require.NoError(t, err) + + // Wait for the deal to be published + dh.WaitDealPublished(ctx, proposalCid) + + t.Logf("deal published, retrieving") + + // Retrieve the deal + outFile := dh.PerformRetrieval(ctx, 
proposalCid, rootCid, false) + + kit.AssertFilesEqual(t, inFile, outFile) + + } + + t.Run("stdretrieval", func(t *testing.T) { runTest(t, false) }) + t.Run("fastretrieval", func(t *testing.T) { runTest(t, true) }) +} diff --git a/itests/deals_power_test.go b/itests/deals_power_test.go new file mode 100644 index 000000000..16ad8ae6a --- /dev/null +++ b/itests/deals_power_test.go @@ -0,0 +1,61 @@ +package itests + +import ( + "context" + "testing" + "time" + + "github.com/filecoin-project/lotus/itests/kit" +) + +func TestFirstDealEnablesMining(t *testing.T) { + // test making a deal with a fresh miner, and see if it starts to mine. + if testing.Short() { + t.Skip("skipping test in short mode") + } + + kit.QuietMiningLogs() + + var ( + client kit.TestFullNode + genMiner kit.TestMiner // bootstrap + provider kit.TestMiner // no sectors, will need to create one + ) + + ens := kit.NewEnsemble(t, kit.MockProofs()) + ens.FullNode(&client) + ens.Miner(&genMiner, &client, kit.WithAllSubsystems()) + ens.Miner(&provider, &client, kit.WithAllSubsystems(), kit.PresealSectors(0)) + ens.Start().InterconnectAll().BeginMining(50 * time.Millisecond) + + ctx := context.Background() + + dh := kit.NewDealHarness(t, &client, &provider, &provider) + + ref, _ := client.CreateImportFile(ctx, 5, 0) + + t.Log("FILE CID:", ref.Root) + + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + // start a goroutine to monitor head changes from the client + // once the provider has mined a block, thanks to the power acquired from the deal, + // we pass the test. + providerMined := make(chan struct{}) + + go func() { + _ = client.WaitTillChain(ctx, kit.BlockMinedBy(provider.ActorAddr)) + close(providerMined) + }() + + // now perform the deal. 
+ deal := dh.StartDeal(ctx, ref.Root, false, 0) + + // TODO: this sleep is only necessary because deals don't immediately get logged in the dealstore, we should fix this + time.Sleep(time.Second) + + dh.WaitDealSealed(ctx, deal, false, false, nil) + + <-providerMined +} diff --git a/itests/deals_pricing_test.go b/itests/deals_pricing_test.go new file mode 100644 index 000000000..eb28af0bd --- /dev/null +++ b/itests/deals_pricing_test.go @@ -0,0 +1,131 @@ +package itests + +import ( + "context" + "testing" + "time" + + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/lotus/extern/sector-storage/storiface" + "github.com/filecoin-project/lotus/itests/kit" + "github.com/stretchr/testify/require" +) + +func TestQuotePriceForUnsealedRetrieval(t *testing.T) { + var ( + ctx = context.Background() + blocktime = 50 * time.Millisecond + ) + + kit.QuietMiningLogs() + + client, miner, ens := kit.EnsembleMinimal(t) + ens.InterconnectAll().BeginMining(blocktime) + + var ( + ppb = int64(1) + unsealPrice = int64(77) + ) + + // Set unsealed price to non-zero + ask, err := miner.MarketGetRetrievalAsk(ctx) + require.NoError(t, err) + ask.PricePerByte = abi.NewTokenAmount(ppb) + ask.UnsealPrice = abi.NewTokenAmount(unsealPrice) + err = miner.MarketSetRetrievalAsk(ctx, ask) + require.NoError(t, err) + + dh := kit.NewDealHarness(t, client, miner, miner) + + deal1, res1, _ := dh.MakeOnlineDeal(ctx, kit.MakeFullDealParams{Rseed: 6}) + + // one more storage deal for the same data + _, res2, _ := dh.MakeOnlineDeal(ctx, kit.MakeFullDealParams{Rseed: 6}) + require.Equal(t, res1.Root, res2.Root) + + // Retrieval + dealInfo, err := client.ClientGetDealInfo(ctx, *deal1) + require.NoError(t, err) + + // fetch quote -> zero for unsealed price since unsealed file already exists. 
+ offers, err := client.ClientFindData(ctx, res1.Root, &dealInfo.PieceCID) + require.NoError(t, err) + require.Len(t, offers, 2) + require.Equal(t, offers[0], offers[1]) + require.Equal(t, uint64(0), offers[0].UnsealPrice.Uint64()) + require.Equal(t, dealInfo.Size*uint64(ppb), offers[0].MinPrice.Uint64()) + + // remove ONLY one unsealed file + ss, err := miner.StorageList(context.Background()) + require.NoError(t, err) + _, err = miner.SectorsList(ctx) + require.NoError(t, err) + +iLoop: + for storeID, sd := range ss { + for _, sector := range sd { + err := miner.StorageDropSector(ctx, storeID, sector.SectorID, storiface.FTUnsealed) + require.NoError(t, err) + break iLoop // remove ONLY one + } + } + + // get retrieval quote -> zero for unsealed price as unsealed file exists. + offers, err = client.ClientFindData(ctx, res1.Root, &dealInfo.PieceCID) + require.NoError(t, err) + require.Len(t, offers, 2) + require.Equal(t, offers[0], offers[1]) + require.Equal(t, uint64(0), offers[0].UnsealPrice.Uint64()) + require.Equal(t, dealInfo.Size*uint64(ppb), offers[0].MinPrice.Uint64()) + + // remove the other unsealed file as well + ss, err = miner.StorageList(context.Background()) + require.NoError(t, err) + _, err = miner.SectorsList(ctx) + require.NoError(t, err) + for storeID, sd := range ss { + for _, sector := range sd { + require.NoError(t, miner.StorageDropSector(ctx, storeID, sector.SectorID, storiface.FTUnsealed)) + } + } + + // fetch quote -> non-zero for unseal price as we no more unsealed files. 
+ offers, err = client.ClientFindData(ctx, res1.Root, &dealInfo.PieceCID) + require.NoError(t, err) + require.Len(t, offers, 2) + require.Equal(t, offers[0], offers[1]) + require.Equal(t, uint64(unsealPrice), offers[0].UnsealPrice.Uint64()) + total := (dealInfo.Size * uint64(ppb)) + uint64(unsealPrice) + require.Equal(t, total, offers[0].MinPrice.Uint64()) +} + +func TestZeroPricePerByteRetrieval(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode") + } + + kit.QuietMiningLogs() + + var ( + blockTime = 10 * time.Millisecond + startEpoch = abi.ChainEpoch(2 << 12) + ) + + client, miner, ens := kit.EnsembleMinimal(t, kit.MockProofs()) + ens.InterconnectAll().BeginMining(blockTime) + + ctx := context.Background() + + ask, err := miner.MarketGetRetrievalAsk(ctx) + require.NoError(t, err) + + ask.PricePerByte = abi.NewTokenAmount(0) + err = miner.MarketSetRetrievalAsk(ctx, ask) + require.NoError(t, err) + + dh := kit.NewDealHarness(t, client, miner, miner) + dh.RunConcurrentDeals(kit.RunConcurrentDealsOpts{ + N: 1, + StartEpoch: startEpoch, + }) +} diff --git a/itests/deals_publish_test.go b/itests/deals_publish_test.go new file mode 100644 index 000000000..10592d8b4 --- /dev/null +++ b/itests/deals_publish_test.go @@ -0,0 +1,131 @@ +package itests + +import ( + "bytes" + "context" + "testing" + "time" + + "github.com/filecoin-project/go-fil-markets/storagemarket" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/chain/actors/builtin/market" + "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/chain/wallet" + "github.com/filecoin-project/lotus/itests/kit" + "github.com/filecoin-project/lotus/markets/storageadapter" + "github.com/filecoin-project/lotus/node" + "github.com/filecoin-project/lotus/node/config" + "github.com/filecoin-project/lotus/node/modules" + "github.com/filecoin-project/lotus/storage" + market2 
"github.com/filecoin-project/specs-actors/v2/actors/builtin/market" + "github.com/stretchr/testify/require" +) + +func TestPublishDealsBatching(t *testing.T) { + var ( + ctx = context.Background() + publishPeriod = 10 * time.Second + maxDealsPerMsg = uint64(2) // Set max deals per publish deals message to 2 + startEpoch = abi.ChainEpoch(2 << 12) + ) + + kit.QuietMiningLogs() + + publisherKey, err := wallet.GenerateKey(types.KTSecp256k1) + require.NoError(t, err) + + opts := node.Options( + node.Override(new(*storageadapter.DealPublisher), + storageadapter.NewDealPublisher(nil, storageadapter.PublishMsgConfig{ + Period: publishPeriod, + MaxDealsPerMsg: maxDealsPerMsg, + }), + ), + node.Override(new(*storage.AddressSelector), modules.AddressSelector(&config.MinerAddressConfig{ + DealPublishControl: []string{ + publisherKey.Address.String(), + }, + DisableOwnerFallback: true, + DisableWorkerFallback: true, + })), + kit.LatestActorsAt(-1), + ) + + client, miner, ens := kit.EnsembleMinimal(t, kit.Account(publisherKey, types.FromFil(10)), kit.MockProofs(), kit.ConstructorOpts(opts)) + ens.InterconnectAll().BeginMining(10 * time.Millisecond) + + _, err = client.WalletImport(ctx, &publisherKey.KeyInfo) + require.NoError(t, err) + + miner.SetControlAddresses(publisherKey.Address) + + dh := kit.NewDealHarness(t, client, miner, miner) + + // Starts a deal and waits until it's published + runDealTillPublish := func(rseed int) { + res, _ := client.CreateImportFile(ctx, rseed, 0) + + upds, err := client.ClientGetDealUpdates(ctx) + require.NoError(t, err) + + dh.StartDeal(ctx, res.Root, false, startEpoch) + + // TODO: this sleep is only necessary because deals don't immediately get logged in the dealstore, we should fix this + time.Sleep(time.Second) + + done := make(chan struct{}) + go func() { + for upd := range upds { + if upd.DataRef.Root == res.Root && upd.State == storagemarket.StorageDealAwaitingPreCommit { + done <- struct{}{} + } + } + }() + <-done + } + + // Run three 
deals in parallel + done := make(chan struct{}, maxDealsPerMsg+1) + for rseed := 1; rseed <= 3; rseed++ { + rseed := rseed + go func() { + runDealTillPublish(rseed) + done <- struct{}{} + }() + } + + // Wait for two of the deals to be published + for i := 0; i < int(maxDealsPerMsg); i++ { + <-done + } + + // Expect a single PublishStorageDeals message that includes the first two deals + msgCids, err := client.StateListMessages(ctx, &api.MessageMatch{To: market.Address}, types.EmptyTSK, 1) + require.NoError(t, err) + count := 0 + for _, msgCid := range msgCids { + msg, err := client.ChainGetMessage(ctx, msgCid) + require.NoError(t, err) + + if msg.Method == market.Methods.PublishStorageDeals { + count++ + var pubDealsParams market2.PublishStorageDealsParams + err = pubDealsParams.UnmarshalCBOR(bytes.NewReader(msg.Params)) + require.NoError(t, err) + require.Len(t, pubDealsParams.Deals, int(maxDealsPerMsg)) + require.Equal(t, publisherKey.Address.String(), msg.From.String()) + } + } + require.Equal(t, 1, count) + + // The third deal should be published once the publish period expires. + // Allow a little padding as it takes a moment for the state change to + // be noticed by the client. 
+ padding := 10 * time.Second + select { + case <-time.After(publishPeriod + padding): + require.Fail(t, "Expected 3rd deal to be published once publish period elapsed") + case <-done: // Success + } +} diff --git a/itests/deals_test.go b/itests/deals_test.go new file mode 100644 index 000000000..f2e106f1f --- /dev/null +++ b/itests/deals_test.go @@ -0,0 +1,42 @@ +package itests + +import ( + "testing" + "time" + + "github.com/filecoin-project/lotus/chain/actors/policy" + "github.com/filecoin-project/lotus/itests/kit" +) + +func TestDealsWithSealingAndRPC(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode") + } + + kit.QuietMiningLogs() + + oldDelay := policy.GetPreCommitChallengeDelay() + policy.SetPreCommitChallengeDelay(5) + t.Cleanup(func() { + policy.SetPreCommitChallengeDelay(oldDelay) + }) + + var blockTime = 50 * time.Millisecond + + client, miner, ens := kit.EnsembleMinimal(t, kit.ThroughRPC(), kit.WithAllSubsystems()) // no mock proofs. + ens.InterconnectAll().BeginMining(blockTime) + dh := kit.NewDealHarness(t, client, miner, miner) + + t.Run("stdretrieval", func(t *testing.T) { + dh.RunConcurrentDeals(kit.RunConcurrentDealsOpts{N: 1}) + }) + + t.Run("fastretrieval", func(t *testing.T) { + dh.RunConcurrentDeals(kit.RunConcurrentDealsOpts{N: 1, FastRetrieval: true}) + }) + + t.Run("fastretrieval-twodeals-sequential", func(t *testing.T) { + dh.RunConcurrentDeals(kit.RunConcurrentDealsOpts{N: 1, FastRetrieval: true}) + dh.RunConcurrentDeals(kit.RunConcurrentDealsOpts{N: 1, FastRetrieval: true}) + }) +} diff --git a/itests/doc.go b/itests/doc.go new file mode 100644 index 000000000..474e57277 --- /dev/null +++ b/itests/doc.go @@ -0,0 +1,2 @@ +// Package itests contains integration tests for Lotus. 
+package itests diff --git a/cmd/lotus-gateway/endtoend_test.go b/itests/gateway_test.go similarity index 62% rename from cmd/lotus-gateway/endtoend_test.go rename to itests/gateway_test.go index 084218b24..f9e4a0fb6 100644 --- a/cmd/lotus-gateway/endtoend_test.go +++ b/itests/gateway_test.go @@ -1,57 +1,48 @@ -package main +package itests import ( "bytes" "context" "fmt" "math" - "os" + "net" "testing" "time" - "github.com/filecoin-project/lotus/cli" - clitest "github.com/filecoin-project/lotus/cli/test" - - init2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/init" - multisig2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/multisig" - - "github.com/stretchr/testify/require" - "golang.org/x/xerrors" - "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-jsonrpc" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/api/client" - "github.com/filecoin-project/lotus/api/test" - "github.com/filecoin-project/lotus/api/v0api" - "github.com/filecoin-project/lotus/api/v1api" - "github.com/filecoin-project/lotus/chain/actors/policy" "github.com/filecoin-project/lotus/chain/stmgr" "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/cli" + "github.com/filecoin-project/lotus/gateway" + "github.com/filecoin-project/lotus/itests/kit" + "github.com/filecoin-project/lotus/itests/multisig" "github.com/filecoin-project/lotus/node" - builder "github.com/filecoin-project/lotus/node/test" + + init2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/init" + multisig2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/multisig" + + "github.com/ipfs/go-cid" + "github.com/stretchr/testify/require" + "golang.org/x/xerrors" ) -const maxLookbackCap = time.Duration(math.MaxInt64) -const maxStateWaitLookbackLimit = stmgr.LookbackNoLimit +const ( + maxLookbackCap = time.Duration(math.MaxInt64) + maxStateWaitLookbackLimit = 
stmgr.LookbackNoLimit +) -func init() { - policy.SetSupportedProofTypes(abi.RegisteredSealProof_StackedDrg2KiBV1) - policy.SetConsensusMinerMinPower(abi.NewStoragePower(2048)) - policy.SetMinVerifiedDealSize(abi.NewStoragePower(256)) -} - -// TestWalletMsig tests that API calls to wallet and msig can be made on a lite +// TestGatewayWalletMsig tests that API calls to wallet and msig can be made on a lite // node that is connected through a gateway to a full API node -func TestWalletMsig(t *testing.T) { - _ = os.Setenv("BELLMAN_NO_GPU", "1") - clitest.QuietMiningLogs() +func TestGatewayWalletMsig(t *testing.T) { + kit.QuietMiningLogs() blocktime := 5 * time.Millisecond ctx := context.Background() nodes := startNodes(ctx, t, blocktime, maxLookbackCap, maxStateWaitLookbackLimit) - defer nodes.closer() lite := nodes.lite full := nodes.full @@ -102,7 +93,27 @@ func TestWalletMsig(t *testing.T) { // Create an msig with three of the addresses and threshold of two sigs msigAddrs := walletAddrs[:3] amt := types.NewInt(1000) - addProposal, err := lite.MsigCreate(ctx, 2, msigAddrs, abi.ChainEpoch(50), amt, liteWalletAddr, types.NewInt(0)) + proto, err := lite.MsigCreate(ctx, 2, msigAddrs, abi.ChainEpoch(50), amt, liteWalletAddr, types.NewInt(0)) + require.NoError(t, err) + + doSend := func(proto *api.MessagePrototype) (cid.Cid, error) { + if proto.ValidNonce { + sm, err := lite.WalletSignMessage(ctx, proto.Message.From, &proto.Message) + if err != nil { + return cid.Undef, err + } + return lite.MpoolPush(ctx, sm) + } + + sm, err := lite.MpoolPushMessage(ctx, &proto.Message, nil) + if err != nil { + return cid.Undef, err + } + + return sm.Cid(), nil + } + + addProposal, err := doSend(proto) require.NoError(t, err) res, err := lite.StateWaitMsg(ctx, addProposal, 1, api.LookbackNoLimit, true) @@ -122,7 +133,10 @@ func TestWalletMsig(t *testing.T) { require.Less(t, msigBalance.Int64(), amt.Int64()) // Propose to add a new address to the msig - addProposal, err = 
lite.MsigAddPropose(ctx, msig, walletAddrs[0], walletAddrs[3], false) + proto, err = lite.MsigAddPropose(ctx, msig, walletAddrs[0], walletAddrs[3], false) + require.NoError(t, err) + + addProposal, err = doSend(proto) require.NoError(t, err) res, err = lite.StateWaitMsg(ctx, addProposal, 1, api.LookbackNoLimit, true) @@ -136,7 +150,10 @@ func TestWalletMsig(t *testing.T) { // Approve proposal (proposer is first (implicit) signer, approver is // second signer txnID := uint64(proposeReturn.TxnID) - approval1, err := lite.MsigAddApprove(ctx, msig, walletAddrs[1], txnID, walletAddrs[0], walletAddrs[3], false) + proto, err = lite.MsigAddApprove(ctx, msig, walletAddrs[1], txnID, walletAddrs[0], walletAddrs[3], false) + require.NoError(t, err) + + approval1, err := doSend(proto) require.NoError(t, err) res, err = lite.StateWaitMsg(ctx, approval1, 1, api.LookbackNoLimit, true) @@ -149,54 +166,55 @@ func TestWalletMsig(t *testing.T) { require.True(t, approveReturn.Applied) } -// TestMsigCLI tests that msig CLI calls can be made +// TestGatewayMsigCLI tests that msig CLI calls can be made // on a lite node that is connected through a gateway to a full API node -func TestMsigCLI(t *testing.T) { - _ = os.Setenv("BELLMAN_NO_GPU", "1") - clitest.QuietMiningLogs() +func TestGatewayMsigCLI(t *testing.T) { + kit.QuietMiningLogs() blocktime := 5 * time.Millisecond ctx := context.Background() nodes := startNodesWithFunds(ctx, t, blocktime, maxLookbackCap, maxStateWaitLookbackLimit) - defer nodes.closer() lite := nodes.lite - clitest.RunMultisigTest(t, cli.Commands, lite) + multisig.RunMultisigTests(t, lite) } -func TestDealFlow(t *testing.T) { - _ = os.Setenv("BELLMAN_NO_GPU", "1") - clitest.QuietMiningLogs() +func TestGatewayDealFlow(t *testing.T) { + kit.QuietMiningLogs() blocktime := 5 * time.Millisecond ctx := context.Background() nodes := startNodesWithFunds(ctx, t, blocktime, maxLookbackCap, maxStateWaitLookbackLimit) - defer nodes.closer() + + time.Sleep(5 * time.Second) // 
For these tests where the block time is artificially short, just use // a deal start epoch that is guaranteed to be far enough in the future // so that the deal starts sealing in time dealStartEpoch := abi.ChainEpoch(2 << 12) - test.MakeDeal(t, ctx, 6, nodes.lite, nodes.miner, false, false, dealStartEpoch) + + dh := kit.NewDealHarness(t, nodes.lite, nodes.miner, nodes.miner) + dealCid, res, _ := dh.MakeOnlineDeal(context.Background(), kit.MakeFullDealParams{ + Rseed: 6, + StartEpoch: dealStartEpoch, + }) + dh.PerformRetrieval(ctx, dealCid, res.Root, false) } -func TestCLIDealFlow(t *testing.T) { - _ = os.Setenv("BELLMAN_NO_GPU", "1") - clitest.QuietMiningLogs() +func TestGatewayCLIDealFlow(t *testing.T) { + kit.QuietMiningLogs() blocktime := 5 * time.Millisecond ctx := context.Background() nodes := startNodesWithFunds(ctx, t, blocktime, maxLookbackCap, maxStateWaitLookbackLimit) - defer nodes.closer() - clitest.RunClientTest(t, cli.Commands, nodes.lite) + kit.RunClientTest(t, cli.Commands, nodes.lite) } type testNodes struct { - lite test.TestNode - full test.TestNode - miner test.TestStorageNode - closer jsonrpc.ClientCloser + lite *kit.TestFullNode + full *kit.TestFullNode + miner *kit.TestMiner } func startNodesWithFunds( @@ -212,8 +230,8 @@ func startNodesWithFunds( fullWalletAddr, err := nodes.full.WalletDefaultAddress(ctx) require.NoError(t, err) - // Create a wallet on the lite node - liteWalletAddr, err := nodes.lite.WalletNew(ctx, types.KTSecp256k1) + // Get the lite node default wallet address. + liteWalletAddr, err := nodes.lite.WalletDefaultAddress(ctx) require.NoError(t, err) // Send some funds from the full node to the lite node @@ -232,67 +250,50 @@ func startNodes( ) *testNodes { var closer jsonrpc.ClientCloser - // Create one miner and two full nodes. 
+ var ( + full *kit.TestFullNode + miner *kit.TestMiner + lite kit.TestFullNode + ) + + // - Create one full node and one lite node // - Put a gateway server in front of full node 1 // - Start full node 2 in lite mode // - Connect lite node -> gateway server -> full node - opts := append( - // Full node - test.OneFull, - // Lite node - test.FullNodeOpts{ - Lite: true, - Opts: func(nodes []test.TestNode) node.Option { - fullNode := nodes[0] - // Create a gateway server in front of the full node - gapiImpl := newGatewayAPI(fullNode, lookbackCap, stateWaitLookbackLimit) - _, addr, err := builder.CreateRPCServer(t, map[string]interface{}{ - "/rpc/v1": gapiImpl, - "/rpc/v0": api.Wrap(new(v1api.FullNodeStruct), new(v0api.WrapperV1Full), gapiImpl), - }) - require.NoError(t, err) + // create the full node and the miner. + var ens *kit.Ensemble + full, miner, ens = kit.EnsembleMinimal(t, kit.MockProofs()) + ens.InterconnectAll().BeginMining(blocktime) - // Create a gateway client API that connects to the gateway server - var gapi api.Gateway - gapi, closer, err = client.NewGatewayRPCV1(ctx, addr+"/rpc/v1", nil) - require.NoError(t, err) - - // Provide the gateway API to dependency injection - return node.Override(new(api.Gateway), gapi) - }, - }, - ) - n, sn := builder.RPCMockSbBuilder(t, opts, test.OneMiner) - - full := n[0] - lite := n[1] - miner := sn[0] - - // Get the listener address for the full node - fullAddr, err := full.NetAddrsListen(ctx) + // Create a gateway server in front of the full node + gwapi := gateway.NewNode(full, lookbackCap, stateWaitLookbackLimit) + handler, err := gateway.Handler(gwapi) require.NoError(t, err) - // Connect the miner and the full node - err = miner.NetConnect(ctx, fullAddr) + l, err := net.Listen("tcp", "127.0.0.1:0") require.NoError(t, err) - // Connect the miner and the lite node (so that the lite node can send - // data to the miner) - liteAddr, err := lite.NetAddrsListen(ctx) - require.NoError(t, err) - err = 
miner.NetConnect(ctx, liteAddr) - require.NoError(t, err) + srv, _ := kit.CreateRPCServer(t, handler, l) - // Start mining blocks - bm := test.NewBlockMiner(ctx, t, miner, blocktime) - bm.MineBlocks() - t.Cleanup(bm.Stop) + // Create a gateway client API that connects to the gateway server + var gapi api.Gateway + gapi, closer, err = client.NewGatewayRPCV1(ctx, "ws://"+srv.Listener.Addr().String()+"/rpc/v1", nil) + require.NoError(t, err) + t.Cleanup(closer) - return &testNodes{lite: lite, full: full, miner: miner, closer: closer} + ens.FullNode(&lite, + kit.LiteNode(), + kit.ThroughRPC(), + kit.ConstructorOpts( + node.Override(new(api.Gateway), gapi), + ), + ).Start().InterconnectAll() + + return &testNodes{lite: &lite, full: full, miner: miner} } -func sendFunds(ctx context.Context, fromNode test.TestNode, fromAddr address.Address, toAddr address.Address, amt types.BigInt) error { +func sendFunds(ctx context.Context, fromNode *kit.TestFullNode, fromAddr address.Address, toAddr address.Address, amt types.BigInt) error { msg := &types.Message{ From: fromAddr, To: toAddr, @@ -304,7 +305,7 @@ func sendFunds(ctx context.Context, fromNode test.TestNode, fromAddr address.Add return err } - res, err := fromNode.StateWaitMsg(ctx, sm.Cid(), 1, api.LookbackNoLimit, true) + res, err := fromNode.StateWaitMsg(ctx, sm.Cid(), 3, api.LookbackNoLimit, true) if err != nil { return err } diff --git a/itests/get_messages_in_ts_test.go b/itests/get_messages_in_ts_test.go new file mode 100644 index 000000000..61219a316 --- /dev/null +++ b/itests/get_messages_in_ts_test.go @@ -0,0 +1,104 @@ +package itests + +import ( + "context" + "testing" + + "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/chain/store" + "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/itests/kit" + "github.com/stretchr/testify/require" + + "time" + + "github.com/filecoin-project/go-state-types/big" +) + +func TestChainGetMessagesInTs(t *testing.T) { + 
ctx := context.Background() + + kit.QuietMiningLogs() + + client, _, ens := kit.EnsembleMinimal(t, kit.MockProofs()) + ens.InterconnectAll().BeginMining(10 * time.Millisecond) + + // create a new address where to send funds. + addr, err := client.WalletNew(ctx, types.KTBLS) + require.NoError(t, err) + + // get the existing balance from the default wallet to then split it. + bal, err := client.WalletBalance(ctx, client.DefaultKey.Address) + require.NoError(t, err) + + const iterations = 100 + + // we'll send half our balance (saving the other half for gas), + // in `iterations` increments. + toSend := big.Div(bal, big.NewInt(2)) + each := big.Div(toSend, big.NewInt(iterations)) + + waitAllCh := make(chan struct{}) + go func() { + headChangeCh, err := client.ChainNotify(ctx) + require.NoError(t, err) + <-headChangeCh //skip hccurrent + + count := 0 + for { + select { + case headChanges := <-headChangeCh: + for _, change := range headChanges { + if change.Type == store.HCApply { + msgs, err := client.ChainGetMessagesInTipset(ctx, change.Val.Key()) + require.NoError(t, err) + count += len(msgs) + if count == iterations { + waitAllCh <- struct{}{} + } + } + } + } + } + }() + + var sms []*types.SignedMessage + for i := 0; i < iterations; i++ { + msg := &types.Message{ + From: client.DefaultKey.Address, + To: addr, + Value: each, + } + + sm, err := client.MpoolPushMessage(ctx, msg, nil) + require.NoError(t, err) + require.EqualValues(t, i, sm.Message.Nonce) + + sms = append(sms, sm) + } + + select { + case <-waitAllCh: + case <-time.After(time.Minute): + t.Errorf("timeout to wait for pack messages") + } + + for _, sm := range sms { + msgLookup, err := client.StateWaitMsg(ctx, sm.Cid(), 3, api.LookbackNoLimit, true) + require.NoError(t, err) + + ts, err := client.ChainGetTipSet(ctx, msgLookup.TipSet) + require.NoError(t, err) + + msgs, err := client.ChainGetMessagesInTipset(ctx, ts.Parents()) + require.NoError(t, err) + + var found bool + for _, msg := range msgs { + if 
msg.Cid == sm.Cid() { + found = true + } + } + require.EqualValues(t, true, found, "expect got message in tipset %v", msgLookup.TipSet) + } +} diff --git a/itests/kit/blockminer.go b/itests/kit/blockminer.go new file mode 100644 index 000000000..2c9bd47c6 --- /dev/null +++ b/itests/kit/blockminer.go @@ -0,0 +1,124 @@ +package kit + +import ( + "context" + "sync" + "sync/atomic" + "testing" + "time" + + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/lotus/miner" + "github.com/stretchr/testify/require" +) + +// BlockMiner is a utility that makes a test miner Mine blocks on a timer. +type BlockMiner struct { + t *testing.T + miner *TestMiner + + nextNulls int64 + wg sync.WaitGroup + cancel context.CancelFunc +} + +func NewBlockMiner(t *testing.T, miner *TestMiner) *BlockMiner { + return &BlockMiner{ + t: t, + miner: miner, + cancel: func() {}, + } +} + +func (bm *BlockMiner) MineBlocks(ctx context.Context, blocktime time.Duration) { + time.Sleep(time.Second) + + // wrap context in a cancellable context. + ctx, bm.cancel = context.WithCancel(ctx) + + bm.wg.Add(1) + go func() { + defer bm.wg.Done() + + for { + select { + case <-time.After(blocktime): + case <-ctx.Done(): + return + } + + nulls := atomic.SwapInt64(&bm.nextNulls, 0) + err := bm.miner.MineOne(ctx, miner.MineReq{ + InjectNulls: abi.ChainEpoch(nulls), + Done: func(bool, abi.ChainEpoch, error) {}, + }) + switch { + case err == nil: // wrap around + case ctx.Err() != nil: // context fired. + return + default: // log error + bm.t.Error(err) + } + } + }() +} + +// InjectNulls injects the specified amount of null rounds in the next +// mining rounds. 
+func (bm *BlockMiner) InjectNulls(rounds abi.ChainEpoch) { + atomic.AddInt64(&bm.nextNulls, int64(rounds)) +} + +func (bm *BlockMiner) MineUntilBlock(ctx context.Context, fn *TestFullNode, cb func(abi.ChainEpoch)) { + for i := 0; i < 1000; i++ { + var ( + success bool + err error + epoch abi.ChainEpoch + wait = make(chan struct{}) + ) + + doneFn := func(win bool, ep abi.ChainEpoch, e error) { + success = win + err = e + epoch = ep + wait <- struct{}{} + } + + mineErr := bm.miner.MineOne(ctx, miner.MineReq{Done: doneFn}) + require.NoError(bm.t, mineErr) + <-wait + + require.NoError(bm.t, err) + + if success { + // Wait until it shows up on the given full nodes ChainHead + nloops := 200 + for i := 0; i < nloops; i++ { + ts, err := fn.ChainHead(ctx) + require.NoError(bm.t, err) + + if ts.Height() == epoch { + break + } + + require.NotEqual(bm.t, i, nloops-1, "block never managed to sync to node") + time.Sleep(time.Millisecond * 10) + } + + if cb != nil { + cb(epoch) + } + return + } + bm.t.Log("did not Mine block, trying again", i) + } + bm.t.Fatal("failed to Mine 1000 times in a row...") +} + +// Stop stops the block miner. 
+func (bm *BlockMiner) Stop() { + bm.t.Log("shutting down mining") + bm.cancel() + bm.wg.Wait() +} diff --git a/cli/test/client.go b/itests/kit/client.go similarity index 65% rename from cli/test/client.go rename to itests/kit/client.go index 2a48b7b64..bd81e0c04 100644 --- a/cli/test/client.go +++ b/itests/kit/client.go @@ -1,9 +1,10 @@ -package test +package kit import ( "context" "fmt" "io/ioutil" + "math/rand" "os" "path/filepath" "regexp" @@ -11,9 +12,7 @@ import ( "testing" "time" - "golang.org/x/xerrors" - - "github.com/filecoin-project/lotus/api/test" + "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/specs-actors/v2/actors/builtin" @@ -21,8 +20,8 @@ import ( lcli "github.com/urfave/cli/v2" ) -// RunClientTest exercises some of the client CLI commands -func RunClientTest(t *testing.T, cmds []*lcli.Command, clientNode test.TestNode) { +// RunClientTest exercises some of the Client CLI commands +func RunClientTest(t *testing.T, cmds []*lcli.Command, clientNode *TestFullNode) { ctx, cancel := context.WithTimeout(context.Background(), time.Minute) defer cancel() @@ -30,7 +29,7 @@ func RunClientTest(t *testing.T, cmds []*lcli.Command, clientNode test.TestNode) mockCLI := NewMockCLI(ctx, t, cmds) clientCLI := mockCLI.Client(clientNode.ListenAddr) - // Get the miner address + // Get the Miner address addrs, err := clientNode.StateListMiners(ctx, types.EmptyTSK) require.NoError(t, err) require.Len(t, addrs, 1) @@ -38,13 +37,14 @@ func RunClientTest(t *testing.T, cmds []*lcli.Command, clientNode test.TestNode) minerAddr := addrs[0] fmt.Println("Miner:", minerAddr) - // client query-ask + // client query-ask out := clientCLI.RunCmd("client", "query-ask", minerAddr.String()) require.Regexp(t, regexp.MustCompile("Ask:"), out) // Create a deal (non-interactive) // client deal --start-epoch= 1000000attofil - res, _, err := test.CreateClientFile(ctx, clientNode, 
1, 0) + res, _, _, err := CreateImportFile(ctx, clientNode, 1, 0) + require.NoError(t, err) startEpoch := fmt.Sprintf("--start-epoch=%d", 2<<12) dataCid := res.Root @@ -58,9 +58,9 @@ func RunClientTest(t *testing.T, cmds []*lcli.Command, clientNode test.TestNode) // // (in days) // - // "no" (verified client) + // "no" (verified Client) // "yes" (confirm deal) - res, _, err = test.CreateClientFile(ctx, clientNode, 2, 0) + res, _, _, err = CreateImportFile(ctx, clientNode, 2, 0) require.NoError(t, err) dataCid2 := res.Root duration = fmt.Sprintf("%d", build.MinDealDuration/builtin.EpochsInDay) @@ -91,16 +91,19 @@ func RunClientTest(t *testing.T, cmds []*lcli.Command, clientNode test.TestNode) } dealStatus = parts[3] fmt.Println(" Deal status:", dealStatus) - if dealComplete(t, dealStatus) { + + st := CategorizeDealState(dealStatus) + require.NotEqual(t, TestDealStateFailed, st) + if st == TestDealStateComplete { break } time.Sleep(time.Second) } - // Retrieve the first file from the miner + // Retrieve the first file from the Miner // client retrieve - tmpdir, err := ioutil.TempDir(os.TempDir(), "test-cli-client") + tmpdir, err := ioutil.TempDir(os.TempDir(), "test-cli-Client") require.NoError(t, err) path := filepath.Join(tmpdir, "outfile.dat") out = clientCLI.RunCmd("client", "retrieve", dataCid.String(), path) @@ -108,13 +111,36 @@ func RunClientTest(t *testing.T, cmds []*lcli.Command, clientNode test.TestNode) require.Regexp(t, regexp.MustCompile("Success"), out) } -func dealComplete(t *testing.T, dealStatus string) bool { - switch dealStatus { - case "StorageDealFailing", "StorageDealError": - t.Fatal(xerrors.Errorf("Storage deal failed with status: " + dealStatus)) - case "StorageDealStaged", "StorageDealAwaitingPreCommit", "StorageDealSealing", "StorageDealActive", "StorageDealExpired", "StorageDealSlashed": - return true +func CreateImportFile(ctx context.Context, client api.FullNode, rseed int, size int) (res *api.ImportRes, path string, data []byte, err 
error) { + data, path, err = createRandomFile(rseed, size) + if err != nil { + return nil, "", nil, err } - return false + res, err = client.ClientImport(ctx, api.FileRef{Path: path}) + if err != nil { + return nil, "", nil, err + } + return res, path, data, nil +} + +func createRandomFile(rseed, size int) ([]byte, string, error) { + if size == 0 { + size = 1600 + } + data := make([]byte, size) + rand.New(rand.NewSource(int64(rseed))).Read(data) + + dir, err := ioutil.TempDir(os.TempDir(), "test-make-deal-") + if err != nil { + return nil, "", err + } + + path := filepath.Join(dir, "sourcefile.dat") + err = ioutil.WriteFile(path, data, 0644) + if err != nil { + return nil, "", err + } + + return data, path, nil } diff --git a/itests/kit/control.go b/itests/kit/control.go new file mode 100644 index 000000000..73ac39b7a --- /dev/null +++ b/itests/kit/control.go @@ -0,0 +1,42 @@ +package kit + +import ( + "context" + + "github.com/stretchr/testify/require" + + addr "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/big" + miner2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/miner" + + "github.com/filecoin-project/lotus/chain/actors" + "github.com/filecoin-project/lotus/chain/actors/builtin/miner" + "github.com/filecoin-project/lotus/chain/types" +) + +func (tm *TestMiner) SetControlAddresses(addrs ...addr.Address) { + ctx := context.TODO() + + mi, err := tm.FullNode.StateMinerInfo(ctx, tm.ActorAddr, types.EmptyTSK) + require.NoError(tm.t, err) + + cwp := &miner2.ChangeWorkerAddressParams{ + NewWorker: mi.Worker, + NewControlAddrs: addrs, + } + + sp, err := actors.SerializeParams(cwp) + require.NoError(tm.t, err) + + smsg, err := tm.FullNode.MpoolPushMessage(ctx, &types.Message{ + From: mi.Owner, + To: tm.ActorAddr, + Method: miner.Methods.ChangeWorkerAddress, + + Value: big.Zero(), + Params: sp, + }, nil) + require.NoError(tm.t, err) + + tm.FullNode.WaitMsg(ctx, smsg.Cid()) +} diff --git a/itests/kit/deals.go 
b/itests/kit/deals.go new file mode 100644 index 000000000..4cee13925 --- /dev/null +++ b/itests/kit/deals.go @@ -0,0 +1,313 @@ +package kit + +import ( + "context" + "fmt" + "io/ioutil" + "os" + "testing" + "time" + + "github.com/filecoin-project/go-fil-markets/storagemarket" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/chain/types" + sealing "github.com/filecoin-project/lotus/extern/storage-sealing" + "github.com/ipfs/go-cid" + files "github.com/ipfs/go-ipfs-files" + ipld "github.com/ipfs/go-ipld-format" + dag "github.com/ipfs/go-merkledag" + dstest "github.com/ipfs/go-merkledag/test" + unixfile "github.com/ipfs/go-unixfs/file" + "github.com/ipld/go-car" + "github.com/stretchr/testify/require" + "golang.org/x/sync/errgroup" +) + +type DealHarness struct { + t *testing.T + client *TestFullNode + main *TestMiner + market *TestMiner +} + +type MakeFullDealParams struct { + Rseed int + FastRet bool + StartEpoch abi.ChainEpoch + + // SuspendUntilCryptoeconStable suspends deal-making, until cryptoecon + // parameters are stabilised. This affects projected collateral, and tests + // will fail in network version 13 and higher if deals are started too soon + // after network birth. + // + // The reason is that the formula for collateral calculation takes + // circulating supply into account: + // + // [portion of power this deal will be] * [~1% of tokens]. + // + // In the first epochs after genesis, the total circulating supply is + // changing dramatically in percentual terms. Therefore, if the deal is + // proposed too soon, by the time it gets published on chain, the quoted + // provider collateral will no longer be valid. + // + // The observation is that deals fail with: + // + // GasEstimateMessageGas error: estimating gas used: message execution + // failed: exit 16, reason: Provider collateral out of bounds. 
(RetCode=16) + // + // Enabling this will suspend deal-making until the network has reached a + // height of 300. + SuspendUntilCryptoeconStable bool +} + +// NewDealHarness creates a test harness that contains testing utilities for deals. +func NewDealHarness(t *testing.T, client *TestFullNode, main *TestMiner, market *TestMiner) *DealHarness { + return &DealHarness{ + t: t, + client: client, + main: main, + market: market, + } +} + +// MakeOnlineDeal makes an online deal, generating a random file with the +// supplied seed, and setting the specified fast retrieval flag and start epoch +// on the storage deal. It returns when the deal is sealed. +// +// TODO: convert input parameters to struct, and add size as an input param. +func (dh *DealHarness) MakeOnlineDeal(ctx context.Context, params MakeFullDealParams) (deal *cid.Cid, res *api.ImportRes, path string) { + res, path = dh.client.CreateImportFile(ctx, params.Rseed, 0) + + dh.t.Logf("FILE CID: %s", res.Root) + + if params.SuspendUntilCryptoeconStable { + dh.t.Logf("deal-making suspending until cryptecon parameters have stabilised") + ts := dh.client.WaitTillChain(ctx, HeightAtLeast(300)) + dh.t.Logf("deal-making continuing; current height is %d", ts.Height()) + } + + deal = dh.StartDeal(ctx, res.Root, params.FastRet, params.StartEpoch) + + // TODO: this sleep is only necessary because deals don't immediately get logged in the dealstore, we should fix this + time.Sleep(time.Second) + dh.WaitDealSealed(ctx, deal, false, false, nil) + + return deal, res, path +} + +// StartDeal starts a storage deal between the client and the miner. 
+func (dh *DealHarness) StartDeal(ctx context.Context, fcid cid.Cid, fastRet bool, startEpoch abi.ChainEpoch) *cid.Cid { + maddr, err := dh.main.ActorAddress(ctx) + require.NoError(dh.t, err) + + addr, err := dh.client.WalletDefaultAddress(ctx) + require.NoError(dh.t, err) + + deal, err := dh.client.ClientStartDeal(ctx, &api.StartDealParams{ + Data: &storagemarket.DataRef{ + TransferType: storagemarket.TTGraphsync, + Root: fcid, + }, + Wallet: addr, + Miner: maddr, + EpochPrice: types.NewInt(1000000), + DealStartEpoch: startEpoch, + MinBlocksDuration: uint64(build.MinDealDuration), + FastRetrieval: fastRet, + }) + require.NoError(dh.t, err) + + return deal +} + +// WaitDealSealed waits until the deal is sealed. +func (dh *DealHarness) WaitDealSealed(ctx context.Context, deal *cid.Cid, noseal, noSealStart bool, cb func()) { +loop: + for { + di, err := dh.client.ClientGetDealInfo(ctx, *deal) + require.NoError(dh.t, err) + + switch di.State { + case storagemarket.StorageDealAwaitingPreCommit, storagemarket.StorageDealSealing: + if noseal { + return + } + if !noSealStart { + dh.StartSealingWaiting(ctx) + } + case storagemarket.StorageDealProposalRejected: + dh.t.Fatal("deal rejected") + case storagemarket.StorageDealFailing: + dh.t.Fatal("deal failed") + case storagemarket.StorageDealError: + dh.t.Fatal("deal errored", di.Message) + case storagemarket.StorageDealActive: + dh.t.Log("COMPLETE", di) + break loop + } + + mds, err := dh.market.MarketListIncompleteDeals(ctx) + require.NoError(dh.t, err) + + var minerState storagemarket.StorageDealStatus + for _, md := range mds { + if md.DealID == di.DealID { + minerState = md.State + break + } + } + + dh.t.Logf("Deal %d state: client:%s provider:%s\n", di.DealID, storagemarket.DealStates[di.State], storagemarket.DealStates[minerState]) + time.Sleep(time.Second / 2) + if cb != nil { + cb() + } + } +} + +// WaitDealPublished waits until the deal is published. 
+func (dh *DealHarness) WaitDealPublished(ctx context.Context, deal *cid.Cid) { + subCtx, cancel := context.WithCancel(ctx) + defer cancel() + + updates, err := dh.market.MarketGetDealUpdates(subCtx) + require.NoError(dh.t, err) + + for { + select { + case <-ctx.Done(): + dh.t.Fatal("context timeout") + case di := <-updates: + if deal.Equals(di.ProposalCid) { + switch di.State { + case storagemarket.StorageDealProposalRejected: + dh.t.Fatal("deal rejected") + case storagemarket.StorageDealFailing: + dh.t.Fatal("deal failed") + case storagemarket.StorageDealError: + dh.t.Fatal("deal errored", di.Message) + case storagemarket.StorageDealFinalizing, storagemarket.StorageDealAwaitingPreCommit, storagemarket.StorageDealSealing, storagemarket.StorageDealActive: + dh.t.Log("COMPLETE", di) + return + } + dh.t.Log("Deal state: ", storagemarket.DealStates[di.State]) + } + } + } +} + +func (dh *DealHarness) StartSealingWaiting(ctx context.Context) { + snums, err := dh.main.SectorsList(ctx) + require.NoError(dh.t, err) + + for _, snum := range snums { + si, err := dh.main.SectorsStatus(ctx, snum, false) + require.NoError(dh.t, err) + + dh.t.Logf("Sector state: %s", si.State) + if si.State == api.SectorState(sealing.WaitDeals) { + require.NoError(dh.t, dh.main.SectorStartSealing(ctx, snum)) + } + + dh.main.FlushSealingBatches(ctx) + } +} + +func (dh *DealHarness) PerformRetrieval(ctx context.Context, deal *cid.Cid, root cid.Cid, carExport bool) (path string) { + // perform retrieval. 
+ info, err := dh.client.ClientGetDealInfo(ctx, *deal) + require.NoError(dh.t, err) + + offers, err := dh.client.ClientFindData(ctx, root, &info.PieceCID) + require.NoError(dh.t, err) + require.NotEmpty(dh.t, offers, "no offers") + + carFile, err := ioutil.TempFile(dh.t.TempDir(), "ret-car") + require.NoError(dh.t, err) + + defer carFile.Close() //nolint:errcheck + + caddr, err := dh.client.WalletDefaultAddress(ctx) + require.NoError(dh.t, err) + + ref := &api.FileRef{ + Path: carFile.Name(), + IsCAR: carExport, + } + + updates, err := dh.client.ClientRetrieveWithEvents(ctx, offers[0].Order(caddr), ref) + require.NoError(dh.t, err) + + for update := range updates { + require.Emptyf(dh.t, update.Err, "retrieval failed: %s", update.Err) + } + + ret := carFile.Name() + if carExport { + actualFile := dh.ExtractFileFromCAR(ctx, carFile) + ret = actualFile.Name() + _ = actualFile.Close() //nolint:errcheck + } + + return ret +} + +func (dh *DealHarness) ExtractFileFromCAR(ctx context.Context, file *os.File) (out *os.File) { + bserv := dstest.Bserv() + ch, err := car.LoadCar(bserv.Blockstore(), file) + require.NoError(dh.t, err) + + b, err := bserv.GetBlock(ctx, ch.Roots[0]) + require.NoError(dh.t, err) + + nd, err := ipld.Decode(b) + require.NoError(dh.t, err) + + dserv := dag.NewDAGService(bserv) + fil, err := unixfile.NewUnixfsFile(ctx, dserv, nd) + require.NoError(dh.t, err) + + tmpfile, err := ioutil.TempFile(dh.t.TempDir(), "file-in-car") + require.NoError(dh.t, err) + + defer tmpfile.Close() //nolint:errcheck + + err = files.WriteTo(fil, tmpfile.Name()) + require.NoError(dh.t, err) + + return tmpfile +} + +type RunConcurrentDealsOpts struct { + N int + FastRetrieval bool + CarExport bool + StartEpoch abi.ChainEpoch +} + +func (dh *DealHarness) RunConcurrentDeals(opts RunConcurrentDealsOpts) { + errgrp, _ := errgroup.WithContext(context.Background()) + for i := 0; i < opts.N; i++ { + i := i + errgrp.Go(func() (err error) { + defer func() { + // This is necessary 
because golang can't deal with test + // failures being reported from children goroutines ¯\_(ツ)_/¯ + if r := recover(); r != nil { + err = fmt.Errorf("deal failed: %s", r) + } + }() + deal, res, inPath := dh.MakeOnlineDeal(context.Background(), MakeFullDealParams{ + Rseed: 5 + i, + FastRet: opts.FastRetrieval, + StartEpoch: opts.StartEpoch, + }) + outPath := dh.PerformRetrieval(context.Background(), deal, res.Root, opts.CarExport) + AssertFilesEqual(dh.t, inPath, outPath) + return nil + }) + } + require.NoError(dh.t, errgrp.Wait()) +} diff --git a/itests/kit/deals_state.go b/itests/kit/deals_state.go new file mode 100644 index 000000000..617a6d28e --- /dev/null +++ b/itests/kit/deals_state.go @@ -0,0 +1,21 @@ +package kit + +type TestDealState int + +const ( + TestDealStateFailed = TestDealState(-1) + TestDealStateInProgress = TestDealState(0) + TestDealStateComplete = TestDealState(1) +) + +// CategorizeDealState categorizes deal states into one of three states: +// Complete, InProgress, Failed. 
+func CategorizeDealState(dealStatus string) TestDealState { + switch dealStatus { + case "StorageDealFailing", "StorageDealError": + return TestDealStateFailed + case "StorageDealStaged", "StorageDealAwaitingPreCommit", "StorageDealSealing", "StorageDealActive", "StorageDealExpired", "StorageDealSlashed": + return TestDealStateComplete + } + return TestDealStateInProgress +} diff --git a/itests/kit/ensemble.go b/itests/kit/ensemble.go new file mode 100644 index 000000000..4cce3cf9e --- /dev/null +++ b/itests/kit/ensemble.go @@ -0,0 +1,706 @@ +package kit + +import ( + "bytes" + "context" + "crypto/rand" + "fmt" + "io/ioutil" + "net" + "sync" + "testing" + "time" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/go-state-types/exitcode" + "github.com/filecoin-project/go-state-types/network" + "github.com/filecoin-project/go-storedcounter" + "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/api/v1api" + "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/chain" + "github.com/filecoin-project/lotus/chain/actors" + "github.com/filecoin-project/lotus/chain/actors/builtin/miner" + "github.com/filecoin-project/lotus/chain/actors/builtin/power" + "github.com/filecoin-project/lotus/chain/gen" + genesis2 "github.com/filecoin-project/lotus/chain/gen/genesis" + "github.com/filecoin-project/lotus/chain/messagepool" + "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/chain/wallet" + "github.com/filecoin-project/lotus/cmd/lotus-seed/seed" + sectorstorage "github.com/filecoin-project/lotus/extern/sector-storage" + "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper" + "github.com/filecoin-project/lotus/extern/sector-storage/mock" + "github.com/filecoin-project/lotus/genesis" + lotusminer "github.com/filecoin-project/lotus/miner" + 
"github.com/filecoin-project/lotus/node" + "github.com/filecoin-project/lotus/node/config" + "github.com/filecoin-project/lotus/node/modules" + "github.com/filecoin-project/lotus/node/modules/dtypes" + testing2 "github.com/filecoin-project/lotus/node/modules/testing" + "github.com/filecoin-project/lotus/node/repo" + "github.com/filecoin-project/lotus/storage/mockstorage" + miner2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/miner" + power2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/power" + "github.com/ipfs/go-datastore" + libp2pcrypto "github.com/libp2p/go-libp2p-core/crypto" + "github.com/libp2p/go-libp2p-core/peer" + mocknet "github.com/libp2p/go-libp2p/p2p/net/mock" + "github.com/stretchr/testify/require" +) + +func init() { + chain.BootstrapPeerThreshold = 1 + messagepool.HeadChangeCoalesceMinDelay = time.Microsecond + messagepool.HeadChangeCoalesceMaxDelay = 2 * time.Microsecond + messagepool.HeadChangeCoalesceMergeInterval = 100 * time.Nanosecond +} + +// Ensemble is a collection of nodes instantiated within a test. +// +// Create a new ensemble with: +// +// ens := kit.NewEnsemble() +// +// Create full nodes and miners: +// +// var full TestFullNode +// var miner TestMiner +// ens.FullNode(&full, opts...) // populates a full node +// ens.Miner(&miner, &full, opts...) // populates a miner, using the full node as its chain daemon +// +// It is possible to pass functional options to set initial balances, +// presealed sectors, owner keys, etc. +// +// After the initial nodes are added, call `ens.Start()` to forge genesis +// and start the network. Mining will NOT be started automatically. It needs +// to be started explicitly by calling `BeginMining`. +// +// Nodes also need to be connected with one another, either via `ens.Connect()` +// or `ens.InterconnectAll()`. 
A common inchantation for simple tests is to do: +// +// ens.InterconnectAll().BeginMining(blocktime) +// +// You can continue to add more nodes, but you must always follow with +// `ens.Start()` to activate the new nodes. +// +// The API is chainable, so it's possible to do a lot in a very succinct way: +// +// kit.NewEnsemble().FullNode(&full).Miner(&miner, &full).Start().InterconnectAll().BeginMining() +// +// You can also find convenient fullnode:miner presets, such as 1:1, 1:2, +// and 2:1, e.g.: +// +// kit.EnsembleMinimal() +// kit.EnsembleOneTwo() +// kit.EnsembleTwoOne() +// +type Ensemble struct { + t *testing.T + bootstrapped bool + genesisBlock bytes.Buffer + mn mocknet.Mocknet + options *ensembleOpts + + inactive struct { + fullnodes []*TestFullNode + miners []*TestMiner + } + active struct { + fullnodes []*TestFullNode + miners []*TestMiner + bms map[*TestMiner]*BlockMiner + } + genesis struct { + miners []genesis.Miner + accounts []genesis.Actor + } +} + +// NewEnsemble instantiates a new blank Ensemble. +func NewEnsemble(t *testing.T, opts ...EnsembleOpt) *Ensemble { + options := DefaultEnsembleOpts + for _, o := range opts { + err := o(&options) + require.NoError(t, err) + } + + n := &Ensemble{t: t, options: &options} + n.active.bms = make(map[*TestMiner]*BlockMiner) + + // add accounts from ensemble options to genesis. + for _, acc := range options.accounts { + n.genesis.accounts = append(n.genesis.accounts, genesis.Actor{ + Type: genesis.TAccount, + Balance: acc.initialBalance, + Meta: (&genesis.AccountMeta{Owner: acc.key.Address}).ActorMeta(), + }) + } + + return n +} + +// FullNode enrolls a new full node. 
+func (n *Ensemble) FullNode(full *TestFullNode, opts ...NodeOpt) *Ensemble { + options := DefaultNodeOpts + for _, o := range opts { + err := o(&options) + require.NoError(n.t, err) + } + + key, err := wallet.GenerateKey(types.KTBLS) + require.NoError(n.t, err) + + if !n.bootstrapped && !options.balance.IsZero() { + // if we still haven't forged genesis, create a key+address, and assign + // it some FIL; this will be set as the default wallet when the node is + // started. + genacc := genesis.Actor{ + Type: genesis.TAccount, + Balance: options.balance, + Meta: (&genesis.AccountMeta{Owner: key.Address}).ActorMeta(), + } + + n.genesis.accounts = append(n.genesis.accounts, genacc) + } + + *full = TestFullNode{t: n.t, options: options, DefaultKey: key} + n.inactive.fullnodes = append(n.inactive.fullnodes, full) + return n +} + +// Miner enrolls a new miner, using the provided full node for chain +// interactions. +func (n *Ensemble) Miner(miner *TestMiner, full *TestFullNode, opts ...NodeOpt) *Ensemble { + require.NotNil(n.t, full, "full node required when instantiating miner") + + options := DefaultNodeOpts + for _, o := range opts { + err := o(&options) + require.NoError(n.t, err) + } + + privkey, _, err := libp2pcrypto.GenerateEd25519Key(rand.Reader) + require.NoError(n.t, err) + + peerId, err := peer.IDFromPrivateKey(privkey) + require.NoError(n.t, err) + + tdir, err := ioutil.TempDir("", "preseal-memgen") + require.NoError(n.t, err) + + minerCnt := len(n.inactive.miners) + len(n.active.miners) + + actorAddr, err := address.NewIDAddress(genesis2.MinerStart + uint64(minerCnt)) + require.NoError(n.t, err) + + if options.mainMiner != nil { + actorAddr = options.mainMiner.ActorAddr + } + + ownerKey := options.ownerKey + if !n.bootstrapped { + var ( + sectors = options.sectors + k *types.KeyInfo + genm *genesis.Miner + ) + + // create the preseal commitment. 
+ if n.options.mockProofs { + genm, k, err = mockstorage.PreSeal(abi.RegisteredSealProof_StackedDrg2KiBV1, actorAddr, sectors) + } else { + genm, k, err = seed.PreSeal(actorAddr, abi.RegisteredSealProof_StackedDrg2KiBV1, 0, sectors, tdir, []byte("make genesis mem random"), nil, true) + } + require.NoError(n.t, err) + + genm.PeerId = peerId + + // create an owner key, and assign it some FIL. + ownerKey, err = wallet.NewKey(*k) + require.NoError(n.t, err) + + genacc := genesis.Actor{ + Type: genesis.TAccount, + Balance: options.balance, + Meta: (&genesis.AccountMeta{Owner: ownerKey.Address}).ActorMeta(), + } + + n.genesis.miners = append(n.genesis.miners, *genm) + n.genesis.accounts = append(n.genesis.accounts, genacc) + } else { + require.NotNil(n.t, ownerKey, "worker key can't be null if initializing a miner after genesis") + } + + rl, err := net.Listen("tcp", "127.0.0.1:") + require.NoError(n.t, err) + + *miner = TestMiner{ + t: n.t, + ActorAddr: actorAddr, + OwnerKey: ownerKey, + FullNode: full, + PresealDir: tdir, + options: options, + RemoteListener: rl, + } + + miner.Libp2p.PeerID = peerId + miner.Libp2p.PrivKey = privkey + + n.inactive.miners = append(n.inactive.miners, miner) + + return n +} + +// Start starts all enrolled nodes. +func (n *Ensemble) Start() *Ensemble { + ctx := context.Background() + + var gtempl *genesis.Template + if !n.bootstrapped { + // We haven't been bootstrapped yet, we need to generate genesis and + // create the networking backbone. + gtempl = n.generateGenesis() + n.mn = mocknet.New(ctx) + } + + // --------------------- + // FULL NODES + // --------------------- + + // Create all inactive full nodes. 
+ for i, full := range n.inactive.fullnodes { + r := repo.NewMemory(nil) + opts := []node.Option{ + node.FullAPI(&full.FullNode, node.Lite(full.options.lite)), + node.Base(), + node.Repo(r), + node.MockHost(n.mn), + node.Test(), + + // so that we subscribe to pubsub topics immediately + node.Override(new(dtypes.Bootstrapper), dtypes.Bootstrapper(true)), + } + + // append any node builder options. + opts = append(opts, full.options.extraNodeOpts...) + + // Either generate the genesis or inject it. + if i == 0 && !n.bootstrapped { + opts = append(opts, node.Override(new(modules.Genesis), testing2.MakeGenesisMem(&n.genesisBlock, *gtempl))) + } else { + opts = append(opts, node.Override(new(modules.Genesis), modules.LoadGenesis(n.genesisBlock.Bytes()))) + } + + // Are we mocking proofs? + if n.options.mockProofs { + opts = append(opts, + node.Override(new(ffiwrapper.Verifier), mock.MockVerifier), + node.Override(new(ffiwrapper.Prover), mock.MockProver), + ) + } + + // Call option builders, passing active nodes as the parameter + for _, bopt := range full.options.optBuilders { + opts = append(opts, bopt(n.active.fullnodes)) + } + + // Construct the full node. + stop, err := node.New(ctx, opts...) + + require.NoError(n.t, err) + + addr, err := full.WalletImport(context.Background(), &full.DefaultKey.KeyInfo) + require.NoError(n.t, err) + + err = full.WalletSetDefault(context.Background(), addr) + require.NoError(n.t, err) + + // Are we hitting this node through its RPC? + if full.options.rpc { + withRPC := fullRpc(n.t, full) + n.inactive.fullnodes[i] = withRPC + } + + n.t.Cleanup(func() { _ = stop(context.Background()) }) + + n.active.fullnodes = append(n.active.fullnodes, full) + } + + // If we are here, we have processed all inactive fullnodes and moved them + // to active, so clear the slice. + n.inactive.fullnodes = n.inactive.fullnodes[:0] + + // Link all the nodes. 
+ err := n.mn.LinkAll() + require.NoError(n.t, err) + + // --------------------- + // MINERS + // --------------------- + + // Create all inactive miners. + for i, m := range n.inactive.miners { + if n.bootstrapped { + if m.options.mainMiner == nil { + // this is a miner created after genesis, so it won't have a preseal. + // we need to create it on chain. + params, aerr := actors.SerializeParams(&power2.CreateMinerParams{ + Owner: m.OwnerKey.Address, + Worker: m.OwnerKey.Address, + SealProofType: m.options.proofType, + Peer: abi.PeerID(m.Libp2p.PeerID), + }) + require.NoError(n.t, aerr) + + createStorageMinerMsg := &types.Message{ + From: m.OwnerKey.Address, + To: power.Address, + Value: big.Zero(), + + Method: power.Methods.CreateMiner, + Params: params, + } + signed, err := m.FullNode.FullNode.MpoolPushMessage(ctx, createStorageMinerMsg, nil) + require.NoError(n.t, err) + + mw, err := m.FullNode.FullNode.StateWaitMsg(ctx, signed.Cid(), build.MessageConfidence, api.LookbackNoLimit, true) + require.NoError(n.t, err) + require.Equal(n.t, exitcode.Ok, mw.Receipt.ExitCode) + + var retval power2.CreateMinerReturn + err = retval.UnmarshalCBOR(bytes.NewReader(mw.Receipt.Return)) + require.NoError(n.t, err, "failed to create miner") + + m.ActorAddr = retval.IDAddress + } else { + params, err := actors.SerializeParams(&miner2.ChangePeerIDParams{NewID: abi.PeerID(m.Libp2p.PeerID)}) + require.NoError(n.t, err) + + msg := &types.Message{ + To: m.options.mainMiner.ActorAddr, + From: m.options.mainMiner.OwnerKey.Address, + Method: miner.Methods.ChangePeerID, + Params: params, + Value: types.NewInt(0), + } + + signed, err2 := m.FullNode.FullNode.MpoolPushMessage(ctx, msg, nil) + require.NoError(n.t, err2) + + mw, err2 := m.FullNode.FullNode.StateWaitMsg(ctx, signed.Cid(), build.MessageConfidence, api.LookbackNoLimit, true) + require.NoError(n.t, err2) + require.Equal(n.t, exitcode.Ok, mw.Receipt.ExitCode) + } + } + + has, err := m.FullNode.WalletHas(ctx, m.OwnerKey.Address) + 
require.NoError(n.t, err) + + // Only import the owner's full key into our companion full node, if we + // don't have it still. + if !has { + _, err = m.FullNode.WalletImport(ctx, &m.OwnerKey.KeyInfo) + require.NoError(n.t, err) + } + + // // Set it as the default address. + // err = m.FullNode.WalletSetDefault(ctx, m.OwnerAddr.Address) + // require.NoError(n.t, err) + + r := repo.NewMemory(nil) + + lr, err := r.Lock(repo.StorageMiner) + require.NoError(n.t, err) + + c, err := lr.Config() + require.NoError(n.t, err) + + cfg, ok := c.(*config.StorageMiner) + if !ok { + n.t.Fatalf("invalid config from repo, got: %T", c) + } + cfg.Common.API.RemoteListenAddress = m.RemoteListener.Addr().String() + cfg.Subsystems.EnableMarkets = m.options.subsystems.Has(SMarkets) + cfg.Subsystems.EnableMining = m.options.subsystems.Has(SMining) + cfg.Subsystems.EnableSealing = m.options.subsystems.Has(SSealing) + cfg.Subsystems.EnableSectorStorage = m.options.subsystems.Has(SSectorStorage) + + if m.options.mainMiner != nil { + token, err := m.options.mainMiner.FullNode.AuthNew(ctx, api.AllPermissions) + require.NoError(n.t, err) + + cfg.Subsystems.SectorIndexApiInfo = fmt.Sprintf("%s:%s", token, m.options.mainMiner.ListenAddr) + cfg.Subsystems.SealerApiInfo = fmt.Sprintf("%s:%s", token, m.options.mainMiner.ListenAddr) + + fmt.Println("config for market node, setting SectorIndexApiInfo to: ", cfg.Subsystems.SectorIndexApiInfo) + fmt.Println("config for market node, setting SealerApiInfo to: ", cfg.Subsystems.SealerApiInfo) + } + + err = lr.SetConfig(func(raw interface{}) { + rcfg := raw.(*config.StorageMiner) + *rcfg = *cfg + }) + require.NoError(n.t, err) + + ks, err := lr.KeyStore() + require.NoError(n.t, err) + + pk, err := m.Libp2p.PrivKey.Bytes() + require.NoError(n.t, err) + + err = ks.Put("libp2p-host", types.KeyInfo{ + Type: "libp2p-host", + PrivateKey: pk, + }) + require.NoError(n.t, err) + + ds, err := lr.Datastore(context.TODO(), "/metadata") + require.NoError(n.t, err) + + 
err = ds.Put(datastore.NewKey("miner-address"), m.ActorAddr.Bytes()) + require.NoError(n.t, err) + + nic := storedcounter.New(ds, datastore.NewKey(modules.StorageCounterDSPrefix)) + for i := 0; i < m.options.sectors; i++ { + _, err := nic.Next() + require.NoError(n.t, err) + } + _, err = nic.Next() + require.NoError(n.t, err) + + err = lr.Close() + require.NoError(n.t, err) + + if m.options.mainMiner == nil { + enc, err := actors.SerializeParams(&miner2.ChangePeerIDParams{NewID: abi.PeerID(m.Libp2p.PeerID)}) + require.NoError(n.t, err) + + msg := &types.Message{ + From: m.OwnerKey.Address, + To: m.ActorAddr, + Method: miner.Methods.ChangePeerID, + Params: enc, + Value: types.NewInt(0), + } + + _, err2 := m.FullNode.MpoolPushMessage(ctx, msg, nil) + require.NoError(n.t, err2) + } + + var mineBlock = make(chan lotusminer.MineReq) + opts := []node.Option{ + node.StorageMiner(&m.StorageMiner, cfg.Subsystems), + node.Base(), + node.Repo(r), + node.Test(), + + node.If(!m.options.disableLibp2p, node.MockHost(n.mn)), + + node.Override(new(v1api.FullNode), m.FullNode.FullNode), + node.Override(new(*lotusminer.Miner), lotusminer.NewTestMiner(mineBlock, m.ActorAddr)), + + // disable resource filtering so that local worker gets assigned tasks + // regardless of system pressure. + node.Override(new(sectorstorage.SealerConfig), func() sectorstorage.SealerConfig { + scfg := config.DefaultStorageMiner() + scfg.Storage.ResourceFiltering = sectorstorage.ResourceFilteringDisabled + return scfg.Storage + }), + } + + // append any node builder options. + opts = append(opts, m.options.extraNodeOpts...) + + idAddr, err := address.IDFromAddress(m.ActorAddr) + require.NoError(n.t, err) + + // preload preseals if the network still hasn't bootstrapped. 
+ var presealSectors []abi.SectorID + if !n.bootstrapped { + sectors := n.genesis.miners[i].Sectors + for _, sector := range sectors { + presealSectors = append(presealSectors, abi.SectorID{ + Miner: abi.ActorID(idAddr), + Number: sector.SectorID, + }) + } + } + + if n.options.mockProofs { + opts = append(opts, + node.Override(new(*mock.SectorMgr), func() (*mock.SectorMgr, error) { + return mock.NewMockSectorMgr(presealSectors), nil + }), + node.Override(new(sectorstorage.SectorManager), node.From(new(*mock.SectorMgr))), + node.Override(new(sectorstorage.Unsealer), node.From(new(*mock.SectorMgr))), + node.Override(new(sectorstorage.PieceProvider), node.From(new(*mock.SectorMgr))), + + node.Override(new(ffiwrapper.Verifier), mock.MockVerifier), + node.Override(new(ffiwrapper.Prover), mock.MockProver), + node.Unset(new(*sectorstorage.Manager)), + ) + } + + // start node + stop, err := node.New(ctx, opts...) + require.NoError(n.t, err) + + // using real proofs, therefore need real sectors. + if !n.bootstrapped && !n.options.mockProofs { + err := m.StorageAddLocal(ctx, m.PresealDir) + require.NoError(n.t, err) + } + + n.t.Cleanup(func() { _ = stop(context.Background()) }) + + // Are we hitting this node through its RPC? + if m.options.rpc { + withRPC := minerRpc(n.t, m) + n.inactive.miners[i] = withRPC + } + + mineOne := func(ctx context.Context, req lotusminer.MineReq) error { + select { + case mineBlock <- req: + return nil + case <-ctx.Done(): + return ctx.Err() + } + } + + m.MineOne = mineOne + m.Stop = stop + + n.active.miners = append(n.active.miners, m) + } + + // If we are here, we have processed all inactive miners and moved them + // to active, so clear the slice. + n.inactive.miners = n.inactive.miners[:0] + + // Link all the nodes. 
+ err = n.mn.LinkAll() + require.NoError(n.t, err) + + if !n.bootstrapped && len(n.active.miners) > 0 { + // We have *just* bootstrapped, so mine 2 blocks to setup some CE stuff in some actors + var wait sync.Mutex + wait.Lock() + + observer := n.active.fullnodes[0] + + bm := NewBlockMiner(n.t, n.active.miners[0]) + n.t.Cleanup(bm.Stop) + + bm.MineUntilBlock(ctx, observer, func(epoch abi.ChainEpoch) { + wait.Unlock() + }) + wait.Lock() + bm.MineUntilBlock(ctx, observer, func(epoch abi.ChainEpoch) { + wait.Unlock() + }) + wait.Lock() + } + + n.bootstrapped = true + return n +} + +// InterconnectAll connects all miners and full nodes to one another. +func (n *Ensemble) InterconnectAll() *Ensemble { + // connect full nodes to miners. + for _, from := range n.active.fullnodes { + for _, to := range n.active.miners { + // []*TestMiner to []api.CommonAPI type coercion not possible + // so cannot use variadic form. + n.Connect(from, to) + } + } + + // connect full nodes between each other, skipping ourselves. + last := len(n.active.fullnodes) - 1 + for i, from := range n.active.fullnodes { + if i == last { + continue + } + for _, to := range n.active.fullnodes[i+1:] { + n.Connect(from, to) + } + } + return n +} + +// Connect connects one full node to the provided full nodes. +func (n *Ensemble) Connect(from api.Common, to ...api.Common) *Ensemble { + addr, err := from.NetAddrsListen(context.Background()) + require.NoError(n.t, err) + + for _, other := range to { + err = other.NetConnect(context.Background(), addr) + require.NoError(n.t, err) + } + return n +} + +// BeginMining kicks off mining for the specified miners. If nil or 0-length, +// it will kick off mining for all enrolled and active miners. It also adds a +// cleanup function to stop all mining operations on test teardown. 
+func (n *Ensemble) BeginMining(blocktime time.Duration, miners ...*TestMiner) []*BlockMiner { + ctx := context.Background() + + // wait one second to make sure that nodes are connected and have handshaken. + // TODO make this deterministic by listening to identify events on the + // libp2p eventbus instead (or something else). + time.Sleep(1 * time.Second) + + var bms []*BlockMiner + if len(miners) == 0 { + // no miners have been provided explicitly, instantiate block miners + // for all active miners that aren't still mining. + for _, m := range n.active.miners { + if _, ok := n.active.bms[m]; ok { + continue // skip, already have a block miner + } + miners = append(miners, m) + } + } + + for _, m := range miners { + bm := NewBlockMiner(n.t, m) + bm.MineBlocks(ctx, blocktime) + n.t.Cleanup(bm.Stop) + + bms = append(bms, bm) + + n.active.bms[m] = bm + } + + return bms +} + +func (n *Ensemble) generateGenesis() *genesis.Template { + var verifRoot = gen.DefaultVerifregRootkeyActor + if k := n.options.verifiedRoot.key; k != nil { + verifRoot = genesis.Actor{ + Type: genesis.TAccount, + Balance: n.options.verifiedRoot.initialBalance, + Meta: (&genesis.AccountMeta{Owner: k.Address}).ActorMeta(), + } + } + + templ := &genesis.Template{ + NetworkVersion: network.Version0, + Accounts: n.genesis.accounts, + Miners: n.genesis.miners, + NetworkName: "test", + Timestamp: uint64(time.Now().Unix() - int64(n.options.pastOffset.Seconds())), + VerifregRootKey: verifRoot, + RemainderAccount: gen.DefaultRemainderAccountActor, + } + + return templ +} diff --git a/itests/kit/ensemble_opts.go b/itests/kit/ensemble_opts.go new file mode 100644 index 000000000..440362ed1 --- /dev/null +++ b/itests/kit/ensemble_opts.go @@ -0,0 +1,55 @@ +package kit + +import ( + "time" + + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/lotus/chain/wallet" +) + +type EnsembleOpt func(opts *ensembleOpts) error + +type genesisAccount struct { + key *wallet.Key + 
initialBalance abi.TokenAmount +} + +type ensembleOpts struct { + pastOffset time.Duration + verifiedRoot genesisAccount + accounts []genesisAccount + mockProofs bool +} + +var DefaultEnsembleOpts = ensembleOpts{ + pastOffset: 10000000 * time.Second, // time sufficiently in the past to trigger catch-up mining. +} + +// MockProofs activates mock proofs for the entire ensemble. +func MockProofs() EnsembleOpt { + return func(opts *ensembleOpts) error { + opts.mockProofs = true + return nil + } +} + +// RootVerifier specifies the key to be enlisted as the verified registry root, +// as well as the initial balance to be attributed during genesis. +func RootVerifier(key *wallet.Key, balance abi.TokenAmount) EnsembleOpt { + return func(opts *ensembleOpts) error { + opts.verifiedRoot.key = key + opts.verifiedRoot.initialBalance = balance + return nil + } +} + +// Account sets up an account at genesis with the specified key and balance. +func Account(key *wallet.Key, balance abi.TokenAmount) EnsembleOpt { + return func(opts *ensembleOpts) error { + opts.accounts = append(opts.accounts, genesisAccount{ + key: key, + initialBalance: balance, + }) + return nil + } +} diff --git a/itests/kit/ensemble_presets.go b/itests/kit/ensemble_presets.go new file mode 100644 index 000000000..b7ff80aa1 --- /dev/null +++ b/itests/kit/ensemble_presets.go @@ -0,0 +1,102 @@ +package kit + +import ( + "testing" + "time" +) + +// EnsembleMinimal creates and starts an Ensemble with a single full node and a single miner. +// It does not interconnect nodes nor does it begin mining. +// +// This function supports passing both ensemble and node functional options. +// Functional options are applied to all nodes. 
+func EnsembleMinimal(t *testing.T, opts ...interface{}) (*TestFullNode, *TestMiner, *Ensemble) { + opts = append(opts, WithAllSubsystems()) + + eopts, nopts := siftOptions(t, opts) + + var ( + full TestFullNode + miner TestMiner + ) + ens := NewEnsemble(t, eopts...).FullNode(&full, nopts...).Miner(&miner, &full, nopts...).Start() + return &full, &miner, ens +} + +func EnsembleWithMinerAndMarketNodes(t *testing.T, opts ...interface{}) (*TestFullNode, *TestMiner, *TestMiner, *Ensemble) { + eopts, nopts := siftOptions(t, opts) + + var ( + fullnode TestFullNode + main, market TestMiner + ) + + mainNodeOpts := []NodeOpt{WithSubsystems(SSealing, SSectorStorage, SMining), DisableLibp2p()} + mainNodeOpts = append(mainNodeOpts, nopts...) + + blockTime := 100 * time.Millisecond + ens := NewEnsemble(t, eopts...).FullNode(&fullnode, nopts...).Miner(&main, &fullnode, mainNodeOpts...).Start() + ens.BeginMining(blockTime) + + marketNodeOpts := []NodeOpt{OwnerAddr(fullnode.DefaultKey), MainMiner(&main), WithSubsystems(SMarkets)} + marketNodeOpts = append(marketNodeOpts, nopts...) + + ens.Miner(&market, &fullnode, marketNodeOpts...).Start().Connect(market, fullnode) + + return &fullnode, &main, &market, ens +} + +// EnsembleTwoOne creates and starts an Ensemble with two full nodes and one miner. +// It does not interconnect nodes nor does it begin mining. +// +// This function supports passing both ensemble and node functional options. +// Functional options are applied to all nodes. +func EnsembleTwoOne(t *testing.T, opts ...interface{}) (*TestFullNode, *TestFullNode, *TestMiner, *Ensemble) { + opts = append(opts, WithAllSubsystems()) + + eopts, nopts := siftOptions(t, opts) + + var ( + one, two TestFullNode + miner TestMiner + ) + ens := NewEnsemble(t, eopts...).FullNode(&one, nopts...).FullNode(&two, nopts...).Miner(&miner, &one, nopts...).Start() + return &one, &two, &miner, ens +} + +// EnsembleOneTwo creates and starts an Ensemble with one full node and two miners. 
+// It does not interconnect nodes nor does it begin mining. +// +// This function supports passing both ensemble and node functional options. +// Functional options are applied to all nodes. +func EnsembleOneTwo(t *testing.T, opts ...interface{}) (*TestFullNode, *TestMiner, *TestMiner, *Ensemble) { + opts = append(opts, WithAllSubsystems()) + + eopts, nopts := siftOptions(t, opts) + + var ( + full TestFullNode + one, two TestMiner + ) + ens := NewEnsemble(t, eopts...). + FullNode(&full, nopts...). + Miner(&one, &full, nopts...). + Miner(&two, &full, nopts...). + Start() + + return &full, &one, &two, ens +} + +func siftOptions(t *testing.T, opts []interface{}) (eopts []EnsembleOpt, nopts []NodeOpt) { + for _, v := range opts { + switch o := v.(type) { + case EnsembleOpt: + eopts = append(eopts, o) + case NodeOpt: + nopts = append(nopts, o) + default: + t.Fatalf("invalid option type: %T", o) + } + } + return eopts, nopts +} diff --git a/itests/kit/files.go b/itests/kit/files.go new file mode 100644 index 000000000..48592b518 --- /dev/null +++ b/itests/kit/files.go @@ -0,0 +1,58 @@ +package kit + +import ( + "bytes" + "io" + "math/rand" + "os" + "testing" + + "github.com/minio/blake2b-simd" + + "github.com/stretchr/testify/require" +) + +// CreateRandomFile creates a random file with the provided seed and the +// provided size. +func CreateRandomFile(t *testing.T, rseed, size int) (path string) { + if size == 0 { + size = 1600 + } + + source := io.LimitReader(rand.New(rand.NewSource(int64(rseed))), int64(size)) + + file, err := os.CreateTemp(t.TempDir(), "sourcefile.dat") + require.NoError(t, err) + + n, err := io.Copy(file, source) + require.NoError(t, err) + require.EqualValues(t, n, size) + + return file.Name() +} + +// AssertFilesEqual compares two files by blake2b hash equality and +// fails the test if unequal. +func AssertFilesEqual(t *testing.T, left, right string) { + // initialize hashes. 
+ leftH, rightH := blake2b.New256(), blake2b.New256() + + // open files. + leftF, err := os.Open(left) + require.NoError(t, err) + + rightF, err := os.Open(right) + require.NoError(t, err) + + // feed hash functions. + _, err = io.Copy(leftH, leftF) + require.NoError(t, err) + + _, err = io.Copy(rightH, rightF) + require.NoError(t, err) + + // compute digests. + leftD, rightD := leftH.Sum(nil), rightH.Sum(nil) + + require.True(t, bytes.Equal(leftD, rightD)) +} diff --git a/itests/kit/funds.go b/itests/kit/funds.go new file mode 100644 index 000000000..e49c708ea --- /dev/null +++ b/itests/kit/funds.go @@ -0,0 +1,40 @@ +package kit + +import ( + "context" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/filecoin-project/go-state-types/abi" + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/chain/types" +) + +// SendFunds sends funds from the default wallet of the specified sender node +// to the recipient address. 
+func SendFunds(ctx context.Context, t *testing.T, sender *TestFullNode, recipient address.Address, amount abi.TokenAmount) { + senderAddr, err := sender.WalletDefaultAddress(ctx) + require.NoError(t, err) + + msg := &types.Message{ + From: senderAddr, + To: recipient, + Value: amount, + } + + sm, err := sender.MpoolPushMessage(ctx, msg, nil) + require.NoError(t, err) + + sender.WaitMsg(ctx, sm.Cid()) +} + +func (f *TestFullNode) WaitMsg(ctx context.Context, msg cid.Cid) { + res, err := f.StateWaitMsg(ctx, msg, 3, api.LookbackNoLimit, true) + require.NoError(f.t, err) + + require.EqualValues(f.t, 0, res.Receipt.ExitCode, "message did not successfully execute") +} diff --git a/itests/kit/init.go b/itests/kit/init.go new file mode 100644 index 000000000..dc8463cb4 --- /dev/null +++ b/itests/kit/init.go @@ -0,0 +1,32 @@ +package kit + +import ( + "fmt" + "os" + + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/chain/actors/policy" + logging "github.com/ipfs/go-log/v2" +) + +func init() { + _ = logging.SetLogLevel("*", "INFO") + + policy.SetProviderCollateralSupplyTarget(big.Zero(), big.NewInt(1)) + + policy.SetConsensusMinerMinPower(abi.NewStoragePower(2048)) + policy.SetSupportedProofTypes(abi.RegisteredSealProof_StackedDrg2KiBV1) + policy.SetMinVerifiedDealSize(abi.NewStoragePower(256)) + + build.InsecurePoStValidation = true + + if err := os.Setenv("BELLMAN_NO_GPU", "1"); err != nil { + panic(fmt.Sprintf("failed to set BELLMAN_NO_GPU env variable: %s", err)) + } + + if err := os.Setenv("LOTUS_DISABLE_WATCHDOG", "1"); err != nil { + panic(fmt.Sprintf("failed to set LOTUS_DISABLE_WATCHDOG env variable: %s", err)) + } +} diff --git a/itests/kit/log.go b/itests/kit/log.go new file mode 100644 index 000000000..3dce3af9d --- /dev/null +++ b/itests/kit/log.go @@ -0,0 +1,19 @@ +package kit + +import ( + 
"github.com/filecoin-project/lotus/lib/lotuslog" + logging "github.com/ipfs/go-log/v2" +) + +func QuietMiningLogs() { + lotuslog.SetupLogLevels() + + _ = logging.SetLogLevel("miner", "ERROR") // set this to INFO to watch mining happen. + _ = logging.SetLogLevel("chainstore", "ERROR") + _ = logging.SetLogLevel("chain", "ERROR") + _ = logging.SetLogLevel("sub", "ERROR") + _ = logging.SetLogLevel("storageminer", "ERROR") + _ = logging.SetLogLevel("pubsub", "ERROR") + _ = logging.SetLogLevel("gen", "ERROR") + _ = logging.SetLogLevel("dht/RtRefreshManager", "ERROR") +} diff --git a/cli/test/mockcli.go b/itests/kit/mockcli.go similarity index 98% rename from cli/test/mockcli.go rename to itests/kit/mockcli.go index e8eb78f1b..c0f218920 100644 --- a/cli/test/mockcli.go +++ b/itests/kit/mockcli.go @@ -1,4 +1,4 @@ -package test +package kit import ( "bytes" @@ -56,7 +56,7 @@ type MockCLIClient struct { func (c *MockCLIClient) RunCmd(input ...string) string { out, err := c.RunCmdRaw(input...) - require.NoError(c.t, err) + require.NoError(c.t, err, "output:\n%s", out) return out } diff --git a/itests/kit/node_full.go b/itests/kit/node_full.go new file mode 100644 index 000000000..83586e188 --- /dev/null +++ b/itests/kit/node_full.go @@ -0,0 +1,85 @@ +package kit + +import ( + "context" + "testing" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/api/v1api" + "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/chain/wallet" + "github.com/multiformats/go-multiaddr" + "github.com/stretchr/testify/require" +) + +// TestFullNode represents a full node enrolled in an Ensemble. +type TestFullNode struct { + v1api.FullNode + + t *testing.T + + // ListenAddr is the address on which an API server is listening, if an + // API server is created for this Node. 
+ ListenAddr multiaddr.Multiaddr + DefaultKey *wallet.Key + + options nodeOpts +} + +// CreateImportFile creates a random file with the specified seed and size, and +// imports it into the full node. +func (f *TestFullNode) CreateImportFile(ctx context.Context, rseed int, size int) (res *api.ImportRes, path string) { + path = CreateRandomFile(f.t, rseed, size) + res, err := f.ClientImport(ctx, api.FileRef{Path: path}) + require.NoError(f.t, err) + return res, path +} + +// WaitTillChain waits until a specified chain condition is met. It returns +// the first tipset where the condition is met. +func (f *TestFullNode) WaitTillChain(ctx context.Context, pred ChainPredicate) *types.TipSet { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + heads, err := f.ChainNotify(ctx) + require.NoError(f.t, err) + + for chg := range heads { + for _, c := range chg { + if c.Type != "apply" { + continue + } + if ts := c.Val; pred(ts) { + return ts + } + } + } + require.Fail(f.t, "chain condition not met") + return nil +} + +// ChainPredicate encapsulates a chain condition. +type ChainPredicate func(set *types.TipSet) bool + +// HeightAtLeast returns a ChainPredicate that is satisfied when the chain +// height is equal or higher to the target. +func HeightAtLeast(target abi.ChainEpoch) ChainPredicate { + return func(ts *types.TipSet) bool { + return ts.Height() >= target + } +} + +// BlockMinedBy returns a ChainPredicate that is satisfied when we observe the +// first block mined by the specified miner. 
+func BlockMinedBy(miner address.Address) ChainPredicate { + return func(ts *types.TipSet) bool { + for _, b := range ts.Blocks() { + if b.Miner == miner { + return true + } + } + return false + } +} diff --git a/itests/kit/node_miner.go b/itests/kit/node_miner.go new file mode 100644 index 000000000..ff406629c --- /dev/null +++ b/itests/kit/node_miner.go @@ -0,0 +1,198 @@ +package kit + +import ( + "context" + "encoding/json" + "fmt" + "io/ioutil" + "net" + "os" + "path/filepath" + "strings" + "testing" + "time" + + "github.com/google/uuid" + "github.com/stretchr/testify/require" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/chain/wallet" + "github.com/filecoin-project/lotus/extern/sector-storage/stores" + sealing "github.com/filecoin-project/lotus/extern/storage-sealing" + "github.com/filecoin-project/lotus/miner" + libp2pcrypto "github.com/libp2p/go-libp2p-core/crypto" + "github.com/libp2p/go-libp2p-core/peer" + "github.com/multiformats/go-multiaddr" +) + +type MinerSubsystem int + +const ( + SMarkets MinerSubsystem = 1 << iota + SMining + SSealing + SSectorStorage + + MinerSubsystems = iota +) + +func (ms MinerSubsystem) Add(single MinerSubsystem) MinerSubsystem { + return ms | single +} + +func (ms MinerSubsystem) Has(single MinerSubsystem) bool { + return ms&single == single +} + +func (ms MinerSubsystem) All() [MinerSubsystems]bool { + var out [MinerSubsystems]bool + + for i := range out { + out[i] = ms&(1< 0 + } + + return out +} + +// TestMiner represents a miner enrolled in an Ensemble. 
+type TestMiner struct { + api.StorageMiner + + t *testing.T + + // ListenAddr is the address on which an API server is listening, if an + // API server is created for this Node + ListenAddr multiaddr.Multiaddr + + ActorAddr address.Address + OwnerKey *wallet.Key + MineOne func(context.Context, miner.MineReq) error + Stop func(context.Context) error + + FullNode *TestFullNode + PresealDir string + + Libp2p struct { + PeerID peer.ID + PrivKey libp2pcrypto.PrivKey + } + + RemoteListener net.Listener + + options nodeOpts +} + +func (tm *TestMiner) PledgeSectors(ctx context.Context, n, existing int, blockNotif <-chan struct{}) { + toCheck := tm.StartPledge(ctx, n, existing, blockNotif) + + for len(toCheck) > 0 { + tm.FlushSealingBatches(ctx) + + states := map[api.SectorState]int{} + for n := range toCheck { + st, err := tm.StorageMiner.SectorsStatus(ctx, n, false) + require.NoError(tm.t, err) + states[st.State]++ + if st.State == api.SectorState(sealing.Proving) { + delete(toCheck, n) + } + if strings.Contains(string(st.State), "Fail") { + tm.t.Fatal("sector in a failed state", st.State) + } + } + + build.Clock.Sleep(100 * time.Millisecond) + fmt.Printf("WaitSeal: %d %+v\n", len(toCheck), states) + } + +} + +func (tm *TestMiner) StartPledge(ctx context.Context, n, existing int, blockNotif <-chan struct{}) map[abi.SectorNumber]struct{} { + for i := 0; i < n; i++ { + if i%3 == 0 && blockNotif != nil { + <-blockNotif + tm.t.Log("WAIT") + } + tm.t.Logf("PLEDGING %d", i) + _, err := tm.StorageMiner.PledgeSector(ctx) + require.NoError(tm.t, err) + } + + for { + s, err := tm.StorageMiner.SectorsList(ctx) // Note - the test builder doesn't import genesis sectors into FSM + require.NoError(tm.t, err) + fmt.Printf("Sectors: %d\n", len(s)) + if len(s) >= n+existing { + break + } + + build.Clock.Sleep(100 * time.Millisecond) + } + + fmt.Printf("All sectors is fsm\n") + + s, err := tm.StorageMiner.SectorsList(ctx) + require.NoError(tm.t, err) + + toCheck := 
map[abi.SectorNumber]struct{}{} + for _, number := range s { + toCheck[number] = struct{}{} + } + + return toCheck +} + +func (tm *TestMiner) FlushSealingBatches(ctx context.Context) { + pcb, err := tm.StorageMiner.SectorPreCommitFlush(ctx) + require.NoError(tm.t, err) + if pcb != nil { + fmt.Printf("PRECOMMIT BATCH: %+v\n", pcb) + } + + cb, err := tm.StorageMiner.SectorCommitFlush(ctx) + require.NoError(tm.t, err) + if cb != nil { + fmt.Printf("COMMIT BATCH: %+v\n", cb) + } +} + +const metaFile = "sectorstore.json" + +func (tm *TestMiner) AddStorage(ctx context.Context, t *testing.T, weight uint64, seal, store bool) { + p, err := ioutil.TempDir("", "lotus-testsectors-") + require.NoError(t, err) + + if err := os.MkdirAll(p, 0755); err != nil { + if !os.IsExist(err) { + require.NoError(t, err) + } + } + + _, err = os.Stat(filepath.Join(p, metaFile)) + if !os.IsNotExist(err) { + require.NoError(t, err) + } + + cfg := &stores.LocalStorageMeta{ + ID: stores.ID(uuid.New().String()), + Weight: weight, + CanSeal: seal, + CanStore: store, + } + + if !(cfg.CanStore || cfg.CanSeal) { + t.Fatal("must specify at least one of CanStore or cfg.CanSeal") + } + + b, err := json.MarshalIndent(cfg, "", " ") + require.NoError(t, err) + + err = ioutil.WriteFile(filepath.Join(p, metaFile), b, 0644) + require.NoError(t, err) + + err = tm.StorageAddLocal(ctx, p) + require.NoError(t, err) +} diff --git a/itests/kit/node_opts.go b/itests/kit/node_opts.go new file mode 100644 index 000000000..87707aa16 --- /dev/null +++ b/itests/kit/node_opts.go @@ -0,0 +1,145 @@ +package kit + +import ( + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/chain/wallet" + "github.com/filecoin-project/lotus/node" +) + +// DefaultPresealsPerBootstrapMiner is the number of preseals that every +// bootstrap miner has by default. 
It can be overridden through the +// PresealSectors option. +const DefaultPresealsPerBootstrapMiner = 2 + +const TestSpt = abi.RegisteredSealProof_StackedDrg2KiBV1_1 + +// nodeOpts is an options accumulating struct, where functional options are +// merged into. +type nodeOpts struct { + balance abi.TokenAmount + lite bool + sectors int + rpc bool + ownerKey *wallet.Key + extraNodeOpts []node.Option + + subsystems MinerSubsystem + mainMiner *TestMiner + disableLibp2p bool + optBuilders []OptBuilder + proofType abi.RegisteredSealProof +} + +// DefaultNodeOpts are the default options that will be applied to test nodes. +var DefaultNodeOpts = nodeOpts{ + balance: big.Mul(big.NewInt(100000000), types.NewInt(build.FilecoinPrecision)), + sectors: DefaultPresealsPerBootstrapMiner, + proofType: abi.RegisteredSealProof_StackedDrg2KiBV1_1, // default _concrete_ proof type for non-genesis miners (notice the _1) for new actors versions. +} + +// OptBuilder is used to create an option after some other node is already +// active. Takes all active nodes as a parameter. +type OptBuilder func(activeNodes []*TestFullNode) node.Option + +// NodeOpt is a functional option for test nodes. 
+type NodeOpt func(opts *nodeOpts) error + +func WithAllSubsystems() NodeOpt { + return func(opts *nodeOpts) error { + opts.subsystems = opts.subsystems.Add(SMarkets) + opts.subsystems = opts.subsystems.Add(SMining) + opts.subsystems = opts.subsystems.Add(SSealing) + opts.subsystems = opts.subsystems.Add(SSectorStorage) + + return nil + } +} + +func WithSubsystems(systems ...MinerSubsystem) NodeOpt { + return func(opts *nodeOpts) error { + for _, s := range systems { + opts.subsystems = opts.subsystems.Add(s) + } + return nil + } +} + +func DisableLibp2p() NodeOpt { + return func(opts *nodeOpts) error { + opts.disableLibp2p = true + return nil + } +} + +func MainMiner(m *TestMiner) NodeOpt { + return func(opts *nodeOpts) error { + opts.mainMiner = m + return nil + } +} + +// OwnerBalance specifies the balance to be attributed to a miner's owner +// account. Only relevant when creating a miner. +func OwnerBalance(balance abi.TokenAmount) NodeOpt { + return func(opts *nodeOpts) error { + opts.balance = balance + return nil + } +} + +// LiteNode specifies that this node will be a lite node. Only relevant when +// creating a fullnode. +func LiteNode() NodeOpt { + return func(opts *nodeOpts) error { + opts.lite = true + return nil + } +} + +// PresealSectors specifies the amount of preseal sectors to give to a miner +// at genesis. Only relevant when creating a miner. +func PresealSectors(sectors int) NodeOpt { + return func(opts *nodeOpts) error { + opts.sectors = sectors + return nil + } +} + +// ThroughRPC makes interactions with this node throughout the test flow through +// the JSON-RPC API. +func ThroughRPC() NodeOpt { + return func(opts *nodeOpts) error { + opts.rpc = true + return nil + } +} + +// OwnerAddr sets the owner address of a miner. Only relevant when creating +// a miner. 
+func OwnerAddr(wk *wallet.Key) NodeOpt { + return func(opts *nodeOpts) error { + opts.ownerKey = wk + return nil + } +} + +// ConstructorOpts are Lotus node constructor options that are passed as-is to +// the node. +func ConstructorOpts(extra ...node.Option) NodeOpt { + return func(opts *nodeOpts) error { + opts.extraNodeOpts = extra + return nil + } +} + +// ProofType sets the proof type for this node. If you're using new actor +// versions, this should be a _1 proof type. +func ProofType(proofType abi.RegisteredSealProof) NodeOpt { + return func(opts *nodeOpts) error { + opts.proofType = proofType + return nil + } +} diff --git a/itests/kit/node_opts_nv.go b/itests/kit/node_opts_nv.go new file mode 100644 index 000000000..d4c84b4f1 --- /dev/null +++ b/itests/kit/node_opts_nv.go @@ -0,0 +1,90 @@ +package kit + +import ( + "context" + + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/network" + "github.com/filecoin-project/lotus/chain/stmgr" + "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/node" + "github.com/ipfs/go-cid" +) + +// DefaultTestUpgradeSchedule +var DefaultTestUpgradeSchedule = stmgr.UpgradeSchedule{{ + Network: network.Version9, + Height: 1, + Migration: stmgr.UpgradeActorsV2, +}, { + Network: network.Version10, + Height: 2, + Migration: stmgr.UpgradeActorsV3, +}, { + Network: network.Version12, + Height: 3, + Migration: stmgr.UpgradeActorsV4, +}, { + Network: network.Version13, + Height: 4, + Migration: stmgr.UpgradeActorsV5, +}} + +func LatestActorsAt(upgradeHeight abi.ChainEpoch) node.Option { + // Attention: Update this when introducing new actor versions or your tests will be sad + return NetworkUpgradeAt(network.Version13, upgradeHeight) +} + +// InstantaneousNetworkVersion starts the network instantaneously at the +// specified version in height 1. 
+func InstantaneousNetworkVersion(version network.Version) node.Option { + // composes all migration functions + var mf stmgr.MigrationFunc = func(ctx context.Context, sm *stmgr.StateManager, cache stmgr.MigrationCache, cb stmgr.ExecMonitor, oldState cid.Cid, height abi.ChainEpoch, ts *types.TipSet) (newState cid.Cid, err error) { + var state = oldState + for _, u := range DefaultTestUpgradeSchedule { + if u.Network > version { + break + } + state, err = u.Migration(ctx, sm, cache, cb, state, height, ts) + if err != nil { + return cid.Undef, err + } + } + return state, nil + } + return node.Override(new(stmgr.UpgradeSchedule), stmgr.UpgradeSchedule{ + {Network: version, Height: 1, Migration: mf}, + }) +} + +func NetworkUpgradeAt(version network.Version, upgradeHeight abi.ChainEpoch) node.Option { + schedule := stmgr.UpgradeSchedule{} + for _, upgrade := range DefaultTestUpgradeSchedule { + if upgrade.Network > version { + break + } + + schedule = append(schedule, upgrade) + } + + if upgradeHeight > 0 { + schedule[len(schedule)-1].Height = upgradeHeight + } + + return node.Override(new(stmgr.UpgradeSchedule), schedule) +} + +func SDRUpgradeAt(calico, persian abi.ChainEpoch) node.Option { + return node.Override(new(stmgr.UpgradeSchedule), stmgr.UpgradeSchedule{{ + Network: network.Version6, + Height: 1, + Migration: stmgr.UpgradeActorsV2, + }, { + Network: network.Version7, + Height: calico, + Migration: stmgr.UpgradeCalico, + }, { + Network: network.Version8, + Height: persian, + }}) +} diff --git a/itests/kit/rpc.go b/itests/kit/rpc.go new file mode 100644 index 000000000..35153eb64 --- /dev/null +++ b/itests/kit/rpc.go @@ -0,0 +1,65 @@ +package kit + +import ( + "context" + "fmt" + "net" + "net/http" + "net/http/httptest" + "testing" + + "github.com/filecoin-project/lotus/api/client" + "github.com/filecoin-project/lotus/node" + "github.com/multiformats/go-multiaddr" + manet "github.com/multiformats/go-multiaddr/net" + "github.com/stretchr/testify/require" +) + 
+func CreateRPCServer(t *testing.T, handler http.Handler, listener net.Listener) (*httptest.Server, multiaddr.Multiaddr) { + testServ := &httptest.Server{ + Listener: listener, + Config: &http.Server{Handler: handler}, + } + testServ.Start() + + t.Cleanup(testServ.Close) + t.Cleanup(testServ.CloseClientConnections) + + addr := testServ.Listener.Addr() + maddr, err := manet.FromNetAddr(addr) + require.NoError(t, err) + return testServ, maddr +} + +func fullRpc(t *testing.T, f *TestFullNode) *TestFullNode { + handler, err := node.FullNodeHandler(f.FullNode, false) + require.NoError(t, err) + + l, err := net.Listen("tcp", "127.0.0.1:0") + require.NoError(t, err) + + srv, maddr := CreateRPCServer(t, handler, l) + + cl, stop, err := client.NewFullNodeRPCV1(context.Background(), "ws://"+srv.Listener.Addr().String()+"/rpc/v1", nil) + require.NoError(t, err) + t.Cleanup(stop) + f.ListenAddr, f.FullNode = maddr, cl + + return f +} + +func minerRpc(t *testing.T, m *TestMiner) *TestMiner { + handler, err := node.MinerHandler(m.StorageMiner, false) + require.NoError(t, err) + + srv, maddr := CreateRPCServer(t, handler, m.RemoteListener) + + fmt.Println("creating RPC server for", m.ActorAddr, "at: ", srv.Listener.Addr().String()) + url := "ws://" + srv.Listener.Addr().String() + "/rpc/v0" + cl, stop, err := client.NewStorageMinerRPCV0(context.Background(), url, nil) + require.NoError(t, err) + t.Cleanup(stop) + + m.ListenAddr, m.StorageMiner = maddr, cl + return m +} diff --git a/itests/kit/run.go b/itests/kit/run.go new file mode 100644 index 000000000..713efa3b8 --- /dev/null +++ b/itests/kit/run.go @@ -0,0 +1,20 @@ +package kit + +import ( + "os" + "testing" +) + +// EnvRunExpensiveTests is the environment variable that needs to be present +// and set to value "1" to enable running expensive tests outside of CI. 
+const EnvRunExpensiveTests = "LOTUS_RUN_EXPENSIVE_TESTS" + + // Expensive marks a test as expensive, skipping it immediately if not running in CI and the LOTUS_RUN_EXPENSIVE_TESTS environment variable is not set to "1". +func Expensive(t *testing.T) { + switch { + case os.Getenv("CI") == "true": + return + case os.Getenv(EnvRunExpensiveTests) != "1": + t.Skipf("skipping expensive test outside of CI; enable by setting env var %s=1", EnvRunExpensiveTests) + } +} diff --git a/cli/test/multisig.go b/itests/multisig/suite.go similarity index 86% rename from cli/test/multisig.go rename to itests/multisig/suite.go index 5a60894e6..86a8ab738 100644 --- a/cli/test/multisig.go +++ b/itests/multisig/suite.go @@ -1,4 +1,4 @@ -package test +package multisig import ( "context" @@ -8,28 +8,27 @@ import ( "testing" "github.com/filecoin-project/go-address" - "github.com/filecoin-project/lotus/api/test" "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/cli" + "github.com/filecoin-project/lotus/itests/kit" "github.com/stretchr/testify/require" - lcli "github.com/urfave/cli/v2" ) -func RunMultisigTest(t *testing.T, cmds []*lcli.Command, clientNode test.TestNode) { - ctx := context.Background() - +func RunMultisigTests(t *testing.T, client *kit.TestFullNode) { // Create mock CLI - mockCLI := NewMockCLI(ctx, t, cmds) - clientCLI := mockCLI.Client(clientNode.ListenAddr) + ctx := context.Background() + mockCLI := kit.NewMockCLI(ctx, t, cli.Commands) + clientCLI := mockCLI.Client(client.ListenAddr) // Create some wallets on the node to use for testing multisig var walletAddrs []address.Address for i := 0; i < 4; i++ { - addr, err := clientNode.WalletNew(ctx, types.KTSecp256k1) + addr, err := client.WalletNew(ctx, types.KTSecp256k1) require.NoError(t, err) walletAddrs = append(walletAddrs, addr) - test.SendFunds(ctx, t, clientNode, addr, types.NewInt(1e15)) + kit.SendFunds(ctx, t, client, addr, types.NewInt(1e15)) } // Create an msig with three of the addresses and threshold of two sigs diff --git a/itests/multisig_test.go
b/itests/multisig_test.go new file mode 100644 index 000000000..9a15e8c0e --- /dev/null +++ b/itests/multisig_test.go @@ -0,0 +1,20 @@ +package itests + +import ( + "testing" + "time" + + "github.com/filecoin-project/lotus/itests/kit" + "github.com/filecoin-project/lotus/itests/multisig" +) + +// TestMultisig does a basic test to exercise the multisig CLI commands +func TestMultisig(t *testing.T) { + kit.QuietMiningLogs() + + blockTime := 5 * time.Millisecond + client, _, ens := kit.EnsembleMinimal(t, kit.MockProofs(), kit.ThroughRPC()) + ens.InterconnectAll().BeginMining(blockTime) + + multisig.RunMultisigTests(t, client) +} diff --git a/itests/nonce_test.go b/itests/nonce_test.go new file mode 100644 index 000000000..b50fcbe26 --- /dev/null +++ b/itests/nonce_test.go @@ -0,0 +1,57 @@ +package itests + +import ( + "context" + "testing" + "time" + + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/itests/kit" + "github.com/stretchr/testify/require" +) + +func TestNonceIncremental(t *testing.T) { + ctx := context.Background() + + kit.QuietMiningLogs() + + client, _, ens := kit.EnsembleMinimal(t, kit.MockProofs()) + ens.InterconnectAll().BeginMining(10 * time.Millisecond) + + // create a new address where to send funds. + addr, err := client.WalletNew(ctx, types.KTBLS) + require.NoError(t, err) + + // get the existing balance from the default wallet to then split it. + bal, err := client.WalletBalance(ctx, client.DefaultKey.Address) + require.NoError(t, err) + + const iterations = 100 + + // we'll send half our balance (saving the other half for gas), + // in `iterations` increments. 
+ toSend := big.Div(bal, big.NewInt(2)) + each := big.Div(toSend, big.NewInt(iterations)) + + var sms []*types.SignedMessage + for i := 0; i < iterations; i++ { + msg := &types.Message{ + From: client.DefaultKey.Address, + To: addr, + Value: each, + } + + sm, err := client.MpoolPushMessage(ctx, msg, nil) + require.NoError(t, err) + require.EqualValues(t, i, sm.Message.Nonce) + + sms = append(sms, sm) + } + + for _, sm := range sms { + _, err := client.StateWaitMsg(ctx, sm.Cid(), 3, api.LookbackNoLimit, true) + require.NoError(t, err) + } +} diff --git a/api/test/paych.go b/itests/paych_api_test.go similarity index 64% rename from api/test/paych.go rename to itests/paych_api_test.go index 93a083c4a..647db21e0 100644 --- a/api/test/paych.go +++ b/itests/paych_api_test.go @@ -1,15 +1,15 @@ -package test +package itests import ( "context" - "fmt" - "sync/atomic" "testing" "time" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/lotus/itests/kit" "github.com/ipfs/go-cid" + "github.com/stretchr/testify/require" "github.com/filecoin-project/go-address" cbor "github.com/ipfs/go-ipld-cbor" @@ -26,64 +26,49 @@ import ( "github.com/filecoin-project/lotus/chain/types" ) -func TestPaymentChannels(t *testing.T, b APIBuilder, blocktime time.Duration) { +func TestPaymentChannelsAPI(t *testing.T) { + kit.QuietMiningLogs() + ctx := context.Background() - n, sn := b(t, TwoFull, OneMiner) + blockTime := 5 * time.Millisecond - paymentCreator := n[0] - paymentReceiver := n[1] - miner := sn[0] + var ( + paymentCreator kit.TestFullNode + paymentReceiver kit.TestFullNode + miner kit.TestMiner + ) - // get everyone connected - addrs, err := paymentCreator.NetAddrsListen(ctx) - if err != nil { - t.Fatal(err) - } - - if err := paymentReceiver.NetConnect(ctx, addrs); err != nil { - t.Fatal(err) - } - - if err := miner.NetConnect(ctx, addrs); err != nil { - t.Fatal(err) - } - - // start mining blocks - bm := 
NewBlockMiner(ctx, t, miner, blocktime) - bm.MineBlocks() + ens := kit.NewEnsemble(t, kit.MockProofs()). + FullNode(&paymentCreator). + FullNode(&paymentReceiver). + Miner(&miner, &paymentCreator, kit.WithAllSubsystems()). + Start(). + InterconnectAll() + bms := ens.BeginMining(blockTime) + bm := bms[0] // send some funds to register the receiver receiverAddr, err := paymentReceiver.WalletNew(ctx, types.KTSecp256k1) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) - SendFunds(ctx, t, paymentCreator, receiverAddr, abi.NewTokenAmount(1e18)) + kit.SendFunds(ctx, t, &paymentCreator, receiverAddr, abi.NewTokenAmount(1e18)) // setup the payment channel createrAddr, err := paymentCreator.WalletDefaultAddress(ctx) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) channelAmt := int64(7000) channelInfo, err := paymentCreator.PaychGet(ctx, createrAddr, receiverAddr, abi.NewTokenAmount(channelAmt)) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) channel, err := paymentCreator.PaychGetWaitReady(ctx, channelInfo.WaitSentinel) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) // allocate three lanes var lanes []uint64 for i := 0; i < 3; i++ { lane, err := paymentCreator.PaychAllocateLane(ctx, channel) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) lanes = append(lanes, lane) } @@ -92,45 +77,28 @@ func TestPaymentChannels(t *testing.T, b APIBuilder, blocktime time.Duration) { // supersedes the voucher with a value of 1000 for _, lane := range lanes { vouch1, err := paymentCreator.PaychVoucherCreate(ctx, channel, abi.NewTokenAmount(1000), lane) - if err != nil { - t.Fatal(err) - } - if vouch1.Voucher == nil { - t.Fatal(fmt.Errorf("Not enough funds to create voucher: missing %d", vouch1.Shortfall)) - } + require.NoError(t, err) + require.NotNil(t, vouch1.Voucher, "Not enough funds to create voucher: missing %d", vouch1.Shortfall) + vouch2, err := paymentCreator.PaychVoucherCreate(ctx, channel, 
abi.NewTokenAmount(2000), lane) - if err != nil { - t.Fatal(err) - } - if vouch2.Voucher == nil { - t.Fatal(fmt.Errorf("Not enough funds to create voucher: missing %d", vouch2.Shortfall)) - } + require.NoError(t, err) + require.NotNil(t, vouch2.Voucher, "Not enough funds to create voucher: missing %d", vouch2.Shortfall) + delta1, err := paymentReceiver.PaychVoucherAdd(ctx, channel, vouch1.Voucher, nil, abi.NewTokenAmount(1000)) - if err != nil { - t.Fatal(err) - } - if !delta1.Equals(abi.NewTokenAmount(1000)) { - t.Fatal("voucher didn't have the right amount") - } + require.NoError(t, err) + require.EqualValues(t, abi.NewTokenAmount(1000), delta1, "voucher didn't have the right amount") + delta2, err := paymentReceiver.PaychVoucherAdd(ctx, channel, vouch2.Voucher, nil, abi.NewTokenAmount(1000)) - if err != nil { - t.Fatal(err) - } - if !delta2.Equals(abi.NewTokenAmount(1000)) { - t.Fatal("voucher didn't have the right amount") - } + require.NoError(t, err) + require.EqualValues(t, abi.NewTokenAmount(1000), delta2, "voucher didn't have the right amount") } // settle the payment channel settleMsgCid, err := paymentCreator.PaychSettle(ctx, channel) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) res := waitForMessage(ctx, t, paymentCreator, settleMsgCid, time.Second*10, "settle") - if res.Receipt.ExitCode != 0 { - t.Fatal("Unable to settle payment channel") - } + require.EqualValues(t, 0, res.Receipt.ExitCode, "Unable to settle payment channel") creatorStore := adt.WrapStore(ctx, cbor.NewCborStore(blockstore.NewAPIBlockstore(paymentCreator))) @@ -167,87 +135,59 @@ func TestPaymentChannels(t *testing.T, b APIBuilder, blocktime time.Duration) { }, int(build.MessageConfidence)+1, build.Finality, func(oldTs, newTs *types.TipSet) (bool, events.StateChange, error) { return preds.OnPaymentChannelActorChanged(channel, preds.OnToSendAmountChanges())(ctx, oldTs.Key(), newTs.Key()) }) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) select { case 
<-finished: - case <-time.After(time.Second): + case <-time.After(10 * time.Second): t.Fatal("Timed out waiting for receiver to submit vouchers") } // Create a new voucher now that some vouchers have already been submitted vouchRes, err := paymentCreator.PaychVoucherCreate(ctx, channel, abi.NewTokenAmount(1000), 3) - if err != nil { - t.Fatal(err) - } - if vouchRes.Voucher == nil { - t.Fatal(fmt.Errorf("Not enough funds to create voucher: missing %d", vouchRes.Shortfall)) - } + require.NoError(t, err) + require.NotNil(t, vouchRes.Voucher, "Not enough funds to create voucher: missing %d", vouchRes.Shortfall) + vdelta, err := paymentReceiver.PaychVoucherAdd(ctx, channel, vouchRes.Voucher, nil, abi.NewTokenAmount(1000)) - if err != nil { - t.Fatal(err) - } - if !vdelta.Equals(abi.NewTokenAmount(1000)) { - t.Fatal("voucher didn't have the right amount") - } + require.NoError(t, err) + require.EqualValues(t, abi.NewTokenAmount(1000), vdelta, "voucher didn't have the right amount") // Create a new voucher whose value would exceed the channel balance excessAmt := abi.NewTokenAmount(1000) vouchRes, err = paymentCreator.PaychVoucherCreate(ctx, channel, excessAmt, 4) - if err != nil { - t.Fatal(err) - } - if vouchRes.Voucher != nil { - t.Fatal("Expected not to be able to create voucher whose value would exceed channel balance") - } - if !vouchRes.Shortfall.Equals(excessAmt) { - t.Fatal(fmt.Errorf("Expected voucher shortfall of %d, got %d", excessAmt, vouchRes.Shortfall)) - } + require.NoError(t, err) + require.Nil(t, vouchRes.Voucher, "Expected not to be able to create voucher whose value would exceed channel balance") + require.EqualValues(t, excessAmt, vouchRes.Shortfall, "Expected voucher shortfall of %d, got %d", excessAmt, vouchRes.Shortfall) // Add a voucher whose value would exceed the channel balance vouch := &paych.SignedVoucher{ChannelAddr: channel, Amount: excessAmt, Lane: 4, Nonce: 1} vb, err := vouch.SigningBytes() - if err != nil { - t.Fatal(err) - } + 
require.NoError(t, err) + sig, err := paymentCreator.WalletSign(ctx, createrAddr, vb) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) + vouch.Signature = sig _, err = paymentReceiver.PaychVoucherAdd(ctx, channel, vouch, nil, abi.NewTokenAmount(1000)) - if err == nil { - t.Fatal(fmt.Errorf("Expected shortfall error of %d", excessAmt)) - } + require.Errorf(t, err, "Expected shortfall error of %d", excessAmt) // wait for the settlement period to pass before collecting waitForBlocks(ctx, t, bm, paymentReceiver, receiverAddr, policy.PaychSettleDelay) creatorPreCollectBalance, err := paymentCreator.WalletBalance(ctx, createrAddr) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) // collect funds (from receiver, though either party can do it) collectMsg, err := paymentReceiver.PaychCollect(ctx, channel) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) + res, err = paymentReceiver.StateWaitMsg(ctx, collectMsg, 3, api.LookbackNoLimit, true) - if err != nil { - t.Fatal(err) - } - if res.Receipt.ExitCode != 0 { - t.Fatal("unable to collect on payment channel") - } + require.NoError(t, err) + require.EqualValues(t, 0, res.Receipt.ExitCode, "unable to collect on payment channel") // Finally, check the balance for the creator currentCreatorBalance, err := paymentCreator.WalletBalance(ctx, createrAddr) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) // The highest nonce voucher that the creator sent on each lane is 2000 totalVouchers := int64(len(lanes) * 2000) @@ -257,15 +197,10 @@ func TestPaymentChannels(t *testing.T, b APIBuilder, blocktime time.Duration) { // channel amount - total voucher value expectedRefund := channelAmt - totalVouchers delta := big.Sub(currentCreatorBalance, creatorPreCollectBalance) - if !delta.Equals(abi.NewTokenAmount(expectedRefund)) { - t.Fatalf("did not send correct funds from creator: expected %d, got %d", expectedRefund, delta) - } - - // shut down mining - bm.Stop() + 
require.EqualValues(t, abi.NewTokenAmount(expectedRefund), delta, "did not send correct funds from creator: expected %d, got %d", expectedRefund, delta) } -func waitForBlocks(ctx context.Context, t *testing.T, bm *BlockMiner, paymentReceiver TestNode, receiverAddr address.Address, count int) { +func waitForBlocks(ctx context.Context, t *testing.T, bm *kit.BlockMiner, paymentReceiver kit.TestFullNode, receiverAddr address.Address, count int) { // We need to add null blocks in batches, if we add too many the chain can't sync batchSize := 60 for i := 0; i < count; i += batchSize { @@ -274,8 +209,8 @@ func waitForBlocks(ctx context.Context, t *testing.T, bm *BlockMiner, paymentRec size = count - i } - // Add a batch of null blocks - atomic.StoreInt64(&bm.nulls, int64(size-1)) + // Add a batch of null blocks to advance the chain quicker through finalities. + bm.InjectNulls(abi.ChainEpoch(size - 1)) // Add a real block m, err := paymentReceiver.MpoolPushMessage(ctx, &types.Message{ @@ -283,30 +218,23 @@ func waitForBlocks(ctx context.Context, t *testing.T, bm *BlockMiner, paymentRec From: receiverAddr, Value: types.NewInt(0), }, nil) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) _, err = paymentReceiver.StateWaitMsg(ctx, m.Cid(), 1, api.LookbackNoLimit, true) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) } } -func waitForMessage(ctx context.Context, t *testing.T, paymentCreator TestNode, msgCid cid.Cid, duration time.Duration, desc string) *api.MsgLookup { +func waitForMessage(ctx context.Context, t *testing.T, paymentCreator kit.TestFullNode, msgCid cid.Cid, duration time.Duration, desc string) *api.MsgLookup { ctx, cancel := context.WithTimeout(ctx, duration) defer cancel() - fmt.Println("Waiting for", desc) + t.Log("Waiting for", desc) + res, err := paymentCreator.StateWaitMsg(ctx, msgCid, 1, api.LookbackNoLimit, true) - if err != nil { - fmt.Println("Error waiting for", desc, err) - t.Fatal(err) - } - if res.Receipt.ExitCode != 0 { - 
t.Fatalf("did not successfully send %s", desc) - } - fmt.Println("Confirmed", desc) + require.NoError(t, err) + require.EqualValues(t, 0, res.Receipt.ExitCode, "did not successfully send %s", desc) + + t.Log("Confirmed", desc) return res } diff --git a/cli/paych_test.go b/itests/paych_cli_test.go similarity index 83% rename from cli/paych_test.go rename to itests/paych_cli_test.go index 44d0a41e7..82955e6c1 100644 --- a/cli/paych_test.go +++ b/itests/paych_cli_test.go @@ -1,4 +1,4 @@ -package cli +package itests import ( "context" @@ -10,45 +10,39 @@ import ( "testing" "time" - clitest "github.com/filecoin-project/lotus/cli/test" + "github.com/filecoin-project/lotus/cli" + "github.com/filecoin-project/lotus/itests/kit" "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/lotus/chain/actors/adt" "github.com/filecoin-project/lotus/chain/actors/builtin/paych" - "github.com/filecoin-project/lotus/chain/actors/policy" cbor "github.com/ipfs/go-ipld-cbor" "github.com/stretchr/testify/require" - "github.com/filecoin-project/lotus/api/test" "github.com/filecoin-project/lotus/blockstore" "github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/chain/events" "github.com/filecoin-project/lotus/chain/types" ) -func init() { - policy.SetSupportedProofTypes(abi.RegisteredSealProof_StackedDrg2KiBV1) - policy.SetConsensusMinerMinPower(abi.NewStoragePower(2048)) - policy.SetMinVerifiedDealSize(abi.NewStoragePower(256)) -} - -// TestPaymentChannels does a basic test to exercise the payment channel CLI +// TestPaymentChannelsBasic does a basic test to exercise the payment channel CLI // commands -func TestPaymentChannels(t *testing.T) { +func TestPaymentChannelsBasic(t *testing.T) { _ = os.Setenv("BELLMAN_NO_GPU", "1") - clitest.QuietMiningLogs() + kit.QuietMiningLogs() blocktime := 5 * time.Millisecond ctx := context.Background() - nodes, addrs := clitest.StartTwoNodesOneMiner(ctx, t, blocktime) - 
paymentCreator := nodes[0] - paymentReceiver := nodes[1] - creatorAddr := addrs[0] - receiverAddr := addrs[1] + + var ( + paymentCreator kit.TestFullNode + paymentReceiver kit.TestFullNode + ) + creatorAddr, receiverAddr := startPaychCreatorReceiverMiner(ctx, t, &paymentCreator, &paymentReceiver, blocktime) // Create mock CLI - mockCLI := clitest.NewMockCLI(ctx, t, Commands) + mockCLI := kit.NewMockCLI(ctx, t, cli.Commands) creatorCLI := mockCLI.Client(paymentCreator.ListenAddr) receiverCLI := mockCLI.Client(paymentReceiver.ListenAddr) @@ -70,12 +64,16 @@ func TestPaymentChannels(t *testing.T) { // creator: paych settle creatorCLI.RunCmd("paych", "settle", chAddr.String()) + t.Log("wait for chain to reach settle height") + // Wait for the chain to reach the settle height chState := getPaychState(ctx, t, paymentReceiver, chAddr) sa, err := chState.SettlingAt() require.NoError(t, err) waitForHeight(ctx, t, paymentReceiver, sa) + t.Log("settle height reached") + // receiver: paych collect receiverCLI.RunCmd("paych", "collect", chAddr.String()) } @@ -89,17 +87,18 @@ type voucherSpec struct { // TestPaymentChannelStatus tests the payment channel status CLI command func TestPaymentChannelStatus(t *testing.T) { _ = os.Setenv("BELLMAN_NO_GPU", "1") - clitest.QuietMiningLogs() + kit.QuietMiningLogs() blocktime := 5 * time.Millisecond ctx := context.Background() - nodes, addrs := clitest.StartTwoNodesOneMiner(ctx, t, blocktime) - paymentCreator := nodes[0] - creatorAddr := addrs[0] - receiverAddr := addrs[1] + var ( + paymentCreator kit.TestFullNode + paymentReceiver kit.TestFullNode + ) + creatorAddr, receiverAddr := startPaychCreatorReceiverMiner(ctx, t, &paymentCreator, &paymentReceiver, blocktime) // Create mock CLI - mockCLI := clitest.NewMockCLI(ctx, t, Commands) + mockCLI := kit.NewMockCLI(ctx, t, cli.Commands) creatorCLI := mockCLI.Client(paymentCreator.ListenAddr) // creator: paych status-by-from-to @@ -168,18 +167,18 @@ func TestPaymentChannelStatus(t *testing.T) { 
// channel voucher commands func TestPaymentChannelVouchers(t *testing.T) { _ = os.Setenv("BELLMAN_NO_GPU", "1") - clitest.QuietMiningLogs() + kit.QuietMiningLogs() blocktime := 5 * time.Millisecond ctx := context.Background() - nodes, addrs := clitest.StartTwoNodesOneMiner(ctx, t, blocktime) - paymentCreator := nodes[0] - paymentReceiver := nodes[1] - creatorAddr := addrs[0] - receiverAddr := addrs[1] + var ( + paymentCreator kit.TestFullNode + paymentReceiver kit.TestFullNode + ) + creatorAddr, receiverAddr := startPaychCreatorReceiverMiner(ctx, t, &paymentCreator, &paymentReceiver, blocktime) // Create mock CLI - mockCLI := clitest.NewMockCLI(ctx, t, Commands) + mockCLI := kit.NewMockCLI(ctx, t, cli.Commands) creatorCLI := mockCLI.Client(paymentCreator.ListenAddr) receiverCLI := mockCLI.Client(paymentReceiver.ListenAddr) @@ -300,17 +299,18 @@ func TestPaymentChannelVouchers(t *testing.T) { // is greater than what's left in the channel, voucher create fails func TestPaymentChannelVoucherCreateShortfall(t *testing.T) { _ = os.Setenv("BELLMAN_NO_GPU", "1") - clitest.QuietMiningLogs() + kit.QuietMiningLogs() blocktime := 5 * time.Millisecond ctx := context.Background() - nodes, addrs := clitest.StartTwoNodesOneMiner(ctx, t, blocktime) - paymentCreator := nodes[0] - creatorAddr := addrs[0] - receiverAddr := addrs[1] + var ( + paymentCreator kit.TestFullNode + paymentReceiver kit.TestFullNode + ) + creatorAddr, receiverAddr := startPaychCreatorReceiverMiner(ctx, t, &paymentCreator, &paymentReceiver, blocktime) // Create mock CLI - mockCLI := clitest.NewMockCLI(ctx, t, Commands) + mockCLI := kit.NewMockCLI(ctx, t, cli.Commands) creatorCLI := mockCLI.Client(paymentCreator.ListenAddr) // creator: paych add-funds @@ -378,7 +378,7 @@ func checkVoucherOutput(t *testing.T, list string, vouchers []voucherSpec) { } // waitForHeight waits for the node to reach the given chain epoch -func waitForHeight(ctx context.Context, t *testing.T, node test.TestNode, height abi.ChainEpoch) 
{ +func waitForHeight(ctx context.Context, t *testing.T, node kit.TestFullNode, height abi.ChainEpoch) { atHeight := make(chan struct{}) chainEvents := events.NewEvents(ctx, node) err := chainEvents.ChainAt(func(ctx context.Context, ts *types.TipSet, curH abi.ChainEpoch) error { @@ -396,7 +396,7 @@ func waitForHeight(ctx context.Context, t *testing.T, node test.TestNode, height } // getPaychState gets the state of the payment channel with the given address -func getPaychState(ctx context.Context, t *testing.T, node test.TestNode, chAddr address.Address) paych.State { +func getPaychState(ctx context.Context, t *testing.T, node kit.TestFullNode, chAddr address.Address) paych.State { act, err := node.StateGetActor(ctx, chAddr, types.EmptyTSK) require.NoError(t, err) @@ -406,3 +406,25 @@ func getPaychState(ctx context.Context, t *testing.T, node test.TestNode, chAddr return chState } + +func startPaychCreatorReceiverMiner(ctx context.Context, t *testing.T, paymentCreator *kit.TestFullNode, paymentReceiver *kit.TestFullNode, blocktime time.Duration) (address.Address, address.Address) { + var miner kit.TestMiner + opts := kit.ThroughRPC() + kit.NewEnsemble(t, kit.MockProofs()). + FullNode(paymentCreator, opts). + FullNode(paymentReceiver, opts). + Miner(&miner, paymentCreator, kit.WithAllSubsystems()). + Start(). + InterconnectAll(). 
+ BeginMining(blocktime) + + // Send some funds to the second node + receiverAddr, err := paymentReceiver.WalletDefaultAddress(ctx) + require.NoError(t, err) + kit.SendFunds(ctx, t, paymentCreator, receiverAddr, abi.NewTokenAmount(1e18)) + + // Get the first node's address + creatorAddr, err := paymentCreator.WalletDefaultAddress(ctx) + require.NoError(t, err) + return creatorAddr, receiverAddr +} diff --git a/itests/sdr_upgrade_test.go b/itests/sdr_upgrade_test.go new file mode 100644 index 000000000..3aa685b09 --- /dev/null +++ b/itests/sdr_upgrade_test.go @@ -0,0 +1,103 @@ +package itests + +import ( + "context" + "sort" + "sync/atomic" + "testing" + "time" + + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/network" + "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/itests/kit" + bminer "github.com/filecoin-project/lotus/miner" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestSDRUpgrade(t *testing.T) { + kit.QuietMiningLogs() + + // oldDelay := policy.GetPreCommitChallengeDelay() + // policy.SetPreCommitChallengeDelay(5) + // t.Cleanup(func() { + // policy.SetPreCommitChallengeDelay(oldDelay) + // }) + + blocktime := 50 * time.Millisecond + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + opts := kit.ConstructorOpts(kit.SDRUpgradeAt(500, 1000)) + client, miner, ens := kit.EnsembleMinimal(t, kit.MockProofs(), opts) + ens.InterconnectAll() + + build.Clock.Sleep(time.Second) + + pledge := make(chan struct{}) + mine := int64(1) + done := make(chan struct{}) + go func() { + defer close(done) + round := 0 + for atomic.LoadInt64(&mine) != 0 { + build.Clock.Sleep(blocktime) + if err := miner.MineOne(ctx, bminer.MineReq{Done: func(bool, abi.ChainEpoch, error) { + + }}); err != nil { + t.Error(err) + } + + // 3 sealing rounds: before, during after. 
+ if round >= 3 { + continue + } + + head, err := client.ChainHead(ctx) + assert.NoError(t, err) + + // rounds happen every 500 blocks, with a 50 block offset. + if head.Height() >= abi.ChainEpoch(round*500+50) { + round++ + pledge <- struct{}{} + + ver, err := client.StateNetworkVersion(ctx, head.Key()) + assert.NoError(t, err) + switch round { + case 1: + assert.Equal(t, network.Version6, ver) + case 2: + assert.Equal(t, network.Version7, ver) + case 3: + assert.Equal(t, network.Version8, ver) + } + } + + } + }() + + // before. + miner.PledgeSectors(ctx, 9, 0, pledge) + + s, err := miner.SectorsList(ctx) + require.NoError(t, err) + sort.Slice(s, func(i, j int) bool { + return s[i] < s[j] + }) + + for i, id := range s { + info, err := miner.SectorsStatus(ctx, id, true) + require.NoError(t, err) + expectProof := abi.RegisteredSealProof_StackedDrg2KiBV1 + if i >= 3 { + // after + expectProof = abi.RegisteredSealProof_StackedDrg2KiBV1_1 + } + assert.Equal(t, expectProof, info.SealProof, "sector %d, id %d", i, id) + } + + atomic.StoreInt64(&mine, 0) + <-done +} diff --git a/itests/sector_finalize_early_test.go b/itests/sector_finalize_early_test.go new file mode 100644 index 000000000..fa5cc9dd3 --- /dev/null +++ b/itests/sector_finalize_early_test.go @@ -0,0 +1,66 @@ +package itests + +import ( + "context" + "fmt" + "testing" + "time" + + "github.com/stretchr/testify/require" + + "github.com/filecoin-project/lotus/extern/storage-sealing/sealiface" + "github.com/filecoin-project/lotus/itests/kit" + "github.com/filecoin-project/lotus/node" + "github.com/filecoin-project/lotus/node/config" + "github.com/filecoin-project/lotus/node/modules" + "github.com/filecoin-project/lotus/node/modules/dtypes" + "github.com/filecoin-project/lotus/node/repo" +) + +func TestDealsWithFinalizeEarly(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode") + } + + kit.QuietMiningLogs() + + var blockTime = 50 * time.Millisecond + + client, miner, ens := 
kit.EnsembleMinimal(t, kit.ThroughRPC(), kit.ConstructorOpts( + node.ApplyIf(node.IsType(repo.StorageMiner), node.Override(new(dtypes.GetSealingConfigFunc), func() (dtypes.GetSealingConfigFunc, error) { + return func() (sealiface.Config, error) { + cf := config.DefaultStorageMiner() + cf.Sealing.FinalizeEarly = true + return modules.ToSealingConfig(cf), nil + }, nil + })))) // no mock proofs. + ens.InterconnectAll().BeginMining(blockTime) + dh := kit.NewDealHarness(t, client, miner, miner) + + ctx := context.Background() + + miner.AddStorage(ctx, t, 1000000000, true, false) + miner.AddStorage(ctx, t, 1000000000, false, true) + + sl, err := miner.StorageList(ctx) + require.NoError(t, err) + for si, d := range sl { + i, err := miner.StorageInfo(ctx, si) + require.NoError(t, err) + + fmt.Printf("stor d:%d %+v\n", len(d), i) + } + + t.Run("single", func(t *testing.T) { + dh.RunConcurrentDeals(kit.RunConcurrentDealsOpts{N: 1}) + }) + + sl, err = miner.StorageList(ctx) + require.NoError(t, err) + for si, d := range sl { + i, err := miner.StorageInfo(ctx, si) + require.NoError(t, err) + + fmt.Printf("stor d:%d %+v\n", len(d), i) + } +} diff --git a/itests/sector_miner_collateral_test.go b/itests/sector_miner_collateral_test.go new file mode 100644 index 000000000..8e7525dba --- /dev/null +++ b/itests/sector_miner_collateral_test.go @@ -0,0 +1,132 @@ +package itests + +import ( + "context" + "strings" + "testing" + "time" + + "github.com/stretchr/testify/require" + + "github.com/filecoin-project/go-state-types/big" + + "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/build" + sealing "github.com/filecoin-project/lotus/extern/storage-sealing" + "github.com/filecoin-project/lotus/extern/storage-sealing/sealiface" + "github.com/filecoin-project/lotus/itests/kit" + "github.com/filecoin-project/lotus/node" + "github.com/filecoin-project/lotus/node/modules/dtypes" + "github.com/filecoin-project/lotus/node/repo" +) + +func TestMinerBalanceCollateral(t 
*testing.T) { + kit.QuietMiningLogs() + + blockTime := 5 * time.Millisecond + + runTest := func(t *testing.T, enabled bool, nSectors int, batching bool) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + opts := kit.ConstructorOpts( + kit.LatestActorsAt(-1), + node.ApplyIf(node.IsType(repo.StorageMiner), node.Override(new(dtypes.GetSealingConfigFunc), func() (dtypes.GetSealingConfigFunc, error) { + return func() (sealiface.Config, error) { + return sealiface.Config{ + MaxWaitDealsSectors: 4, + MaxSealingSectors: 4, + MaxSealingSectorsForDeals: 4, + AlwaysKeepUnsealedCopy: true, + WaitDealsDelay: time.Hour, + + BatchPreCommits: batching, + AggregateCommits: batching, + + PreCommitBatchWait: time.Hour, + CommitBatchWait: time.Hour, + + MinCommitBatch: nSectors, + MaxPreCommitBatch: nSectors, + MaxCommitBatch: nSectors, + + CollateralFromMinerBalance: enabled, + AvailableBalanceBuffer: big.Zero(), + DisableCollateralFallback: false, + AggregateAboveBaseFee: big.Zero(), + }, nil + }, nil + })), + ) + full, miner, ens := kit.EnsembleMinimal(t, kit.MockProofs(), opts) + ens.InterconnectAll().BeginMining(blockTime) + full.WaitTillChain(ctx, kit.HeightAtLeast(10)) + + toCheck := miner.StartPledge(ctx, nSectors, 0, nil) + + for len(toCheck) > 0 { + states := map[api.SectorState]int{} + for n := range toCheck { + st, err := miner.StorageMiner.SectorsStatus(ctx, n, false) + require.NoError(t, err) + states[st.State]++ + if st.State == api.SectorState(sealing.Proving) { + delete(toCheck, n) + } + if strings.Contains(string(st.State), "Fail") { + t.Fatal("sector in a failed state", st.State) + } + } + + build.Clock.Sleep(100 * time.Millisecond) + } + + // check that sector messages had zero value set + sl, err := miner.SectorsList(ctx) + require.NoError(t, err) + + for _, number := range sl { + si, err := miner.SectorsStatus(ctx, number, false) + require.NoError(t, err) + + require.NotNil(t, si.PreCommitMsg) + pc, err := full.ChainGetMessage(ctx, 
*si.PreCommitMsg) + require.NoError(t, err) + if enabled { + require.Equal(t, big.Zero(), pc.Value) + } else { + require.NotEqual(t, big.Zero(), pc.Value) + } + + require.NotNil(t, si.CommitMsg) + c, err := full.ChainGetMessage(ctx, *si.CommitMsg) + require.NoError(t, err) + if enabled { + require.Equal(t, big.Zero(), c.Value) + } + // commit value might be zero even with !enabled because in test devnets + // precommit deposit tends to be greater than collateral required at + // commit time. + } + } + + t.Run("nobatch", func(t *testing.T) { + runTest(t, true, 1, false) + }) + t.Run("batch-1", func(t *testing.T) { + runTest(t, true, 1, true) // individual commit instead of aggregate + }) + t.Run("batch-4", func(t *testing.T) { + runTest(t, true, 4, true) + }) + + t.Run("nobatch-frombalance-disabled", func(t *testing.T) { + runTest(t, false, 1, false) + }) + t.Run("batch-1-frombalance-disabled", func(t *testing.T) { + runTest(t, false, 1, true) // individual commit instead of aggregate + }) + t.Run("batch-4-frombalance-disabled", func(t *testing.T) { + runTest(t, false, 4, true) + }) +} diff --git a/itests/sector_pledge_test.go b/itests/sector_pledge_test.go new file mode 100644 index 000000000..d911dcb68 --- /dev/null +++ b/itests/sector_pledge_test.go @@ -0,0 +1,145 @@ +package itests + +import ( + "context" + "fmt" + "strings" + "testing" + "time" + + "github.com/filecoin-project/lotus/itests/kit" + "github.com/stretchr/testify/require" + + "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/build" + sealing "github.com/filecoin-project/lotus/extern/storage-sealing" +) + +func TestPledgeSectors(t *testing.T) { + kit.QuietMiningLogs() + + blockTime := 50 * time.Millisecond + + runTest := func(t *testing.T, nSectors int) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + _, miner, ens := kit.EnsembleMinimal(t, kit.MockProofs()) + ens.InterconnectAll().BeginMining(blockTime) + + miner.PledgeSectors(ctx, nSectors, 
0, nil) + } + + t.Run("1", func(t *testing.T) { + runTest(t, 1) + }) + + t.Run("100", func(t *testing.T) { + runTest(t, 100) + }) + + t.Run("1000", func(t *testing.T) { + if testing.Short() { // takes ~16s + t.Skip("skipping test in short mode") + } + + runTest(t, 1000) + }) +} + +func TestPledgeBatching(t *testing.T) { + blockTime := 50 * time.Millisecond + + runTest := func(t *testing.T, nSectors int) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + opts := kit.ConstructorOpts(kit.LatestActorsAt(-1)) + client, miner, ens := kit.EnsembleMinimal(t, kit.MockProofs(), opts) + ens.InterconnectAll().BeginMining(blockTime) + + client.WaitTillChain(ctx, kit.HeightAtLeast(10)) + + toCheck := miner.StartPledge(ctx, nSectors, 0, nil) + + for len(toCheck) > 0 { + states := map[api.SectorState]int{} + + for n := range toCheck { + st, err := miner.SectorsStatus(ctx, n, false) + require.NoError(t, err) + states[st.State]++ + if st.State == api.SectorState(sealing.Proving) { + delete(toCheck, n) + } + if strings.Contains(string(st.State), "Fail") { + t.Fatal("sector in a failed state", st.State) + } + } + if states[api.SectorState(sealing.SubmitPreCommitBatch)] == nSectors || + (states[api.SectorState(sealing.SubmitPreCommitBatch)] > 0 && states[api.SectorState(sealing.PreCommit1)] == 0 && states[api.SectorState(sealing.PreCommit2)] == 0) { + pcb, err := miner.SectorPreCommitFlush(ctx) + require.NoError(t, err) + if pcb != nil { + fmt.Printf("PRECOMMIT BATCH: %+v\n", pcb) + } + } + + if states[api.SectorState(sealing.SubmitCommitAggregate)] == nSectors || + (states[api.SectorState(sealing.SubmitCommitAggregate)] > 0 && states[api.SectorState(sealing.WaitSeed)] == 0 && states[api.SectorState(sealing.Committing)] == 0) { + cb, err := miner.SectorCommitFlush(ctx) + require.NoError(t, err) + if cb != nil { + fmt.Printf("COMMIT BATCH: %+v\n", cb) + } + } + + build.Clock.Sleep(100 * time.Millisecond) + fmt.Printf("WaitSeal: %d %+v\n", len(toCheck), 
states) + } + } + + t.Run("100", func(t *testing.T) { + runTest(t, 100) + }) +} + +func TestPledgeBeforeNv13(t *testing.T) { + blocktime := 50 * time.Millisecond + + runTest := func(t *testing.T, nSectors int) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + opts := kit.ConstructorOpts(kit.LatestActorsAt(1000000000)) + client, miner, ens := kit.EnsembleMinimal(t, kit.MockProofs(), opts) + ens.InterconnectAll().BeginMining(blocktime) + + client.WaitTillChain(ctx, kit.HeightAtLeast(10)) + + toCheck := miner.StartPledge(ctx, nSectors, 0, nil) + + for len(toCheck) > 0 { + states := map[api.SectorState]int{} + + for n := range toCheck { + st, err := miner.SectorsStatus(ctx, n, false) + require.NoError(t, err) + states[st.State]++ + if st.State == api.SectorState(sealing.Proving) { + delete(toCheck, n) + } + if strings.Contains(string(st.State), "Fail") { + t.Fatal("sector in a failed state", st.State) + } + } + + build.Clock.Sleep(100 * time.Millisecond) + fmt.Printf("WaitSeal: %d %+v\n", len(toCheck), states) + } + } + + t.Run("100-before-nv13", func(t *testing.T) { + runTest(t, 100) + }) +} diff --git a/itests/sector_terminate_test.go b/itests/sector_terminate_test.go new file mode 100644 index 000000000..2fb4ef0f5 --- /dev/null +++ b/itests/sector_terminate_test.go @@ -0,0 +1,150 @@ +package itests + +import ( + "context" + "testing" + "time" + + "github.com/filecoin-project/go-bitfield" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/lotus/chain/types" + sealing "github.com/filecoin-project/lotus/extern/storage-sealing" + "github.com/filecoin-project/lotus/itests/kit" + "github.com/stretchr/testify/require" +) + +func TestTerminate(t *testing.T) { + kit.Expensive(t) + + kit.QuietMiningLogs() + + var ( + blocktime = 2 * time.Millisecond + nSectors = 2 + ctx = context.Background() + ) + + opts := kit.ConstructorOpts(kit.LatestActorsAt(-1)) + client, miner, ens := kit.EnsembleMinimal(t, kit.MockProofs(), 
kit.PresealSectors(nSectors), opts) + ens.InterconnectAll().BeginMining(blocktime) + + maddr, err := miner.ActorAddress(ctx) + require.NoError(t, err) + + ssz, err := miner.ActorSectorSize(ctx, maddr) + require.NoError(t, err) + + p, err := client.StateMinerPower(ctx, maddr, types.EmptyTSK) + require.NoError(t, err) + require.Equal(t, p.MinerPower, p.TotalPower) + require.Equal(t, p.MinerPower.RawBytePower, types.NewInt(uint64(ssz)*uint64(nSectors))) + + t.Log("Seal a sector") + + miner.PledgeSectors(ctx, 1, 0, nil) + + t.Log("wait for power") + + { + // Wait until proven. + di, err := client.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK) + require.NoError(t, err) + + waitUntil := di.PeriodStart + di.WPoStProvingPeriod + 20 // 20 is some slack for the proof to be submitted + applied + t.Logf("End for head.Height > %d", waitUntil) + + ts := client.WaitTillChain(ctx, kit.HeightAtLeast(waitUntil)) + t.Logf("Now head.Height = %d", ts.Height()) + } + + nSectors++ + + p, err = client.StateMinerPower(ctx, maddr, types.EmptyTSK) + require.NoError(t, err) + require.Equal(t, p.MinerPower, p.TotalPower) + require.Equal(t, p.MinerPower.RawBytePower, types.NewInt(uint64(ssz)*uint64(nSectors))) + + t.Log("Terminate a sector") + + toTerminate := abi.SectorNumber(3) + + err = miner.SectorTerminate(ctx, toTerminate) + require.NoError(t, err) + + msgTriggerred := false +loop: + for { + si, err := miner.SectorsStatus(ctx, toTerminate, false) + require.NoError(t, err) + + t.Log("state: ", si.State, msgTriggerred) + + switch sealing.SectorState(si.State) { + case sealing.Terminating: + if !msgTriggerred { + { + p, err := miner.SectorTerminatePending(ctx) + require.NoError(t, err) + require.Len(t, p, 1) + require.Equal(t, abi.SectorNumber(3), p[0].Number) + } + + c, err := miner.SectorTerminateFlush(ctx) + require.NoError(t, err) + if c != nil { + msgTriggerred = true + t.Log("terminate message:", c) + + { + p, err := miner.SectorTerminatePending(ctx) + require.NoError(t, err) + 
require.Len(t, p, 0) + } + } + } + case sealing.TerminateWait, sealing.TerminateFinality, sealing.Removed: + break loop + } + + time.Sleep(100 * time.Millisecond) + } + + // need to wait for message to be mined and applied. + time.Sleep(5 * time.Second) + + // check power decreased + p, err = client.StateMinerPower(ctx, maddr, types.EmptyTSK) + require.NoError(t, err) + require.Equal(t, p.MinerPower, p.TotalPower) + require.Equal(t, types.NewInt(uint64(ssz)*uint64(nSectors-1)), p.MinerPower.RawBytePower) + + // check in terminated set + { + parts, err := client.StateMinerPartitions(ctx, maddr, 1, types.EmptyTSK) + require.NoError(t, err) + require.Greater(t, len(parts), 0) + + bflen := func(b bitfield.BitField) uint64 { + l, err := b.Count() + require.NoError(t, err) + return l + } + + require.Equal(t, uint64(1), bflen(parts[0].AllSectors)) + require.Equal(t, uint64(0), bflen(parts[0].LiveSectors)) + } + + di, err := client.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK) + require.NoError(t, err) + + waitUntil := di.PeriodStart + di.WPoStProvingPeriod + 20 // slack like above + t.Logf("End for head.Height > %d", waitUntil) + ts := client.WaitTillChain(ctx, kit.HeightAtLeast(waitUntil)) + t.Logf("Now head.Height = %d", ts.Height()) + + p, err = client.StateMinerPower(ctx, maddr, types.EmptyTSK) + require.NoError(t, err) + + require.Equal(t, p.MinerPower, p.TotalPower) + require.Equal(t, types.NewInt(uint64(ssz)*uint64(nSectors-1)), p.MinerPower.RawBytePower) +} diff --git a/api/test/tape.go b/itests/tape_test.go similarity index 55% rename from api/test/tape.go rename to itests/tape_test.go index 74206a97a..08970152f 100644 --- a/api/test/tape.go +++ b/itests/tape_test.go @@ -1,8 +1,7 @@ -package test +package itests import ( "context" - "fmt" "testing" "time" @@ -11,18 +10,23 @@ import ( "github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/chain/stmgr" sealing "github.com/filecoin-project/lotus/extern/storage-sealing" + 
"github.com/filecoin-project/lotus/itests/kit" "github.com/filecoin-project/lotus/node" - "github.com/filecoin-project/lotus/node/impl" "github.com/stretchr/testify/require" ) -func TestTapeFix(t *testing.T, b APIBuilder, blocktime time.Duration) { +func TestTapeFix(t *testing.T) { + kit.QuietMiningLogs() + + var blocktime = 2 * time.Millisecond + // The "before" case is disabled, because we need the builder to mock 32 GiB sectors to accurately repro this case // TODO: Make the mock sector size configurable and reenable this - //t.Run("before", func(t *testing.T) { testTapeFix(t, b, blocktime, false) }) - t.Run("after", func(t *testing.T) { testTapeFix(t, b, blocktime, true) }) + // t.Run("before", func(t *testing.T) { testTapeFix(t, b, blocktime, false) }) + t.Run("after", func(t *testing.T) { testTapeFix(t, blocktime, true) }) } -func testTapeFix(t *testing.T, b APIBuilder, blocktime time.Duration, after bool) { + +func testTapeFix(t *testing.T, blocktime time.Duration, after bool) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -38,46 +42,14 @@ func testTapeFix(t *testing.T, b APIBuilder, blocktime time.Duration, after bool }) } - n, sn := b(t, []FullNodeOpts{{Opts: func(_ []TestNode) node.Option { - return node.Override(new(stmgr.UpgradeSchedule), upgradeSchedule) - }}}, OneMiner) - - client := n[0].FullNode.(*impl.FullNodeAPI) - miner := sn[0] - - addrinfo, err := client.NetAddrsListen(ctx) - if err != nil { - t.Fatal(err) - } - - if err := miner.NetConnect(ctx, addrinfo); err != nil { - t.Fatal(err) - } - build.Clock.Sleep(time.Second) - - done := make(chan struct{}) - go func() { - defer close(done) - for ctx.Err() == nil { - build.Clock.Sleep(blocktime) - if err := sn[0].MineOne(ctx, MineNext); err != nil { - if ctx.Err() != nil { - // context was canceled, ignore the error. 
- return - } - t.Error(err) - } - } - }() - defer func() { - cancel() - <-done - }() + nopts := kit.ConstructorOpts(node.Override(new(stmgr.UpgradeSchedule), upgradeSchedule)) + _, miner, ens := kit.EnsembleMinimal(t, kit.MockProofs(), nopts) + ens.InterconnectAll().BeginMining(blocktime) sid, err := miner.PledgeSector(ctx) require.NoError(t, err) - fmt.Printf("All sectors is fsm\n") + t.Log("All sectors is fsm") // If before, we expect the precommit to fail successState := api.SectorState(sealing.CommitFailed) @@ -95,7 +67,6 @@ func testTapeFix(t *testing.T, b APIBuilder, blocktime time.Duration, after bool } require.NotEqual(t, failureState, st.State) build.Clock.Sleep(100 * time.Millisecond) - fmt.Println("WaitSeal") + t.Log("WaitSeal") } - } diff --git a/itests/verifreg_test.go b/itests/verifreg_test.go new file mode 100644 index 000000000..28a72263e --- /dev/null +++ b/itests/verifreg_test.go @@ -0,0 +1,144 @@ +package itests + +import ( + "context" + "fmt" + "strings" + "testing" + "time" + + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/go-state-types/network" + "github.com/filecoin-project/lotus/chain/wallet" + "github.com/filecoin-project/lotus/itests/kit" + verifreg4 "github.com/filecoin-project/specs-actors/v4/actors/builtin/verifreg" + "github.com/stretchr/testify/require" + + lapi "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/chain/actors" + "github.com/filecoin-project/lotus/chain/actors/builtin/verifreg" + "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/node/impl" +) + +func TestVerifiedClientTopUp(t *testing.T) { + blockTime := 100 * time.Millisecond + + test := func(nv network.Version, shouldWork bool) func(*testing.T) { + return func(t *testing.T) { + rootKey, err := wallet.GenerateKey(types.KTSecp256k1) + require.NoError(t, err) + + verifierKey, err := wallet.GenerateKey(types.KTSecp256k1) + 
require.NoError(t, err) + + verifiedClientKey, err := wallet.GenerateKey(types.KTBLS) + require.NoError(t, err) + + bal, err := types.ParseFIL("100fil") + require.NoError(t, err) + + node, _, ens := kit.EnsembleMinimal(t, kit.MockProofs(), + kit.RootVerifier(rootKey, abi.NewTokenAmount(bal.Int64())), + kit.Account(verifierKey, abi.NewTokenAmount(bal.Int64())), // assign some balance to the verifier so they can send an AddClient message. + kit.ConstructorOpts(kit.InstantaneousNetworkVersion(nv))) + + ens.InterconnectAll().BeginMining(blockTime) + + api := node.FullNode.(*impl.FullNodeAPI) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + // get VRH + vrh, err := api.StateVerifiedRegistryRootKey(ctx, types.TipSetKey{}) + fmt.Println(vrh.String()) + require.NoError(t, err) + + // import the root key. + rootAddr, err := api.WalletImport(ctx, &rootKey.KeyInfo) + require.NoError(t, err) + + // import the verifier's key. + verifierAddr, err := api.WalletImport(ctx, &verifierKey.KeyInfo) + require.NoError(t, err) + + // import the verified client's key. 
+ verifiedClientAddr, err := api.WalletImport(ctx, &verifiedClientKey.KeyInfo) + require.NoError(t, err) + + params, err := actors.SerializeParams(&verifreg4.AddVerifierParams{Address: verifierAddr, Allowance: big.NewInt(100000000000)}) + require.NoError(t, err) + + msg := &types.Message{ + From: rootAddr, + To: verifreg.Address, + Method: verifreg.Methods.AddVerifier, + Params: params, + Value: big.Zero(), + } + + sm, err := api.MpoolPushMessage(ctx, msg, nil) + require.NoError(t, err, "AddVerifier failed") + + res, err := api.StateWaitMsg(ctx, sm.Cid(), 1, lapi.LookbackNoLimit, true) + require.NoError(t, err) + require.EqualValues(t, 0, res.Receipt.ExitCode) + + // assign datacap to a client + datacap := big.NewInt(10000) + + params, err = actors.SerializeParams(&verifreg4.AddVerifiedClientParams{Address: verifiedClientAddr, Allowance: datacap}) + require.NoError(t, err) + + msg = &types.Message{ + From: verifierAddr, + To: verifreg.Address, + Method: verifreg.Methods.AddVerifiedClient, + Params: params, + Value: big.Zero(), + } + + sm, err = api.MpoolPushMessage(ctx, msg, nil) + require.NoError(t, err) + + res, err = api.StateWaitMsg(ctx, sm.Cid(), 1, lapi.LookbackNoLimit, true) + require.NoError(t, err) + require.EqualValues(t, 0, res.Receipt.ExitCode) + + // check datacap balance + dcap, err := api.StateVerifiedClientStatus(ctx, verifiedClientAddr, types.EmptyTSK) + require.NoError(t, err) + + if !dcap.Equals(datacap) { + t.Fatal("") + } + + // try to assign datacap to the same client should fail for actor v4 and below + params, err = actors.SerializeParams(&verifreg4.AddVerifiedClientParams{Address: verifiedClientAddr, Allowance: datacap}) + if err != nil { + t.Fatal(err) + } + + msg = &types.Message{ + From: verifierAddr, + To: verifreg.Address, + Method: verifreg.Methods.AddVerifiedClient, + Params: params, + Value: big.Zero(), + } + + _, err = api.MpoolPushMessage(ctx, msg, nil) + if shouldWork && err != nil { + t.Fatal("expected nil err", err) + } + + if 
!shouldWork && (err == nil || !strings.Contains(err.Error(), "verified client already exists")) { + t.Fatal("Add datacap to an existing verified client should fail") + } + } + } + + t.Run("nv12", test(network.Version12, false)) + t.Run("nv13", test(network.Version13, true)) +} diff --git a/itests/wdpost_dispute_test.go b/itests/wdpost_dispute_test.go new file mode 100644 index 000000000..f73882032 --- /dev/null +++ b/itests/wdpost_dispute_test.go @@ -0,0 +1,368 @@ +package itests + +import ( + "context" + "testing" + "time" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-bitfield" + "github.com/filecoin-project/go-state-types/crypto" + "github.com/filecoin-project/go-state-types/dline" + "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/chain/actors" + minerActor "github.com/filecoin-project/lotus/chain/actors/builtin/miner" + "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/itests/kit" + proof3 "github.com/filecoin-project/specs-actors/v3/actors/runtime/proof" + "github.com/stretchr/testify/require" +) + +func TestWindowPostDispute(t *testing.T) { + kit.Expensive(t) + + kit.QuietMiningLogs() + + blocktime := 2 * time.Millisecond + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + var ( + client kit.TestFullNode + chainMiner kit.TestMiner + evilMiner kit.TestMiner + ) + + // First, we configure two miners. After sealing, we're going to turn off the first miner so + // it doesn't submit proofs. + // + // Then we're going to manually submit bad proofs. + opts := []kit.NodeOpt{kit.ConstructorOpts(kit.LatestActorsAt(-1))} + opts = append(opts, kit.WithAllSubsystems()) + ens := kit.NewEnsemble(t, kit.MockProofs()). + FullNode(&client, opts...). + Miner(&chainMiner, &client, opts...). + Miner(&evilMiner, &client, append(opts, kit.PresealSectors(0))...). 
+ Start() + + defaultFrom, err := client.WalletDefaultAddress(ctx) + require.NoError(t, err) + + // Mine with the _second_ node (the good one). + ens.InterconnectAll().BeginMining(blocktime, &chainMiner) + + // Give the chain miner enough sectors to win every block. + chainMiner.PledgeSectors(ctx, 10, 0, nil) + // And the evil one 1 sector. No cookie for you. + evilMiner.PledgeSectors(ctx, 1, 0, nil) + + // Let the evil miner's sectors gain power. + evilMinerAddr, err := evilMiner.ActorAddress(ctx) + require.NoError(t, err) + + di, err := client.StateMinerProvingDeadline(ctx, evilMinerAddr, types.EmptyTSK) + require.NoError(t, err) + + t.Logf("Running one proving period\n") + + waitUntil := di.PeriodStart + di.WPoStProvingPeriod*2 + 1 + t.Logf("End for head.Height > %d", waitUntil) + + ts := client.WaitTillChain(ctx, kit.HeightAtLeast(waitUntil)) + t.Logf("Now head.Height = %d", ts.Height()) + + p, err := client.StateMinerPower(ctx, evilMinerAddr, types.EmptyTSK) + require.NoError(t, err) + + ssz, err := evilMiner.ActorSectorSize(ctx, evilMinerAddr) + require.NoError(t, err) + + // make sure it has gained power. + require.Equal(t, p.MinerPower.RawBytePower, types.NewInt(uint64(ssz))) + + evilSectors, err := evilMiner.SectorsList(ctx) + require.NoError(t, err) + evilSectorNo := evilSectors[0] // only one. + evilSectorLoc, err := client.StateSectorPartition(ctx, evilMinerAddr, evilSectorNo, types.EmptyTSK) + require.NoError(t, err) + + t.Log("evil miner stopping") + + // Now stop the evil miner, and start manually submitting bad proofs. + require.NoError(t, evilMiner.Stop(ctx)) + + t.Log("evil miner stopped") + + // Wait until we need to prove our sector. 
+ for { + di, err = client.StateMinerProvingDeadline(ctx, evilMinerAddr, types.EmptyTSK) + require.NoError(t, err) + if di.Index == evilSectorLoc.Deadline && di.CurrentEpoch-di.PeriodStart > 1 { + break + } + build.Clock.Sleep(blocktime) + } + + err = submitBadProof(ctx, client, evilMiner.OwnerKey.Address, evilMinerAddr, di, evilSectorLoc.Deadline, evilSectorLoc.Partition) + require.NoError(t, err, "evil proof not accepted") + + // Wait until after the proving period. + for { + di, err = client.StateMinerProvingDeadline(ctx, evilMinerAddr, types.EmptyTSK) + require.NoError(t, err) + if di.Index != evilSectorLoc.Deadline { + break + } + build.Clock.Sleep(blocktime) + } + + t.Log("accepted evil proof") + + // Make sure the evil node didn't lose any power. + p, err = client.StateMinerPower(ctx, evilMinerAddr, types.EmptyTSK) + require.NoError(t, err) + require.Equal(t, p.MinerPower.RawBytePower, types.NewInt(uint64(ssz))) + + // OBJECTION! The good miner files a DISPUTE!!!! + { + params := &minerActor.DisputeWindowedPoStParams{ + Deadline: evilSectorLoc.Deadline, + PoStIndex: 0, + } + + enc, aerr := actors.SerializeParams(params) + require.NoError(t, aerr) + + msg := &types.Message{ + To: evilMinerAddr, + Method: minerActor.Methods.DisputeWindowedPoSt, + Params: enc, + Value: types.NewInt(0), + From: defaultFrom, + } + sm, err := client.MpoolPushMessage(ctx, msg, nil) + require.NoError(t, err) + + t.Log("waiting dispute") + rec, err := client.StateWaitMsg(ctx, sm.Cid(), build.MessageConfidence, api.LookbackNoLimit, true) + require.NoError(t, err) + require.Zero(t, rec.Receipt.ExitCode, "dispute not accepted: %s", rec.Receipt.ExitCode.Error()) + } + + // Objection SUSTAINED! + // Make sure the evil node lost power. + p, err = client.StateMinerPower(ctx, evilMinerAddr, types.EmptyTSK) + require.NoError(t, err) + require.True(t, p.MinerPower.RawBytePower.IsZero()) + + // Now we begin the redemption arc. 
+ require.True(t, p.MinerPower.RawBytePower.IsZero()) + + // First, recover the sector. + + { + minerInfo, err := client.StateMinerInfo(ctx, evilMinerAddr, types.EmptyTSK) + require.NoError(t, err) + + params := &minerActor.DeclareFaultsRecoveredParams{ + Recoveries: []minerActor.RecoveryDeclaration{{ + Deadline: evilSectorLoc.Deadline, + Partition: evilSectorLoc.Partition, + Sectors: bitfield.NewFromSet([]uint64{uint64(evilSectorNo)}), + }}, + } + + enc, aerr := actors.SerializeParams(params) + require.NoError(t, aerr) + + msg := &types.Message{ + To: evilMinerAddr, + Method: minerActor.Methods.DeclareFaultsRecovered, + Params: enc, + Value: types.FromFil(30), // repay debt. + From: minerInfo.Owner, + } + sm, err := client.MpoolPushMessage(ctx, msg, nil) + require.NoError(t, err) + + rec, err := client.StateWaitMsg(ctx, sm.Cid(), build.MessageConfidence, api.LookbackNoLimit, true) + require.NoError(t, err) + require.Zero(t, rec.Receipt.ExitCode, "recovery not accepted: %s", rec.Receipt.ExitCode.Error()) + } + + // Then wait for the deadline. + for { + di, err = client.StateMinerProvingDeadline(ctx, evilMinerAddr, types.EmptyTSK) + require.NoError(t, err) + if di.Index == evilSectorLoc.Deadline { + break + } + build.Clock.Sleep(blocktime) + } + + // Now try to be evil again + err = submitBadProof(ctx, client, evilMiner.OwnerKey.Address, evilMinerAddr, di, evilSectorLoc.Deadline, evilSectorLoc.Partition) + require.Error(t, err) + require.Contains(t, err.Error(), "message execution failed: exit 16, reason: window post failed: invalid PoSt") + + // It didn't work because we're recovering. 
+} + +func TestWindowPostDisputeFails(t *testing.T) { + kit.Expensive(t) + + kit.QuietMiningLogs() + + blocktime := 2 * time.Millisecond + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + opts := kit.ConstructorOpts(kit.LatestActorsAt(-1)) + client, miner, ens := kit.EnsembleMinimal(t, kit.MockProofs(), opts) + ens.InterconnectAll().BeginMining(blocktime) + + defaultFrom, err := client.WalletDefaultAddress(ctx) + require.NoError(t, err) + + maddr, err := miner.ActorAddress(ctx) + require.NoError(t, err) + + build.Clock.Sleep(time.Second) + + miner.PledgeSectors(ctx, 10, 0, nil) + + di, err := client.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK) + require.NoError(t, err) + + t.Log("Running one proving period") + waitUntil := di.PeriodStart + di.WPoStProvingPeriod*2 + 1 + t.Logf("End for head.Height > %d", waitUntil) + + ts := client.WaitTillChain(ctx, kit.HeightAtLeast(waitUntil)) + t.Logf("Now head.Height = %d", ts.Height()) + + ssz, err := miner.ActorSectorSize(ctx, maddr) + require.NoError(t, err) + expectedPower := types.NewInt(uint64(ssz) * (kit.DefaultPresealsPerBootstrapMiner + 10)) + + p, err := client.StateMinerPower(ctx, maddr, types.EmptyTSK) + require.NoError(t, err) + + // make sure it has gained power. + require.Equal(t, p.MinerPower.RawBytePower, expectedPower) + + // Wait until a proof has been submitted. + var targetDeadline uint64 +waitForProof: + for { + deadlines, err := client.StateMinerDeadlines(ctx, maddr, types.EmptyTSK) + require.NoError(t, err) + for dlIdx, dl := range deadlines { + nonEmpty, err := dl.PostSubmissions.IsEmpty() + require.NoError(t, err) + if nonEmpty { + targetDeadline = uint64(dlIdx) + break waitForProof + } + } + + build.Clock.Sleep(blocktime) + } + + for { + di, err := client.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK) + require.NoError(t, err) + // wait until the deadline finishes. 
+ if di.Index == ((targetDeadline + 1) % di.WPoStPeriodDeadlines) { + break + } + + build.Clock.Sleep(blocktime) + } + + // Try to object to the proof. This should fail. + { + params := &minerActor.DisputeWindowedPoStParams{ + Deadline: targetDeadline, + PoStIndex: 0, + } + + enc, aerr := actors.SerializeParams(params) + require.NoError(t, aerr) + + msg := &types.Message{ + To: maddr, + Method: minerActor.Methods.DisputeWindowedPoSt, + Params: enc, + Value: types.NewInt(0), + From: defaultFrom, + } + _, err := client.MpoolPushMessage(ctx, msg, nil) + require.Error(t, err) + require.Contains(t, err.Error(), "failed to dispute valid post (RetCode=16)") + } +} + +func submitBadProof( + ctx context.Context, + client api.FullNode, owner address.Address, maddr address.Address, + di *dline.Info, dlIdx, partIdx uint64, +) error { + head, err := client.ChainHead(ctx) + if err != nil { + return err + } + + minerInfo, err := client.StateMinerInfo(ctx, maddr, head.Key()) + if err != nil { + return err + } + + commEpoch := di.Open + commRand, err := client.ChainGetRandomnessFromTickets( + ctx, head.Key(), crypto.DomainSeparationTag_PoStChainCommit, + commEpoch, nil, + ) + if err != nil { + return err + } + params := &minerActor.SubmitWindowedPoStParams{ + ChainCommitEpoch: commEpoch, + ChainCommitRand: commRand, + Deadline: dlIdx, + Partitions: []minerActor.PoStPartition{{Index: partIdx}}, + Proofs: []proof3.PoStProof{{ + PoStProof: minerInfo.WindowPoStProofType, + ProofBytes: []byte("I'm soooo very evil."), + }}, + } + + enc, aerr := actors.SerializeParams(params) + if aerr != nil { + return aerr + } + + msg := &types.Message{ + To: maddr, + Method: minerActor.Methods.SubmitWindowedPoSt, + Params: enc, + Value: types.NewInt(0), + From: owner, + } + sm, err := client.MpoolPushMessage(ctx, msg, nil) + if err != nil { + return err + } + + rec, err := client.StateWaitMsg(ctx, sm.Cid(), build.MessageConfidence, api.LookbackNoLimit, true) + if err != nil { + return err + } + if 
rec.Receipt.ExitCode.IsError() { + return rec.Receipt.ExitCode + } + return nil +} diff --git a/itests/wdpost_test.go b/itests/wdpost_test.go new file mode 100644 index 000000000..6764350cc --- /dev/null +++ b/itests/wdpost_test.go @@ -0,0 +1,314 @@ +package itests + +import ( + "context" + "fmt" + "testing" + "time" + + "github.com/stretchr/testify/require" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/specs-storage/storage" + + "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/extern/sector-storage/mock" + "github.com/filecoin-project/lotus/itests/kit" + "github.com/filecoin-project/lotus/node/impl" +) + +func TestWindowedPost(t *testing.T) { + kit.Expensive(t) + + kit.QuietMiningLogs() + + var ( + blocktime = 2 * time.Millisecond + nSectors = 10 + ) + + for _, height := range []abi.ChainEpoch{ + -1, // before + 162, // while sealing + 5000, // while proving + } { + height := height // copy to satisfy lints + t.Run(fmt.Sprintf("upgrade-%d", height), func(t *testing.T) { + testWindowPostUpgrade(t, blocktime, nSectors, height) + }) + } +} + +func testWindowPostUpgrade(t *testing.T, blocktime time.Duration, nSectors int, upgradeHeight abi.ChainEpoch) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + opts := kit.ConstructorOpts(kit.LatestActorsAt(upgradeHeight)) + client, miner, ens := kit.EnsembleMinimal(t, kit.MockProofs(), opts) + ens.InterconnectAll().BeginMining(blocktime) + + miner.PledgeSectors(ctx, nSectors, 0, nil) + + maddr, err := miner.ActorAddress(ctx) + require.NoError(t, err) + + di, err := client.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK) + require.NoError(t, err) + + mid, err := address.IDFromAddress(maddr) + require.NoError(t, err) + + t.Log("Running one 
proving period") + waitUntil := di.PeriodStart + di.WPoStProvingPeriod + 2 + t.Logf("End for head.Height > %d", waitUntil) + + ts := client.WaitTillChain(ctx, kit.HeightAtLeast(waitUntil)) + t.Logf("Now head.Height = %d", ts.Height()) + + p, err := client.StateMinerPower(ctx, maddr, types.EmptyTSK) + require.NoError(t, err) + + ssz, err := miner.ActorSectorSize(ctx, maddr) + require.NoError(t, err) + + require.Equal(t, p.MinerPower, p.TotalPower) + require.Equal(t, p.MinerPower.RawBytePower, types.NewInt(uint64(ssz)*uint64(nSectors+kit.DefaultPresealsPerBootstrapMiner))) + + t.Log("Drop some sectors") + + // Drop 2 sectors from deadline 2 partition 0 (full partition / deadline) + { + parts, err := client.StateMinerPartitions(ctx, maddr, 2, types.EmptyTSK) + require.NoError(t, err) + require.Greater(t, len(parts), 0) + + secs := parts[0].AllSectors + n, err := secs.Count() + require.NoError(t, err) + require.Equal(t, uint64(2), n) + + // Drop the partition + err = secs.ForEach(func(sid uint64) error { + return miner.StorageMiner.(*impl.StorageMinerAPI).IStorageMgr.(*mock.SectorMgr).MarkCorrupted(storage.SectorRef{ + ID: abi.SectorID{ + Miner: abi.ActorID(mid), + Number: abi.SectorNumber(sid), + }, + }, true) + }) + require.NoError(t, err) + } + + var s storage.SectorRef + + // Drop 1 sectors from deadline 3 partition 0 + { + parts, err := client.StateMinerPartitions(ctx, maddr, 3, types.EmptyTSK) + require.NoError(t, err) + require.Greater(t, len(parts), 0) + + secs := parts[0].AllSectors + n, err := secs.Count() + require.NoError(t, err) + require.Equal(t, uint64(2), n) + + // Drop the sector + sn, err := secs.First() + require.NoError(t, err) + + all, err := secs.All(2) + require.NoError(t, err) + t.Log("the sectors", all) + + s = storage.SectorRef{ + ID: abi.SectorID{ + Miner: abi.ActorID(mid), + Number: abi.SectorNumber(sn), + }, + } + + err = miner.StorageMiner.(*impl.StorageMinerAPI).IStorageMgr.(*mock.SectorMgr).MarkFailed(s, true) + require.NoError(t, err) + 
} + + di, err = client.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK) + require.NoError(t, err) + + t.Log("Go through another PP, wait for sectors to become faulty") + waitUntil = di.PeriodStart + di.WPoStProvingPeriod + 2 + t.Logf("End for head.Height > %d", waitUntil) + + ts = client.WaitTillChain(ctx, kit.HeightAtLeast(waitUntil)) + t.Logf("Now head.Height = %d", ts.Height()) + + p, err = client.StateMinerPower(ctx, maddr, types.EmptyTSK) + require.NoError(t, err) + + require.Equal(t, p.MinerPower, p.TotalPower) + + sectors := p.MinerPower.RawBytePower.Uint64() / uint64(ssz) + require.Equal(t, nSectors+kit.DefaultPresealsPerBootstrapMiner-3, int(sectors)) // -3 just removed sectors + + t.Log("Recover one sector") + + err = miner.StorageMiner.(*impl.StorageMinerAPI).IStorageMgr.(*mock.SectorMgr).MarkFailed(s, false) + require.NoError(t, err) + + di, err = client.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK) + require.NoError(t, err) + + waitUntil = di.PeriodStart + di.WPoStProvingPeriod + 2 + t.Logf("End for head.Height > %d", waitUntil) + + ts = client.WaitTillChain(ctx, kit.HeightAtLeast(waitUntil)) + t.Logf("Now head.Height = %d", ts.Height()) + + p, err = client.StateMinerPower(ctx, maddr, types.EmptyTSK) + require.NoError(t, err) + + require.Equal(t, p.MinerPower, p.TotalPower) + + sectors = p.MinerPower.RawBytePower.Uint64() / uint64(ssz) + require.Equal(t, nSectors+kit.DefaultPresealsPerBootstrapMiner-2, int(sectors)) // -2 not recovered sectors + + // pledge a sector after recovery + + miner.PledgeSectors(ctx, 1, nSectors, nil) + + { + // Wait until proven. 
+ di, err = client.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK) + require.NoError(t, err) + + waitUntil := di.PeriodStart + di.WPoStProvingPeriod + 2 + t.Logf("End for head.Height > %d\n", waitUntil) + + ts := client.WaitTillChain(ctx, kit.HeightAtLeast(waitUntil)) + t.Logf("Now head.Height = %d", ts.Height()) + } + + p, err = client.StateMinerPower(ctx, maddr, types.EmptyTSK) + require.NoError(t, err) + + require.Equal(t, p.MinerPower, p.TotalPower) + + sectors = p.MinerPower.RawBytePower.Uint64() / uint64(ssz) + require.Equal(t, nSectors+kit.DefaultPresealsPerBootstrapMiner-2+1, int(sectors)) // -2 not recovered sectors + 1 just pledged +} + +func TestWindowPostBaseFeeNoBurn(t *testing.T) { + kit.Expensive(t) + + kit.QuietMiningLogs() + + var ( + blocktime = 2 * time.Millisecond + nSectors = 10 + ) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + sched := kit.DefaultTestUpgradeSchedule + lastUpgradeHeight := sched[len(sched)-1].Height + + och := build.UpgradeClausHeight + build.UpgradeClausHeight = lastUpgradeHeight + 1 + + client, miner, ens := kit.EnsembleMinimal(t, kit.MockProofs()) + ens.InterconnectAll().BeginMining(blocktime) + + // Wait till all upgrades are done and we've passed the clause epoch. 
+ client.WaitTillChain(ctx, kit.HeightAtLeast(build.UpgradeClausHeight+1)) + + maddr, err := miner.ActorAddress(ctx) + require.NoError(t, err) + + mi, err := client.StateMinerInfo(ctx, maddr, types.EmptyTSK) + require.NoError(t, err) + + miner.PledgeSectors(ctx, nSectors, 0, nil) + wact, err := client.StateGetActor(ctx, mi.Worker, types.EmptyTSK) + require.NoError(t, err) + en := wact.Nonce + + // wait for a new message to be sent from worker address, it will be a PoSt + +waitForProof: + for { + wact, err := client.StateGetActor(ctx, mi.Worker, types.EmptyTSK) + require.NoError(t, err) + if wact.Nonce > en { + break waitForProof + } + + build.Clock.Sleep(blocktime) + } + + slm, err := client.StateListMessages(ctx, &api.MessageMatch{To: maddr}, types.EmptyTSK, 0) + require.NoError(t, err) + + pmr, err := client.StateReplay(ctx, types.EmptyTSK, slm[0]) + require.NoError(t, err) + + require.Equal(t, pmr.GasCost.BaseFeeBurn, big.Zero()) + + build.UpgradeClausHeight = och +} + +func TestWindowPostBaseFeeBurn(t *testing.T) { + kit.Expensive(t) + + kit.QuietMiningLogs() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + blocktime := 2 * time.Millisecond + + opts := kit.ConstructorOpts(kit.LatestActorsAt(-1)) + client, miner, ens := kit.EnsembleMinimal(t, kit.MockProofs(), opts) + ens.InterconnectAll().BeginMining(blocktime) + + // Ideally we'd be a bit more precise here, but getting the information we need from the + // test framework is more work than it's worth. + // + // We just need to wait till all upgrades are done. 
+ client.WaitTillChain(ctx, kit.HeightAtLeast(20)) + + maddr, err := miner.ActorAddress(ctx) + require.NoError(t, err) + + mi, err := client.StateMinerInfo(ctx, maddr, types.EmptyTSK) + require.NoError(t, err) + + miner.PledgeSectors(ctx, 10, 0, nil) + wact, err := client.StateGetActor(ctx, mi.Worker, types.EmptyTSK) + require.NoError(t, err) + en := wact.Nonce + + // wait for a new message to be sent from worker address, it will be a PoSt + +waitForProof: + for { + wact, err := client.StateGetActor(ctx, mi.Worker, types.EmptyTSK) + require.NoError(t, err) + if wact.Nonce > en { + break waitForProof + } + + build.Clock.Sleep(blocktime) + } + + slm, err := client.StateListMessages(ctx, &api.MessageMatch{To: maddr}, types.EmptyTSK, 0) + require.NoError(t, err) + + pmr, err := client.StateReplay(ctx, types.EmptyTSK, slm[0]) + require.NoError(t, err) + + require.NotEqual(t, pmr.GasCost.BaseFeeBurn, big.Zero()) +} diff --git a/lib/rpcenc/reader.go b/lib/rpcenc/reader.go index 8bd512705..23944af6c 100644 --- a/lib/rpcenc/reader.go +++ b/lib/rpcenc/reader.go @@ -78,27 +78,38 @@ func ReaderParamEncoder(addr string) jsonrpc.Option { }) } -type waitReadCloser struct { +// watchReadCloser watches the ReadCloser and closes the watch channel when +// either: (1) the ReaderCloser fails on Read (including with a benign error +// like EOF), or (2) when Close is called. +// +// Use it be notified of terminal states, in situations where a Read failure (or +// EOF) is considered a terminal state too (besides Close). 
+type watchReadCloser struct { io.ReadCloser - wait chan struct{} + watch chan struct{} + closeOnce sync.Once } -func (w *waitReadCloser) Read(p []byte) (int, error) { +func (w *watchReadCloser) Read(p []byte) (int, error) { n, err := w.ReadCloser.Read(p) if err != nil { - close(w.wait) + w.closeOnce.Do(func() { + close(w.watch) + }) } return n, err } -func (w *waitReadCloser) Close() error { - close(w.wait) +func (w *watchReadCloser) Close() error { + w.closeOnce.Do(func() { + close(w.watch) + }) return w.ReadCloser.Close() } func ReaderParamDecoder() (http.HandlerFunc, jsonrpc.ServerOption) { var readersLk sync.Mutex - readers := map[uuid.UUID]chan *waitReadCloser{} + readers := map[uuid.UUID]chan *watchReadCloser{} hnd := func(resp http.ResponseWriter, req *http.Request) { strId := path.Base(req.URL.Path) @@ -111,14 +122,14 @@ func ReaderParamDecoder() (http.HandlerFunc, jsonrpc.ServerOption) { readersLk.Lock() ch, found := readers[u] if !found { - ch = make(chan *waitReadCloser) + ch = make(chan *watchReadCloser) readers[u] = ch } readersLk.Unlock() - wr := &waitReadCloser{ + wr := &watchReadCloser{ ReadCloser: req.Body, - wait: make(chan struct{}), + watch: make(chan struct{}), } tctx, cancel := context.WithTimeout(req.Context(), Timeout) @@ -134,7 +145,9 @@ func ReaderParamDecoder() (http.HandlerFunc, jsonrpc.ServerOption) { } select { - case <-wr.wait: + case <-wr.watch: + // TODO should we check if we failed the Read, and if so + // return an HTTP 500? i.e. turn watch into a chan error? 
case <-req.Context().Done(): log.Errorf("context error in reader stream handler (2): %v", req.Context().Err()) resp.WriteHeader(500) @@ -167,7 +180,7 @@ func ReaderParamDecoder() (http.HandlerFunc, jsonrpc.ServerOption) { readersLk.Lock() ch, found := readers[u] if !found { - ch = make(chan *waitReadCloser) + ch = make(chan *watchReadCloser) readers[u] = ch } readersLk.Unlock() diff --git a/lib/stati/covar.go b/lib/stati/covar.go new file mode 100644 index 000000000..c92fd8b74 --- /dev/null +++ b/lib/stati/covar.go @@ -0,0 +1,104 @@ +package stati + +import "math" + +type Covar struct { + meanX float64 + meanY float64 + c float64 + n float64 + m2x float64 + m2y float64 +} + +func (cov1 *Covar) MeanX() float64 { + return cov1.meanX +} + +func (cov1 *Covar) MeanY() float64 { + return cov1.meanY +} + +func (cov1 *Covar) N() float64 { + return cov1.n +} + +func (cov1 *Covar) Covariance() float64 { + return cov1.c / (cov1.n - 1) +} + +func (cov1 *Covar) VarianceX() float64 { + return cov1.m2x / (cov1.n - 1) +} + +func (cov1 *Covar) StddevX() float64 { + return math.Sqrt(cov1.VarianceX()) +} + +func (cov1 *Covar) VarianceY() float64 { + return cov1.m2y / (cov1.n - 1) +} + +func (cov1 *Covar) StddevY() float64 { + return math.Sqrt(cov1.VarianceY()) +} + +func (cov1 *Covar) AddPoint(x, y float64) { + cov1.n++ + + dx := x - cov1.meanX + cov1.meanX += dx / cov1.n + dx2 := x - cov1.meanX + cov1.m2x += dx * dx2 + + dy := y - cov1.meanY + cov1.meanY += dy / cov1.n + dy2 := y - cov1.meanY + cov1.m2y += dy * dy2 + + cov1.c += dx * dy +} + +func (cov1 *Covar) Combine(cov2 *Covar) { + if cov1.n == 0 { + *cov1 = *cov2 + return + } + if cov2.n == 0 { + return + } + + if cov1.n == 1 { + cpy := *cov2 + cpy.AddPoint(cov2.meanX, cov2.meanY) + *cov1 = cpy + return + } + if cov2.n == 1 { + cov1.AddPoint(cov2.meanX, cov2.meanY) + } + + out := Covar{} + out.n = cov1.n + cov2.n + + dx := cov1.meanX - cov2.meanX + out.meanX = cov1.meanX - dx*cov2.n/out.n + out.m2x = cov1.m2x + cov2.m2x + 
dx*dx*cov1.n*cov2.n/out.n + + dy := cov1.meanY - cov2.meanY + out.meanY = cov1.meanY - dy*cov2.n/out.n + out.m2y = cov1.m2y + cov2.m2y + dy*dy*cov1.n*cov2.n/out.n + + out.c = cov1.c + cov2.c + dx*dy*cov1.n*cov2.n/out.n + *cov1 = out +} + +func (cov1 *Covar) A() float64 { + return cov1.Covariance() / cov1.VarianceX() +} +func (cov1 *Covar) B() float64 { + return cov1.meanY - cov1.meanX*cov1.A() +} +func (cov1 *Covar) Correl() float64 { + return cov1.Covariance() / cov1.StddevX() / cov1.StddevY() +} diff --git a/lib/stati/histo.go b/lib/stati/histo.go new file mode 100644 index 000000000..3c410c0d0 --- /dev/null +++ b/lib/stati/histo.go @@ -0,0 +1,56 @@ +package stati + +import ( + "math" + + "golang.org/x/xerrors" +) + +type Histogram struct { + Buckets []float64 + Counts []uint64 +} + +// NewHistogram creates a histograme with buckets defined as: +// {x > -Inf, x >= buckets[0], x >= buckets[1], ..., x >= buckets[i]} +func NewHistogram(buckets []float64) (*Histogram, error) { + if len(buckets) == 0 { + return nil, xerrors.Errorf("empty buckets") + } + prev := buckets[0] + for i, v := range buckets[1:] { + if v < prev { + return nil, xerrors.Errorf("bucket at index %d is smaller than previous %f < %f", i+1, v, prev) + } + prev = v + } + h := &Histogram{ + Buckets: append([]float64{math.Inf(-1)}, buckets...), + Counts: make([]uint64, len(buckets)+1), + } + return h, nil +} + +func (h *Histogram) Observe(x float64) { + for i, b := range h.Buckets { + if x >= b { + h.Counts[i]++ + } else { + break + } + } +} + +func (h *Histogram) Total() uint64 { + return h.Counts[0] +} + +func (h *Histogram) Get(i int) uint64 { + if i >= len(h.Counts)-2 { + return h.Counts[i] + } + return h.Counts[i+1] - h.Counts[i+2] +} +func (h *Histogram) GetRatio(i int) float64 { + return float64(h.Get(i)) / float64(h.Total()) +} diff --git a/lib/stati/meanvar.go b/lib/stati/meanvar.go new file mode 100644 index 000000000..b77aaa638 --- /dev/null +++ b/lib/stati/meanvar.go @@ -0,0 +1,66 @@ 
+package stati + +import ( + "fmt" + "math" +) + +type MeanVar struct { + n float64 + mean float64 + m2 float64 +} + +func (v1 *MeanVar) AddPoint(value float64) { + // based on https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Welford's_online_algorithm + v1.n++ + delta := value - v1.mean + v1.mean += delta / v1.n + delta2 := value - v1.mean + v1.m2 += delta * delta2 +} + +func (v1 *MeanVar) Mean() float64 { + return v1.mean +} +func (v1 *MeanVar) N() float64 { + return v1.n +} +func (v1 *MeanVar) Variance() float64 { + return v1.m2 / (v1.n - 1) +} +func (v1 *MeanVar) Stddev() float64 { + return math.Sqrt(v1.Variance()) +} + +func (v1 MeanVar) String() string { + return fmt.Sprintf("%f stddev: %f (%.0f)", v1.Mean(), v1.Stddev(), v1.N()) +} + +func (v1 *MeanVar) Combine(v2 *MeanVar) { + if v1.n == 0 { + *v1 = *v2 + return + } + if v2.n == 0 { + return + } + if v1.n == 1 { + cpy := *v2 + cpy.AddPoint(v1.mean) + *v1 = cpy + return + } + if v2.n == 1 { + v1.AddPoint(v2.mean) + return + } + + newCount := v1.n + v2.n + delta := v2.mean - v1.mean + meanDelta := delta * v2.n / newCount + m2 := v1.m2 + v2.m2 + delta*meanDelta*v1.n + v1.n = newCount + v1.mean += meanDelta + v1.m2 = m2 +} diff --git a/lib/stati/stats_test.go b/lib/stati/stats_test.go new file mode 100644 index 000000000..fa92913b6 --- /dev/null +++ b/lib/stati/stats_test.go @@ -0,0 +1,47 @@ +package stati + +import ( + "math/rand" + "testing" +) + +func TestMeanVar(t *testing.T) { + N := 16 + ss := make([]*MeanVar, N) + rng := rand.New(rand.NewSource(1)) + for i := 0; i < N; i++ { + ss[i] = &MeanVar{} + maxJ := rng.Intn(1000) + for j := 0; j < maxJ; j++ { + ss[i].AddPoint(rng.NormFloat64()*5 + 500) + } + t.Logf("mean: %f, stddev: %f, count %f", ss[i].mean, ss[i].Stddev(), ss[i].n) + } + out := &MeanVar{} + for i := 0; i < N; i++ { + out.Combine(ss[i]) + t.Logf("combine: mean: %f, stddev: %f", out.mean, out.Stddev()) + } +} + +func TestCovar(t *testing.T) { + N := 16 + ss := make([]*Covar, N) + 
rng := rand.New(rand.NewSource(1)) + for i := 0; i < N; i++ { + ss[i] = &Covar{} + maxJ := rng.Intn(1000) + 500 + for j := 0; j < maxJ; j++ { + x := rng.NormFloat64()*5 + 500 + ss[i].AddPoint(x, x*2-1000) + } + t.Logf("corell: %f, y = %f*x+%f @%.0f", ss[i].Correl(), ss[i].A(), ss[i].B(), ss[i].n) + t.Logf("\txVar: %f yVar: %f covar: %f", ss[i].StddevX(), ss[i].StddevY(), ss[i].Covariance()) + } + out := &Covar{} + for i := 0; i < N; i++ { + out.Combine(ss[i]) + t.Logf("combine: corell: %f, y = %f*x+%f", out.Correl(), out.A(), out.B()) + t.Logf("\txVar: %f yVar: %f covar: %f", out.StddevX(), out.StddevY(), out.Covariance()) + } +} diff --git a/lib/tracing/setup.go b/lib/tracing/setup.go index 141683b39..b8c0399ad 100644 --- a/lib/tracing/setup.go +++ b/lib/tracing/setup.go @@ -2,6 +2,7 @@ package tracing import ( "os" + "strings" "contrib.go.opencensus.io/exporter/jaeger" logging "github.com/ipfs/go-log/v2" @@ -10,19 +11,63 @@ import ( var log = logging.Logger("tracing") -func SetupJaegerTracing(serviceName string) *jaeger.Exporter { +const ( + // environment variable names + envCollectorEndpoint = "LOTUS_JAEGER_COLLECTOR_ENDPOINT" + envAgentEndpoint = "LOTUS_JAEGER_AGENT_ENDPOINT" + envAgentHost = "LOTUS_JAEGER_AGENT_HOST" + envAgentPort = "LOTUS_JAEGER_AGENT_PORT" + envJaegerUser = "LOTUS_JAEGER_USERNAME" + envJaegerCred = "LOTUS_JAEGER_PASSWORD" +) - if _, ok := os.LookupEnv("LOTUS_JAEGER"); !ok { +// When sending directly to the collector, agent options are ignored. +// The collector endpoint is an HTTP or HTTPs URL. +// The agent endpoint is a thrift/udp protocol and should be given +// as a string like "hostname:port". The agent can also be configured +// with separate host and port variables. 
+func jaegerOptsFromEnv(opts *jaeger.Options) bool { + var e string + var ok bool + if e, ok = os.LookupEnv(envJaegerUser); ok { + if p, ok := os.LookupEnv(envJaegerCred); ok { + opts.Username = e + opts.Password = p + } else { + log.Warn("jaeger username supplied with no password. authentication will not be used.") + } + } + if e, ok = os.LookupEnv(envCollectorEndpoint); ok { + opts.CollectorEndpoint = e + log.Infof("jaeger tracess will send to collector %s", e) + return true + } + if e, ok = os.LookupEnv(envAgentEndpoint); ok { + log.Infof("jaeger traces will be sent to agent %s", e) + opts.AgentEndpoint = e + return true + } + if e, ok = os.LookupEnv(envAgentHost); ok { + if p, ok := os.LookupEnv(envAgentPort); ok { + opts.AgentEndpoint = strings.Join([]string{e, p}, ":") + } else { + opts.AgentEndpoint = strings.Join([]string{e, "6831"}, ":") + } + log.Infof("jaeger traces will be sent to agent %s", opts.AgentEndpoint) + return true + } + return false +} + +func SetupJaegerTracing(serviceName string) *jaeger.Exporter { + opts := jaeger.Options{} + if !jaegerOptsFromEnv(&opts) { return nil } - agentEndpointURI := os.Getenv("LOTUS_JAEGER") - - je, err := jaeger.NewExporter(jaeger.Options{ - AgentEndpoint: agentEndpointURI, - ServiceName: serviceName, - }) + opts.ServiceName = serviceName + je, err := jaeger.NewExporter(opts) if err != nil { - log.Errorw("Failed to create the Jaeger exporter", "error", err) + log.Errorw("failed to create the jaeger exporter", "error", err) return nil } diff --git a/lotuspond/spawn.go b/lotuspond/spawn.go index 9085bc24a..900c372b1 100644 --- a/lotuspond/spawn.go +++ b/lotuspond/spawn.go @@ -11,6 +11,9 @@ import ( "sync/atomic" "time" + "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/chain/actors/builtin/miner" + "github.com/google/uuid" "golang.org/x/xerrors" @@ -48,7 +51,12 @@ func (api *api) Spawn() (nodeInfo, error) { } sbroot := filepath.Join(dir, "preseal") - genm, ki, err := 
seed.PreSeal(genMiner, abi.RegisteredSealProof_StackedDrg2KiBV1, 0, 2, sbroot, []byte("8"), nil, false) + spt, err := miner.SealProofTypeFromSectorSize(2<<10, build.NewestNetworkVersion) + if err != nil { + return nodeInfo{}, err + } + + genm, ki, err := seed.PreSeal(genMiner, spt, 0, 2, sbroot, []byte("8"), nil, false) if err != nil { return nodeInfo{}, xerrors.Errorf("preseal failed: %w", err) } @@ -71,6 +79,7 @@ func (api *api) Spawn() (nodeInfo, error) { template.VerifregRootKey = gen.DefaultVerifregRootkeyActor template.RemainderAccount = gen.DefaultRemainderAccountActor template.NetworkName = "pond-" + uuid.New().String() + template.NetworkVersion = build.NewestNetworkVersion tb, err := json.Marshal(&template) if err != nil { diff --git a/markets/pricing/cli.go b/markets/pricing/cli.go new file mode 100644 index 000000000..3c2a5f248 --- /dev/null +++ b/markets/pricing/cli.go @@ -0,0 +1,48 @@ +package pricing + +import ( + "bytes" + "context" + "encoding/json" + "os/exec" + + "github.com/filecoin-project/go-fil-markets/retrievalmarket" + "github.com/filecoin-project/lotus/node/modules/dtypes" + "golang.org/x/xerrors" +) + +func ExternalRetrievalPricingFunc(cmd string) dtypes.RetrievalPricingFunc { + return func(ctx context.Context, pricingInput retrievalmarket.PricingInput) (retrievalmarket.Ask, error) { + return runPricingFunc(ctx, cmd, pricingInput) + } +} + +func runPricingFunc(_ context.Context, cmd string, params interface{}) (retrievalmarket.Ask, error) { + j, err := json.Marshal(params) + if err != nil { + return retrievalmarket.Ask{}, err + } + + var out bytes.Buffer + var errb bytes.Buffer + + c := exec.Command("sh", "-c", cmd) + c.Stdin = bytes.NewReader(j) + c.Stdout = &out + c.Stderr = &errb + + switch err := c.Run().(type) { + case nil: + bz := out.Bytes() + resp := retrievalmarket.Ask{} + + if err := json.Unmarshal(bz, &resp); err != nil { + return resp, xerrors.Errorf("failed to parse pricing output %s, err=%w", string(bz), err) + } + return 
resp, nil + case *exec.ExitError: + return retrievalmarket.Ask{}, xerrors.Errorf("pricing func exited with error: %s", errb.String()) + default: + return retrievalmarket.Ask{}, xerrors.Errorf("pricing func cmd run error: %w", err) + } +} diff --git a/markets/retrievaladapter/provider.go b/markets/retrievaladapter/provider.go index e58257c8a..06ebd989a 100644 --- a/markets/retrievaladapter/provider.go +++ b/markets/retrievaladapter/provider.go @@ -4,36 +4,42 @@ import ( "context" "io" + "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/api/v1api" + "github.com/filecoin-project/lotus/node/modules/dtypes" + "github.com/filecoin-project/lotus/storage/sectorblocks" + "github.com/hashicorp/go-multierror" + "golang.org/x/xerrors" "github.com/ipfs/go-cid" - logging "github.com/ipfs/go-log/v2" "github.com/filecoin-project/lotus/chain/actors/builtin/paych" "github.com/filecoin-project/lotus/chain/types" sectorstorage "github.com/filecoin-project/lotus/extern/sector-storage" "github.com/filecoin-project/lotus/extern/sector-storage/storiface" - "github.com/filecoin-project/lotus/storage" "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-fil-markets/retrievalmarket" "github.com/filecoin-project/go-fil-markets/shared" "github.com/filecoin-project/go-state-types/abi" specstorage "github.com/filecoin-project/specs-storage/storage" + + logging "github.com/ipfs/go-log/v2" ) var log = logging.Logger("retrievaladapter") type retrievalProviderNode struct { - miner *storage.Miner - sealer sectorstorage.SectorManager - full v1api.FullNode + maddr address.Address + secb sectorblocks.SectorBuilder + pp sectorstorage.PieceProvider + full v1api.FullNode } // NewRetrievalProviderNode returns a new node adapter for a retrieval provider that talks to the // Lotus Node -func NewRetrievalProviderNode(miner *storage.Miner, sealer sectorstorage.SectorManager, full v1api.FullNode) retrievalmarket.RetrievalProviderNode { - return 
&retrievalProviderNode{miner, sealer, full} +func NewRetrievalProviderNode(maddr dtypes.MinerAddress, secb sectorblocks.SectorBuilder, pp sectorstorage.PieceProvider, full v1api.FullNode) retrievalmarket.RetrievalProviderNode { + return &retrievalProviderNode{address.Address(maddr), secb, pp, full} } func (rpn *retrievalProviderNode) GetMinerWorkerAddress(ctx context.Context, miner address.Address, tok shared.TipSetToken) (address.Address, error) { @@ -48,13 +54,12 @@ func (rpn *retrievalProviderNode) GetMinerWorkerAddress(ctx context.Context, min func (rpn *retrievalProviderNode) UnsealSector(ctx context.Context, sectorID abi.SectorNumber, offset abi.UnpaddedPieceSize, length abi.UnpaddedPieceSize) (io.ReadCloser, error) { log.Debugf("get sector %d, offset %d, length %d", sectorID, offset, length) - - si, err := rpn.miner.GetSectorInfo(sectorID) + si, err := rpn.sectorsStatus(ctx, sectorID, true) if err != nil { return nil, err } - mid, err := address.IDFromAddress(rpn.miner.Address()) + mid, err := address.IDFromAddress(rpn.maddr) if err != nil { return nil, err } @@ -64,27 +69,21 @@ func (rpn *retrievalProviderNode) UnsealSector(ctx context.Context, sectorID abi Miner: abi.ActorID(mid), Number: sectorID, }, - ProofType: si.SectorType, + ProofType: si.SealProof, } - // Set up a pipe so that data can be written from the unsealing process - // into the reader returned by this function - r, w := io.Pipe() - go func() { - var commD cid.Cid - if si.CommD != nil { - commD = *si.CommD - } + var commD cid.Cid + if si.CommD != nil { + commD = *si.CommD + } - // Read the piece into the pipe's writer, unsealing the piece if necessary - log.Debugf("read piece in sector %d, offset %d, length %d from miner %d", sectorID, offset, length, mid) - err := rpn.sealer.ReadPiece(ctx, w, ref, storiface.UnpaddedByteIndex(offset), length, si.TicketValue, commD) - if err != nil { - log.Errorf("failed to unseal piece from sector %d: %s", sectorID, err) - } - // Close the reader with any 
error that was returned while reading the piece - _ = w.CloseWithError(err) - }() + // Get a reader for the piece, unsealing the piece if necessary + log.Debugf("read piece in sector %d, offset %d, length %d from miner %d", sectorID, offset, length, mid) + r, unsealed, err := rpn.pp.ReadPiece(ctx, ref, storiface.UnpaddedByteIndex(offset), length, si.Ticket.Value, commD) + if err != nil { + return nil, xerrors.Errorf("failed to unseal piece from sector %d: %w", sectorID, err) + } + _ = unsealed // todo: use return r, nil } @@ -104,3 +103,109 @@ func (rpn *retrievalProviderNode) GetChainHead(ctx context.Context) (shared.TipS return head.Key().Bytes(), head.Height(), nil } + +func (rpn *retrievalProviderNode) IsUnsealed(ctx context.Context, sectorID abi.SectorNumber, offset abi.UnpaddedPieceSize, length abi.UnpaddedPieceSize) (bool, error) { + si, err := rpn.sectorsStatus(ctx, sectorID, true) + if err != nil { + return false, xerrors.Errorf("failed to get sector info: %w", err) + } + + mid, err := address.IDFromAddress(rpn.maddr) + if err != nil { + return false, err + } + + ref := specstorage.SectorRef{ + ID: abi.SectorID{ + Miner: abi.ActorID(mid), + Number: sectorID, + }, + ProofType: si.SealProof, + } + + log.Debugf("will call IsUnsealed now sector=%+v, offset=%d, size=%d", sectorID, offset, length) + return rpn.pp.IsUnsealed(ctx, ref, storiface.UnpaddedByteIndex(offset), length) +} + +// GetRetrievalPricingInput takes a set of candidate storage deals that can serve a retrieval request, +// and returns an minimally populated PricingInput. This PricingInput should be enhanced +// with more data, and passed to the pricing function to determine the final quoted price. 
+func (rpn *retrievalProviderNode) GetRetrievalPricingInput(ctx context.Context, pieceCID cid.Cid, storageDeals []abi.DealID) (retrievalmarket.PricingInput, error) { + resp := retrievalmarket.PricingInput{} + + head, err := rpn.full.ChainHead(ctx) + if err != nil { + return resp, xerrors.Errorf("failed to get chain head: %w", err) + } + tsk := head.Key() + + var mErr error + + for _, dealID := range storageDeals { + ds, err := rpn.full.StateMarketStorageDeal(ctx, dealID, tsk) + if err != nil { + log.Warnf("failed to look up deal %d on chain: err=%w", dealID, err) + mErr = multierror.Append(mErr, err) + continue + } + if ds.Proposal.VerifiedDeal { + resp.VerifiedDeal = true + } + + if ds.Proposal.PieceCID.Equals(pieceCID) { + resp.PieceSize = ds.Proposal.PieceSize.Unpadded() + } + + // If we've discovered a verified deal with the required PieceCID, we don't need + // to lookup more deals and we're done. + if resp.VerifiedDeal && resp.PieceSize != 0 { + break + } + } + + // Note: The piece size can never actually be zero. We only use it to here + // to assert that we didn't find a matching piece. 
+ if resp.PieceSize == 0 { + if mErr == nil { + return resp, xerrors.New("failed to find matching piece") + } + + return resp, xerrors.Errorf("failed to fetch storage deal state: %w", mErr) + } + + return resp, nil +} + +func (rpn *retrievalProviderNode) sectorsStatus(ctx context.Context, sid abi.SectorNumber, showOnChainInfo bool) (api.SectorInfo, error) { + sInfo, err := rpn.secb.SectorsStatus(ctx, sid, false) + if err != nil { + return api.SectorInfo{}, err + } + + if !showOnChainInfo { + return sInfo, nil + } + + onChainInfo, err := rpn.full.StateSectorGetInfo(ctx, rpn.maddr, sid, types.EmptyTSK) + if err != nil { + return sInfo, err + } + if onChainInfo == nil { + return sInfo, nil + } + sInfo.SealProof = onChainInfo.SealProof + sInfo.Activation = onChainInfo.Activation + sInfo.Expiration = onChainInfo.Expiration + sInfo.DealWeight = onChainInfo.DealWeight + sInfo.VerifiedDealWeight = onChainInfo.VerifiedDealWeight + sInfo.InitialPledge = onChainInfo.InitialPledge + + ex, err := rpn.full.StateSectorExpiration(ctx, rpn.maddr, sid, types.EmptyTSK) + if err != nil { + return sInfo, nil + } + sInfo.OnTime = ex.OnTime + sInfo.Early = ex.Early + + return sInfo, nil +} diff --git a/markets/retrievaladapter/provider_test.go b/markets/retrievaladapter/provider_test.go new file mode 100644 index 000000000..eca3b1152 --- /dev/null +++ b/markets/retrievaladapter/provider_test.go @@ -0,0 +1,202 @@ +package retrievaladapter + +import ( + "context" + "testing" + + "github.com/filecoin-project/go-fil-markets/retrievalmarket" + testnet "github.com/filecoin-project/go-fil-markets/shared_testutil" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/api/mocks" + "github.com/filecoin-project/lotus/chain/actors/builtin/market" + "github.com/filecoin-project/lotus/chain/types" + "github.com/golang/mock/gomock" + "github.com/ipfs/go-cid" + "github.com/stretchr/testify/require" + "golang.org/x/xerrors" +) + 
+func TestGetPricingInput(t *testing.T) { + ctx := context.Background() + tsk := &types.TipSet{} + key := tsk.Key() + + pcid := testnet.GenerateCids(1)[0] + deals := []abi.DealID{1, 2} + paddedSize := abi.PaddedPieceSize(128) + unpaddedSize := paddedSize.Unpadded() + + tcs := map[string]struct { + pieceCid cid.Cid + deals []abi.DealID + fFnc func(node *mocks.MockFullNode) + + expectedErrorStr string + expectedVerified bool + expectedPieceSize abi.UnpaddedPieceSize + }{ + "error when fails to fetch chain head": { + fFnc: func(n *mocks.MockFullNode) { + n.EXPECT().ChainHead(gomock.Any()).Return(tsk, xerrors.New("chain head error")).Times(1) + }, + expectedErrorStr: "chain head error", + }, + + "error when no piece matches": { + fFnc: func(n *mocks.MockFullNode) { + out1 := &api.MarketDeal{ + Proposal: market.DealProposal{ + PieceCID: testnet.GenerateCids(1)[0], + }, + } + out2 := &api.MarketDeal{ + Proposal: market.DealProposal{ + PieceCID: testnet.GenerateCids(1)[0], + }, + } + + n.EXPECT().ChainHead(gomock.Any()).Return(tsk, nil).Times(1) + gomock.InOrder( + n.EXPECT().StateMarketStorageDeal(gomock.Any(), deals[0], key).Return(out1, nil), + n.EXPECT().StateMarketStorageDeal(gomock.Any(), deals[1], key).Return(out2, nil), + ) + + }, + expectedErrorStr: "failed to find matching piece", + }, + + "error when fails to fetch deal state": { + fFnc: func(n *mocks.MockFullNode) { + out1 := &api.MarketDeal{ + Proposal: market.DealProposal{ + PieceCID: pcid, + PieceSize: paddedSize, + }, + } + out2 := &api.MarketDeal{ + Proposal: market.DealProposal{ + PieceCID: testnet.GenerateCids(1)[0], + VerifiedDeal: true, + }, + } + + n.EXPECT().ChainHead(gomock.Any()).Return(tsk, nil).Times(1) + gomock.InOrder( + n.EXPECT().StateMarketStorageDeal(gomock.Any(), deals[0], key).Return(out1, xerrors.New("error 1")), + n.EXPECT().StateMarketStorageDeal(gomock.Any(), deals[1], key).Return(out2, xerrors.New("error 2")), + ) + + }, + expectedErrorStr: "failed to fetch storage deal state", + }, 
+ + "verified is true even if one deal is verified and we get the correct piecesize": { + fFnc: func(n *mocks.MockFullNode) { + out1 := &api.MarketDeal{ + Proposal: market.DealProposal{ + PieceCID: pcid, + PieceSize: paddedSize, + }, + } + out2 := &api.MarketDeal{ + Proposal: market.DealProposal{ + PieceCID: testnet.GenerateCids(1)[0], + VerifiedDeal: true, + }, + } + + n.EXPECT().ChainHead(gomock.Any()).Return(tsk, nil).Times(1) + gomock.InOrder( + n.EXPECT().StateMarketStorageDeal(gomock.Any(), deals[0], key).Return(out1, nil), + n.EXPECT().StateMarketStorageDeal(gomock.Any(), deals[1], key).Return(out2, nil), + ) + + }, + expectedPieceSize: unpaddedSize, + expectedVerified: true, + }, + + "success even if one deal state fetch errors out but the other deal is verified and has the required piececid": { + fFnc: func(n *mocks.MockFullNode) { + out1 := &api.MarketDeal{ + Proposal: market.DealProposal{ + PieceCID: testnet.GenerateCids(1)[0], + }, + } + out2 := &api.MarketDeal{ + Proposal: market.DealProposal{ + PieceCID: pcid, + PieceSize: paddedSize, + VerifiedDeal: true, + }, + } + + n.EXPECT().ChainHead(gomock.Any()).Return(tsk, nil).Times(1) + gomock.InOrder( + n.EXPECT().StateMarketStorageDeal(gomock.Any(), deals[0], key).Return(out1, xerrors.New("some error")), + n.EXPECT().StateMarketStorageDeal(gomock.Any(), deals[1], key).Return(out2, nil), + ) + + }, + expectedPieceSize: unpaddedSize, + expectedVerified: true, + }, + + "verified is false if both deals are unverified and we get the correct piece size": { + fFnc: func(n *mocks.MockFullNode) { + out1 := &api.MarketDeal{ + Proposal: market.DealProposal{ + PieceCID: pcid, + PieceSize: paddedSize, + VerifiedDeal: false, + }, + } + out2 := &api.MarketDeal{ + Proposal: market.DealProposal{ + PieceCID: testnet.GenerateCids(1)[0], + VerifiedDeal: false, + }, + } + + n.EXPECT().ChainHead(gomock.Any()).Return(tsk, nil).Times(1) + gomock.InOrder( + n.EXPECT().StateMarketStorageDeal(gomock.Any(), deals[0], 
key).Return(out1, nil), + n.EXPECT().StateMarketStorageDeal(gomock.Any(), deals[1], key).Return(out2, nil), + ) + + }, + expectedPieceSize: unpaddedSize, + expectedVerified: false, + }, + } + + for name, tc := range tcs { + tc := tc + t.Run(name, func(t *testing.T) { + mockCtrl := gomock.NewController(t) + // when test is done, assert expectations on all mock objects. + defer mockCtrl.Finish() + + mockFull := mocks.NewMockFullNode(mockCtrl) + rpn := &retrievalProviderNode{ + full: mockFull, + } + if tc.fFnc != nil { + tc.fFnc(mockFull) + } + + resp, err := rpn.GetRetrievalPricingInput(ctx, pcid, deals) + + if tc.expectedErrorStr != "" { + require.Error(t, err) + require.Contains(t, err.Error(), tc.expectedErrorStr) + require.Equal(t, retrievalmarket.PricingInput{}, resp) + } else { + require.NoError(t, err) + require.Equal(t, tc.expectedPieceSize, resp.PieceSize) + require.Equal(t, tc.expectedVerified, resp.VerifiedDeal) + } + }) + } +} diff --git a/markets/storageadapter/client.go b/markets/storageadapter/client.go index 9357cc271..80ead2be3 100644 --- a/markets/storageadapter/client.go +++ b/markets/storageadapter/client.go @@ -160,8 +160,16 @@ func (c *ClientNodeAdapter) ValidatePublishedDeal(ctx context.Context, deal stor return 0, xerrors.Errorf("failed to resolve from msg ID addr: %w", err) } - if fromid != mi.Worker { - return 0, xerrors.Errorf("deal wasn't published by storage provider: from=%s, provider=%s", pubmsg.From, deal.Proposal.Provider) + var pubOk bool + pubAddrs := append([]address.Address{mi.Worker, mi.Owner}, mi.ControlAddresses...) 
+ for _, a := range pubAddrs { + if fromid == a { + pubOk = true + break + } + } + if !pubOk { + return 0, xerrors.Errorf("deal wasn't published by storage provider: from=%s, provider=%s,%+v", pubmsg.From, deal.Proposal.Provider, pubAddrs) } if pubmsg.To != miner2.StorageMarketActorAddr { diff --git a/markets/storageadapter/dealpublisher.go b/markets/storageadapter/dealpublisher.go index 157c85ed7..9f7ba1629 100644 --- a/markets/storageadapter/dealpublisher.go +++ b/markets/storageadapter/dealpublisher.go @@ -7,27 +7,33 @@ import ( "sync" "time" + "github.com/ipfs/go-cid" "go.uber.org/fx" - - "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/lotus/node/config" + "golang.org/x/xerrors" "github.com/filecoin-project/go-address" - "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + market2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/market" + "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/chain/actors" "github.com/filecoin-project/lotus/chain/actors/builtin/market" "github.com/filecoin-project/lotus/chain/actors/builtin/miner" "github.com/filecoin-project/lotus/chain/types" - market2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/market" - "github.com/ipfs/go-cid" - "golang.org/x/xerrors" + "github.com/filecoin-project/lotus/node/config" + "github.com/filecoin-project/lotus/storage" ) type dealPublisherAPI interface { ChainHead(context.Context) (*types.TipSet, error) MpoolPushMessage(ctx context.Context, msg *types.Message, spec *api.MessageSendSpec) (*types.SignedMessage, error) StateMinerInfo(context.Context, address.Address, types.TipSetKey) (miner.MinerInfo, error) + + WalletBalance(context.Context, address.Address) (types.BigInt, error) + WalletHas(context.Context, address.Address) (bool, error) + StateAccountKey(context.Context, address.Address, types.TipSetKey) (address.Address, error) 
+ StateLookupID(context.Context, address.Address, types.TipSetKey) (address.Address, error) } // DealPublisher batches deal publishing so that many deals can be included in @@ -40,6 +46,7 @@ type dealPublisherAPI interface { // publish message with all deals in the queue. type DealPublisher struct { api dealPublisherAPI + as *storage.AddressSelector ctx context.Context Shutdown context.CancelFunc @@ -87,14 +94,14 @@ type PublishMsgConfig struct { func NewDealPublisher( feeConfig *config.MinerFeeConfig, publishMsgCfg PublishMsgConfig, -) func(lc fx.Lifecycle, full api.FullNode) *DealPublisher { - return func(lc fx.Lifecycle, full api.FullNode) *DealPublisher { +) func(lc fx.Lifecycle, full api.FullNode, as *storage.AddressSelector) *DealPublisher { + return func(lc fx.Lifecycle, full api.FullNode, as *storage.AddressSelector) *DealPublisher { maxFee := abi.NewTokenAmount(0) if feeConfig != nil { maxFee = abi.TokenAmount(feeConfig.MaxPublishDealsFee) } publishSpec := &api.MessageSendSpec{MaxFee: maxFee} - dp := newDealPublisher(full, publishMsgCfg, publishSpec) + dp := newDealPublisher(full, as, publishMsgCfg, publishSpec) lc.Append(fx.Hook{ OnStop: func(ctx context.Context) error { dp.Shutdown() @@ -107,12 +114,14 @@ func NewDealPublisher( func newDealPublisher( dpapi dealPublisherAPI, + as *storage.AddressSelector, publishMsgCfg PublishMsgConfig, publishSpec *api.MessageSendSpec, ) *DealPublisher { ctx, cancel := context.WithCancel(context.Background()) return &DealPublisher{ api: dpapi, + as: as, ctx: ctx, Shutdown: cancel, maxDealsPerPublishMsg: publishMsgCfg.MaxDealsPerMsg, @@ -345,9 +354,14 @@ func (p *DealPublisher) publishDealProposals(deals []market2.ClientDealProposal) return cid.Undef, xerrors.Errorf("serializing PublishStorageDeals params failed: %w", err) } + addr, _, err := p.as.AddressFor(p.ctx, p.api, mi, api.DealPublishAddr, big.Zero(), big.Zero()) + if err != nil { + return cid.Undef, xerrors.Errorf("selecting address for publishing deals: %w", err) 
+ } + smsg, err := p.api.MpoolPushMessage(p.ctx, &types.Message{ To: market.Address, - From: mi.Worker, + From: addr, Value: types.NewInt(0), Method: market.Methods.PublishStorageDeals, Params: params, diff --git a/markets/storageadapter/dealpublisher_test.go b/markets/storageadapter/dealpublisher_test.go index 746c67d0e..b2f107bf4 100644 --- a/markets/storageadapter/dealpublisher_test.go +++ b/markets/storageadapter/dealpublisher_test.go @@ -25,6 +25,7 @@ import ( ) func TestDealPublisher(t *testing.T) { + t.Skip("this test randomly fails in various subtests; see issue #6799") testCases := []struct { name string publishPeriod time.Duration @@ -94,7 +95,7 @@ func TestDealPublisher(t *testing.T) { dpapi := newDPAPI(t) // Create a deal publisher - dp := newDealPublisher(dpapi, PublishMsgConfig{ + dp := newDealPublisher(dpapi, nil, PublishMsgConfig{ Period: tc.publishPeriod, MaxDealsPerMsg: tc.maxDealsPerMsg, }, &api.MessageSendSpec{MaxFee: abi.NewTokenAmount(1)}) @@ -134,7 +135,7 @@ func TestForcePublish(t *testing.T) { // Create a deal publisher start := time.Now() publishPeriod := time.Hour - dp := newDealPublisher(dpapi, PublishMsgConfig{ + dp := newDealPublisher(dpapi, nil, PublishMsgConfig{ Period: publishPeriod, MaxDealsPerMsg: 10, }, &api.MessageSendSpec{MaxFee: abi.NewTokenAmount(1)}) @@ -320,6 +321,22 @@ func (d *dpAPI) MpoolPushMessage(ctx context.Context, msg *types.Message, spec * return &types.SignedMessage{Message: *msg}, nil } +func (d *dpAPI) WalletBalance(ctx context.Context, a address.Address) (types.BigInt, error) { + panic("don't call me") +} + +func (d *dpAPI) WalletHas(ctx context.Context, a address.Address) (bool, error) { + panic("don't call me") +} + +func (d *dpAPI) StateAccountKey(ctx context.Context, a address.Address, key types.TipSetKey) (address.Address, error) { + panic("don't call me") +} + +func (d *dpAPI) StateLookupID(ctx context.Context, a address.Address, key types.TipSetKey) (address.Address, error) { + panic("don't call me") +} 
+ func getClientActor(t *testing.T) address.Address { return tutils.NewActorAddr(t, "client") } diff --git a/markets/storageadapter/provider.go b/markets/storageadapter/provider.go index fbeaf3b3d..b899c0810 100644 --- a/markets/storageadapter/provider.go +++ b/markets/storageadapter/provider.go @@ -95,11 +95,11 @@ func (n *ProviderNodeAdapter) OnDealComplete(ctx context.Context, deal storagema return nil, xerrors.Errorf("deal.PublishCid can't be nil") } - sdInfo := sealing.DealInfo{ + sdInfo := api.PieceDealInfo{ DealID: deal.DealID, DealProposal: &deal.Proposal, PublishCid: deal.PublishCid, - DealSchedule: sealing.DealSchedule{ + DealSchedule: api.DealSchedule{ StartEpoch: deal.ClientDealProposal.Proposal.StartEpoch, EndEpoch: deal.ClientDealProposal.Proposal.EndEpoch, }, @@ -240,19 +240,19 @@ func (n *ProviderNodeAdapter) LocatePieceForDealWithinSector(ctx context.Context // TODO: better strategy (e.g. look for already unsealed) var best api.SealedRef - var bestSi sealing.SectorInfo + var bestSi api.SectorInfo for _, r := range refs { - si, err := n.secb.Miner.GetSectorInfo(r.SectorID) + si, err := n.secb.SectorBuilder.SectorsStatus(ctx, r.SectorID, false) if err != nil { return 0, 0, 0, xerrors.Errorf("getting sector info: %w", err) } - if si.State == sealing.Proving { + if si.State == api.SectorState(sealing.Proving) { best = r bestSi = si break } } - if bestSi.State == sealing.UndefinedSectorState { + if bestSi.State == api.SectorState(sealing.UndefinedSectorState) { return 0, 0, 0, xerrors.New("no sealed sector found") } return best.SectorID, best.Offset, best.Size.Padded(), nil diff --git a/miner/miner.go b/miner/miner.go index 62bec5b55..1727f6942 100644 --- a/miner/miner.go +++ b/miner/miner.go @@ -6,6 +6,7 @@ import ( "crypto/rand" "encoding/binary" "fmt" + "os" "sync" "time" @@ -325,7 +326,9 @@ minerLoop: if err := m.sf.MinedBlock(b.Header, base.TipSet.Height()+base.NullRounds); err != nil { log.Errorf(" SLASH FILTER ERROR: %s", err) - continue + if 
os.Getenv("LOTUS_MINER_NO_SLASHFILTER") != "_yes_i_know_i_can_and_probably_will_lose_all_my_fil_and_power_" { + continue + } } blkKey := fmt.Sprintf("%d", b.Header.Height) @@ -419,7 +422,7 @@ func (m *Miner) GetBestMiningCandidate(ctx context.Context) (*MiningBase, error) // 1. func (m *Miner) mineOne(ctx context.Context, base *MiningBase) (minedBlock *types.BlockMsg, err error) { log.Debugw("attempting to mine a block", "tipset", types.LogCids(base.TipSet.Cids())) - start := build.Clock.Now() + tStart := build.Clock.Now() round := base.TipSet.Height() + base.NullRounds + 1 @@ -428,6 +431,9 @@ func (m *Miner) mineOne(ctx context.Context, base *MiningBase) (minedBlock *type var mbi *api.MiningBaseInfo var rbase types.BeaconEntry defer func() { + + var hasMinPower bool + // mbi can be nil if we are deep in penalty and there are 0 eligible sectors // in the current deadline. If this case - put together a dummy one for reporting // https://github.com/filecoin-project/lotus/blob/v1.9.0/chain/stmgr/utils.go#L500-L502 @@ -435,17 +441,24 @@ func (m *Miner) mineOne(ctx context.Context, base *MiningBase) (minedBlock *type mbi = &api.MiningBaseInfo{ NetworkPower: big.NewInt(-1), // we do not know how big the network is at this point EligibleForMining: false, - MinerPower: big.NewInt(0), // but we do know we do not have anything + MinerPower: big.NewInt(0), // but we do know we do not have anything eligible + } + + // try to opportunistically pull actual power and plug it into the fake mbi + if pow, err := m.api.StateMinerPower(ctx, m.address, base.TipSet.Key()); err == nil && pow != nil { + hasMinPower = pow.HasMinPower + mbi.MinerPower = pow.MinerPower.QualityAdjPower + mbi.NetworkPower = pow.TotalPower.QualityAdjPower } } - isLate := uint64(start.Unix()) > (base.TipSet.MinTimestamp() + uint64(base.NullRounds*builtin.EpochDurationSeconds) + build.PropagationDelaySecs) + isLate := uint64(tStart.Unix()) > (base.TipSet.MinTimestamp() + 
uint64(base.NullRounds*builtin.EpochDurationSeconds) + build.PropagationDelaySecs) logStruct := []interface{}{ - "tookMilliseconds", (build.Clock.Now().UnixNano() - start.UnixNano()) / 1_000_000, + "tookMilliseconds", (build.Clock.Now().UnixNano() - tStart.UnixNano()) / 1_000_000, "forRound", int64(round), "baseEpoch", int64(base.TipSet.Height()), - "baseDeltaSeconds", uint64(start.Unix()) - base.TipSet.MinTimestamp(), + "baseDeltaSeconds", uint64(tStart.Unix()) - base.TipSet.MinTimestamp(), "nullRounds", int64(base.NullRounds), "lateStart", isLate, "beaconEpoch", rbase.Round, @@ -459,7 +472,7 @@ func (m *Miner) mineOne(ctx context.Context, base *MiningBase) (minedBlock *type if err != nil { log.Errorw("completed mineOne", logStruct...) - } else if isLate { + } else if isLate || (hasMinPower && !mbi.EligibleForMining) { log.Warnw("completed mineOne", logStruct...) } else { log.Infow("completed mineOne", logStruct...) @@ -480,16 +493,10 @@ func (m *Miner) mineOne(ctx context.Context, base *MiningBase) (minedBlock *type return nil, nil } - tMBI := build.Clock.Now() - - beaconPrev := mbi.PrevBeaconEntry - - tDrand := build.Clock.Now() - bvals := mbi.BeaconEntries - tPowercheck := build.Clock.Now() - rbase = beaconPrev + bvals := mbi.BeaconEntries + rbase = mbi.PrevBeaconEntry if len(bvals) > 0 { rbase = bvals[len(bvals)-1] } @@ -553,7 +560,7 @@ func (m *Miner) mineOne(ctx context.Context, base *MiningBase) (minedBlock *type } tCreateBlock := build.Clock.Now() - dur := tCreateBlock.Sub(start) + dur := tCreateBlock.Sub(tStart) parentMiners := make([]address.Address, len(base.TipSet.Blocks())) for i, header := range base.TipSet.Blocks() { parentMiners[i] = header.Miner @@ -561,9 +568,7 @@ func (m *Miner) mineOne(ctx context.Context, base *MiningBase) (minedBlock *type log.Infow("mined new block", "cid", minedBlock.Cid(), "height", int64(minedBlock.Header.Height), "miner", minedBlock.Header.Miner, "parents", parentMiners, "parentTipset", base.TipSet.Key().String(), 
"took", dur) if dur > time.Second*time.Duration(build.BlockDelaySecs) { log.Warnw("CAUTION: block production took longer than the block delay. Your computer may not be fast enough to keep up", - "tMinerBaseInfo ", tMBI.Sub(start), - "tDrand ", tDrand.Sub(tMBI), - "tPowercheck ", tPowercheck.Sub(tDrand), + "tPowercheck ", tPowercheck.Sub(tStart), "tTicket ", tTicket.Sub(tPowercheck), "tSeed ", tSeed.Sub(tTicket), "tProof ", tProof.Sub(tSeed), diff --git a/node/builder.go b/node/builder.go index 54e58e28a..771368917 100644 --- a/node/builder.go +++ b/node/builder.go @@ -8,14 +8,7 @@ import ( metricsi "github.com/ipfs/go-metrics-interface" - "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/lotus/chain" - "github.com/filecoin-project/lotus/chain/exchange" - rpcstmgr "github.com/filecoin-project/lotus/chain/stmgr/rpc" - "github.com/filecoin-project/lotus/chain/store" - "github.com/filecoin-project/lotus/chain/vm" - "github.com/filecoin-project/lotus/chain/wallet" - "github.com/filecoin-project/lotus/node/hello" + "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/system" logging "github.com/ipfs/go-log/v2" @@ -33,52 +26,23 @@ import ( "go.uber.org/fx" "golang.org/x/xerrors" - "github.com/filecoin-project/go-fil-markets/discovery" - discoveryimpl "github.com/filecoin-project/go-fil-markets/discovery/impl" - "github.com/filecoin-project/go-fil-markets/retrievalmarket" - "github.com/filecoin-project/go-fil-markets/storagemarket" - "github.com/filecoin-project/go-fil-markets/storagemarket/impl/storedask" - - storage2 "github.com/filecoin-project/specs-storage/storage" - - "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/chain/beacon" - "github.com/filecoin-project/lotus/chain/gen" - "github.com/filecoin-project/lotus/chain/gen/slashfilter" - "github.com/filecoin-project/lotus/chain/market" - "github.com/filecoin-project/lotus/chain/messagepool" - 
"github.com/filecoin-project/lotus/chain/messagesigner" - "github.com/filecoin-project/lotus/chain/metrics" - "github.com/filecoin-project/lotus/chain/stmgr" "github.com/filecoin-project/lotus/chain/types" - ledgerwallet "github.com/filecoin-project/lotus/chain/wallet/ledger" - "github.com/filecoin-project/lotus/chain/wallet/remotewallet" - sectorstorage "github.com/filecoin-project/lotus/extern/sector-storage" - "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper" "github.com/filecoin-project/lotus/extern/sector-storage/stores" - "github.com/filecoin-project/lotus/extern/sector-storage/storiface" - sealing "github.com/filecoin-project/lotus/extern/storage-sealing" "github.com/filecoin-project/lotus/journal" "github.com/filecoin-project/lotus/lib/peermgr" _ "github.com/filecoin-project/lotus/lib/sigs/bls" _ "github.com/filecoin-project/lotus/lib/sigs/secp" - "github.com/filecoin-project/lotus/markets/dealfilter" "github.com/filecoin-project/lotus/markets/storageadapter" - "github.com/filecoin-project/lotus/miner" "github.com/filecoin-project/lotus/node/config" - "github.com/filecoin-project/lotus/node/impl" "github.com/filecoin-project/lotus/node/impl/common" - "github.com/filecoin-project/lotus/node/impl/full" + "github.com/filecoin-project/lotus/node/impl/common/mock" "github.com/filecoin-project/lotus/node/modules" "github.com/filecoin-project/lotus/node/modules/dtypes" "github.com/filecoin-project/lotus/node/modules/helpers" "github.com/filecoin-project/lotus/node/modules/lp2p" "github.com/filecoin-project/lotus/node/modules/testing" "github.com/filecoin-project/lotus/node/repo" - "github.com/filecoin-project/lotus/paychmgr" - "github.com/filecoin-project/lotus/paychmgr/settler" - "github.com/filecoin-project/lotus/storage" - "github.com/filecoin-project/lotus/storage/sectorblocks" ) //nolint:deadcode,varcheck @@ -167,9 +131,11 @@ type Settings struct { nodeType repo.RepoType - Online bool // Online option applied + Base bool // Base option 
applied Config bool // Config option applied Lite bool // Start node in "lite" mode + + enableLibp2pNode bool } // Basic lotus-app services @@ -238,7 +204,7 @@ var LibP2P = Options( Override(ConnGaterKey, lp2p.ConnGaterOption), ) -func isType(t repo.RepoType) func(s *Settings) bool { +func IsType(t repo.RepoType) func(s *Settings) bool { return func(s *Settings) bool { return s.nodeType == t } } @@ -246,243 +212,22 @@ func isFullOrLiteNode(s *Settings) bool { return s.nodeType == repo.FullNode } func isFullNode(s *Settings) bool { return s.nodeType == repo.FullNode && !s.Lite } func isLiteNode(s *Settings) bool { return s.nodeType == repo.FullNode && s.Lite } -// Chain node provides access to the Filecoin blockchain, by setting up a full -// validator node, or by delegating some actions to other nodes (lite mode) -var ChainNode = Options( - // Full node or lite node - // TODO: Fix offline mode - - // Consensus settings - Override(new(dtypes.DrandSchedule), modules.BuiltinDrandConfig), - Override(new(stmgr.UpgradeSchedule), stmgr.DefaultUpgradeSchedule()), - Override(new(dtypes.NetworkName), modules.NetworkName), - Override(new(modules.Genesis), modules.ErrorGenesis), - Override(new(dtypes.AfterGenesisSet), modules.SetGenesis), - Override(SetGenesisKey, modules.DoSetGenesis), - Override(new(beacon.Schedule), modules.RandomSchedule), - - // Network bootstrap - Override(new(dtypes.BootstrapPeers), modules.BuiltinBootstrap), - Override(new(dtypes.DrandBootstrap), modules.DrandBootstrap), - - // Consensus: crypto dependencies - Override(new(ffiwrapper.Verifier), ffiwrapper.ProofVerifier), - - // Consensus: VM - Override(new(vm.SyscallBuilder), vm.Syscalls), - - // Consensus: Chain storage/access - Override(new(*store.ChainStore), modules.ChainStore), - Override(new(*stmgr.StateManager), modules.StateManager), - Override(new(dtypes.ChainBitswap), modules.ChainBitswap), - Override(new(dtypes.ChainBlockService), modules.ChainBlockService), // todo: unused - - // Consensus: 
Chain sync - - // We don't want the SyncManagerCtor to be used as an fx constructor, but rather as a value. - // It will be called implicitly by the Syncer constructor. - Override(new(chain.SyncManagerCtor), func() chain.SyncManagerCtor { return chain.NewSyncManager }), - Override(new(*chain.Syncer), modules.NewSyncer), - Override(new(exchange.Client), exchange.NewClient), - - // Chain networking - Override(new(*hello.Service), hello.NewHelloService), - Override(new(exchange.Server), exchange.NewServer), - Override(new(*peermgr.PeerMgr), peermgr.NewPeerMgr), - - // Chain mining API dependencies - Override(new(*slashfilter.SlashFilter), modules.NewSlashFilter), - - // Service: Message Pool - Override(new(dtypes.DefaultMaxFeeFunc), modules.NewDefaultMaxFeeFunc), - Override(new(*messagepool.MessagePool), modules.MessagePool), - Override(new(*dtypes.MpoolLocker), new(dtypes.MpoolLocker)), - - // Shared graphsync (markets, serving chain) - Override(new(dtypes.Graphsync), modules.Graphsync(config.DefaultFullNode().Client.SimultaneousTransfers)), - - // Service: Wallet - Override(new(*messagesigner.MessageSigner), messagesigner.NewMessageSigner), - Override(new(*wallet.LocalWallet), wallet.NewWallet), - Override(new(wallet.Default), From(new(*wallet.LocalWallet))), - Override(new(api.Wallet), From(new(wallet.MultiWallet))), - - // Service: Payment channels - Override(new(paychmgr.PaychAPI), From(new(modules.PaychAPI))), - Override(new(*paychmgr.Store), modules.NewPaychStore), - Override(new(*paychmgr.Manager), modules.NewManager), - Override(HandlePaymentChannelManagerKey, modules.HandlePaychManager), - Override(SettlePaymentChannelsKey, settler.SettlePaymentChannels), - - // Markets (common) - Override(new(*discoveryimpl.Local), modules.NewLocalDiscovery), - - // Markets (retrieval) - Override(new(discovery.PeerResolver), modules.RetrievalResolver), - Override(new(retrievalmarket.RetrievalClient), modules.RetrievalClient), - Override(new(dtypes.ClientDataTransfer), 
modules.NewClientGraphsyncDataTransfer), - - // Markets (storage) - Override(new(*market.FundManager), market.NewFundManager), - Override(new(dtypes.ClientDatastore), modules.NewClientDatastore), - Override(new(storagemarket.StorageClient), modules.StorageClient), - Override(new(storagemarket.StorageClientNode), storageadapter.NewClientNodeAdapter), - Override(HandleMigrateClientFundsKey, modules.HandleMigrateClientFunds), - - Override(new(*full.GasPriceCache), full.NewGasPriceCache), - - // Lite node API - ApplyIf(isLiteNode, - Override(new(messagesigner.MpoolNonceAPI), From(new(modules.MpoolNonceAPI))), - Override(new(full.ChainModuleAPI), From(new(api.Gateway))), - Override(new(full.GasModuleAPI), From(new(api.Gateway))), - Override(new(full.MpoolModuleAPI), From(new(api.Gateway))), - Override(new(full.StateModuleAPI), From(new(api.Gateway))), - Override(new(stmgr.StateManagerAPI), rpcstmgr.NewRPCStateManager), - ), - - // Full node API / service startup - ApplyIf(isFullNode, - Override(new(messagesigner.MpoolNonceAPI), From(new(*messagepool.MessagePool))), - Override(new(full.ChainModuleAPI), From(new(full.ChainModule))), - Override(new(full.GasModuleAPI), From(new(full.GasModule))), - Override(new(full.MpoolModuleAPI), From(new(full.MpoolModule))), - Override(new(full.StateModuleAPI), From(new(full.StateModule))), - Override(new(stmgr.StateManagerAPI), From(new(*stmgr.StateManager))), - - Override(RunHelloKey, modules.RunHello), - Override(RunChainExchangeKey, modules.RunChainExchange), - Override(RunPeerMgrKey, modules.RunPeerMgr), - Override(HandleIncomingMessagesKey, modules.HandleIncomingMessages), - Override(HandleIncomingBlocksKey, modules.HandleIncomingBlocks), - ), -) - -var MinerNode = Options( - // API dependencies - Override(new(api.Common), From(new(common.CommonAPI))), - Override(new(sectorstorage.StorageAuth), modules.StorageAuth), - - // Actor config - Override(new(dtypes.MinerAddress), modules.MinerAddress), - Override(new(dtypes.MinerID), 
modules.MinerID), - Override(new(abi.RegisteredSealProof), modules.SealProofType), - Override(new(dtypes.NetworkName), modules.StorageNetworkName), - - // Sector storage - Override(new(*stores.Index), stores.NewIndex), - Override(new(stores.SectorIndex), From(new(*stores.Index))), - Override(new(stores.LocalStorage), From(new(repo.LockedRepo))), - Override(new(*sectorstorage.Manager), modules.SectorStorage), - Override(new(sectorstorage.SectorManager), From(new(*sectorstorage.Manager))), - Override(new(storiface.WorkerReturn), From(new(sectorstorage.SectorManager))), - - // Sector storage: Proofs - Override(new(ffiwrapper.Verifier), ffiwrapper.ProofVerifier), - Override(new(ffiwrapper.Prover), ffiwrapper.ProofProver), - Override(new(storage2.Prover), From(new(sectorstorage.SectorManager))), - - // Sealing - Override(new(sealing.SectorIDCounter), modules.SectorIDCounter), - Override(GetParamsKey, modules.GetParams), - - // Mining / proving - Override(new(*slashfilter.SlashFilter), modules.NewSlashFilter), - Override(new(*storage.Miner), modules.StorageMiner(config.DefaultStorageMiner().Fees)), - Override(new(*miner.Miner), modules.SetupBlockProducer), - Override(new(gen.WinningPoStProver), storage.NewWinningPoStProver), - - Override(new(*storage.AddressSelector), modules.AddressSelector(nil)), - - // Markets - Override(new(dtypes.StagingMultiDstore), modules.StagingMultiDatastore), - Override(new(dtypes.StagingBlockstore), modules.StagingBlockstore), - Override(new(dtypes.StagingDAG), modules.StagingDAG), - Override(new(dtypes.StagingGraphsync), modules.StagingGraphsync), - Override(new(dtypes.ProviderPieceStore), modules.NewProviderPieceStore), - Override(new(*sectorblocks.SectorBlocks), sectorblocks.NewSectorBlocks), - - // Markets (retrieval) - Override(new(retrievalmarket.RetrievalProvider), modules.RetrievalProvider), - Override(new(dtypes.RetrievalDealFilter), modules.RetrievalDealFilter(nil)), - Override(HandleRetrievalKey, modules.HandleRetrieval), - - // 
Markets (storage) - Override(new(dtypes.ProviderDataTransfer), modules.NewProviderDAGServiceDataTransfer), - Override(new(*storedask.StoredAsk), modules.NewStorageAsk), - Override(new(dtypes.StorageDealFilter), modules.BasicDealFilter(nil)), - Override(new(storagemarket.StorageProvider), modules.StorageProvider), - Override(new(*storageadapter.DealPublisher), storageadapter.NewDealPublisher(nil, storageadapter.PublishMsgConfig{})), - Override(new(storagemarket.StorageProviderNode), storageadapter.NewProviderNodeAdapter(nil, nil)), - Override(HandleMigrateProviderFundsKey, modules.HandleMigrateProviderFunds), - Override(HandleDealsKey, modules.HandleDeals), - - // Config (todo: get a real property system) - Override(new(dtypes.ConsiderOnlineStorageDealsConfigFunc), modules.NewConsiderOnlineStorageDealsConfigFunc), - Override(new(dtypes.SetConsiderOnlineStorageDealsConfigFunc), modules.NewSetConsideringOnlineStorageDealsFunc), - Override(new(dtypes.ConsiderOnlineRetrievalDealsConfigFunc), modules.NewConsiderOnlineRetrievalDealsConfigFunc), - Override(new(dtypes.SetConsiderOnlineRetrievalDealsConfigFunc), modules.NewSetConsiderOnlineRetrievalDealsConfigFunc), - Override(new(dtypes.StorageDealPieceCidBlocklistConfigFunc), modules.NewStorageDealPieceCidBlocklistConfigFunc), - Override(new(dtypes.SetStorageDealPieceCidBlocklistConfigFunc), modules.NewSetStorageDealPieceCidBlocklistConfigFunc), - Override(new(dtypes.ConsiderOfflineStorageDealsConfigFunc), modules.NewConsiderOfflineStorageDealsConfigFunc), - Override(new(dtypes.SetConsiderOfflineStorageDealsConfigFunc), modules.NewSetConsideringOfflineStorageDealsFunc), - Override(new(dtypes.ConsiderOfflineRetrievalDealsConfigFunc), modules.NewConsiderOfflineRetrievalDealsConfigFunc), - Override(new(dtypes.SetConsiderOfflineRetrievalDealsConfigFunc), modules.NewSetConsiderOfflineRetrievalDealsConfigFunc), - Override(new(dtypes.ConsiderVerifiedStorageDealsConfigFunc), modules.NewConsiderVerifiedStorageDealsConfigFunc), - 
Override(new(dtypes.SetConsiderVerifiedStorageDealsConfigFunc), modules.NewSetConsideringVerifiedStorageDealsFunc), - Override(new(dtypes.ConsiderUnverifiedStorageDealsConfigFunc), modules.NewConsiderUnverifiedStorageDealsConfigFunc), - Override(new(dtypes.SetConsiderUnverifiedStorageDealsConfigFunc), modules.NewSetConsideringUnverifiedStorageDealsFunc), - Override(new(dtypes.SetSealingConfigFunc), modules.NewSetSealConfigFunc), - Override(new(dtypes.GetSealingConfigFunc), modules.NewGetSealConfigFunc), - Override(new(dtypes.SetExpectedSealDurationFunc), modules.NewSetExpectedSealDurationFunc), - Override(new(dtypes.GetExpectedSealDurationFunc), modules.NewGetExpectedSealDurationFunc), - Override(new(dtypes.SetMaxDealStartDelayFunc), modules.NewSetMaxDealStartDelayFunc), - Override(new(dtypes.GetMaxDealStartDelayFunc), modules.NewGetMaxDealStartDelayFunc), -) - -// Online sets up basic libp2p node -func Online() Option { - +func Base() Option { return Options( - // make sure that online is applied before Config. 
- // This is important because Config overrides some of Online units - func(s *Settings) error { s.Online = true; return nil }, + func(s *Settings) error { s.Base = true; return nil }, // mark Base as applied ApplyIf(func(s *Settings) bool { return s.Config }, - Error(errors.New("the Online option must be set before Config option")), + Error(errors.New("the Base() option must be set before Config option")), + ), + ApplyIf(func(s *Settings) bool { return s.enableLibp2pNode }, + LibP2P, ), - - LibP2P, - ApplyIf(isFullOrLiteNode, ChainNode), - ApplyIf(isType(repo.StorageMiner), MinerNode), - ) -} - -func StorageMiner(out *api.StorageMiner) Option { - return Options( - ApplyIf(func(s *Settings) bool { return s.Config }, - Error(errors.New("the StorageMiner option must be set before Config option")), - ), - ApplyIf(func(s *Settings) bool { return s.Online }, - Error(errors.New("the StorageMiner option must be set before Online option")), - ), - - func(s *Settings) error { - s.nodeType = repo.StorageMiner - return nil - }, - - func(s *Settings) error { - resAPI := &impl.StorageMinerAPI{} - s.invokes[ExtractApiKey] = fx.Populate(resAPI) - *out = resAPI - return nil - }, + ApplyIf(IsType(repo.StorageMiner), MinerNode), ) } // Config sets up constructors based on the provided Config -func ConfigCommon(cfg *config.Common) Option { +func ConfigCommon(cfg *config.Common, enableLibp2pNode bool) Option { return Options( func(s *Settings) error { s.Config = true; return nil }, Override(new(dtypes.APIEndpoint), func() (dtypes.APIEndpoint, error) { @@ -491,14 +236,21 @@ func ConfigCommon(cfg *config.Common) Option { Override(SetApiEndpointKey, func(lr repo.LockedRepo, e dtypes.APIEndpoint) error { return lr.SetAPIEndpoint(e) }), - Override(new(sectorstorage.URLs), func(e dtypes.APIEndpoint) (sectorstorage.URLs, error) { + Override(new(stores.URLs), func(e dtypes.APIEndpoint) (stores.URLs, error) { ip := cfg.API.RemoteListenAddress - var urls sectorstorage.URLs + var urls 
stores.URLs urls = append(urls, "http://"+ip+"/remote") // TODO: This makes no assumptions, and probably could... return urls, nil }), - ApplyIf(func(s *Settings) bool { return s.Online }, + ApplyIf(func(s *Settings) bool { return s.Base }), // apply only if Base has already been applied + If(!enableLibp2pNode, + Override(new(common.NetAPI), From(new(mock.MockNetAPI))), + Override(new(api.Common), From(new(common.CommonAPI))), + ), + If(enableLibp2pNode, + Override(new(common.NetAPI), From(new(common.Libp2pNetAPI))), + Override(new(api.Common), From(new(common.CommonAPI))), Override(StartListeningKey, lp2p.StartListening(cfg.Libp2p.ListenAddresses)), Override(ConnectionManagerKey, lp2p.ConnectionManager( cfg.Libp2p.ConnMgrLow, @@ -511,78 +263,15 @@ func ConfigCommon(cfg *config.Common) Option { ApplyIf(func(s *Settings) bool { return len(cfg.Libp2p.BootstrapPeers) > 0 }, Override(new(dtypes.BootstrapPeers), modules.ConfigBootstrap(cfg.Libp2p.BootstrapPeers)), ), + + Override(AddrsFactoryKey, lp2p.AddrsFactory( + cfg.Libp2p.AnnounceAddresses, + cfg.Libp2p.NoAnnounceAddresses)), ), - Override(AddrsFactoryKey, lp2p.AddrsFactory( - cfg.Libp2p.AnnounceAddresses, - cfg.Libp2p.NoAnnounceAddresses)), Override(new(dtypes.MetadataDS), modules.Datastore(cfg.Backup.DisableMetadataLog)), ) } -func ConfigFullNode(c interface{}) Option { - cfg, ok := c.(*config.FullNode) - if !ok { - return Error(xerrors.Errorf("invalid config from repo, got: %T", c)) - } - - ipfsMaddr := cfg.Client.IpfsMAddr - return Options( - ConfigCommon(&cfg.Common), - - If(cfg.Client.UseIpfs, - Override(new(dtypes.ClientBlockstore), modules.IpfsClientBlockstore(ipfsMaddr, cfg.Client.IpfsOnlineMode)), - If(cfg.Client.IpfsUseForRetrieval, - Override(new(dtypes.ClientRetrievalStoreManager), modules.ClientBlockstoreRetrievalStoreManager), - ), - ), - Override(new(dtypes.Graphsync), modules.Graphsync(cfg.Client.SimultaneousTransfers)), - - If(cfg.Metrics.HeadNotifs, - Override(HeadMetricsKey, 
metrics.SendHeadNotifs(cfg.Metrics.Nickname)), - ), - - If(cfg.Wallet.RemoteBackend != "", - Override(new(*remotewallet.RemoteWallet), remotewallet.SetupRemoteWallet(cfg.Wallet.RemoteBackend)), - ), - If(cfg.Wallet.EnableLedger, - Override(new(*ledgerwallet.LedgerWallet), ledgerwallet.NewWallet), - ), - If(cfg.Wallet.DisableLocal, - Unset(new(*wallet.LocalWallet)), - Override(new(wallet.Default), wallet.NilDefault), - ), - ) -} - -func ConfigStorageMiner(c interface{}) Option { - cfg, ok := c.(*config.StorageMiner) - if !ok { - return Error(xerrors.Errorf("invalid config from repo, got: %T", c)) - } - - return Options( - ConfigCommon(&cfg.Common), - - If(cfg.Dealmaking.Filter != "", - Override(new(dtypes.StorageDealFilter), modules.BasicDealFilter(dealfilter.CliStorageDealFilter(cfg.Dealmaking.Filter))), - ), - - If(cfg.Dealmaking.RetrievalFilter != "", - Override(new(dtypes.RetrievalDealFilter), modules.RetrievalDealFilter(dealfilter.CliRetrievalDealFilter(cfg.Dealmaking.RetrievalFilter))), - ), - - Override(new(*storageadapter.DealPublisher), storageadapter.NewDealPublisher(&cfg.Fees, storageadapter.PublishMsgConfig{ - Period: time.Duration(cfg.Dealmaking.PublishMsgPeriod), - MaxDealsPerMsg: cfg.Dealmaking.MaxDealsPerPublishMsg, - })), - Override(new(storagemarket.StorageProviderNode), storageadapter.NewProviderNodeAdapter(&cfg.Fees, &cfg.Dealmaking)), - - Override(new(sectorstorage.SealerConfig), cfg.Storage), - Override(new(*storage.AddressSelector), modules.AddressSelector(&cfg.Addresses)), - Override(new(*storage.Miner), modules.StorageMiner(cfg.Fees)), - ) -} - func Repo(r repo.Repo) Option { return func(settings *Settings) error { lr, err := r.Lock(settings.nodeType) @@ -612,19 +301,25 @@ func Repo(r repo.Repo) Option { Override(new(dtypes.UniversalBlockstore), modules.UniversalBlockstore), If(cfg.EnableSplitstore, + If(cfg.Splitstore.ColdStoreType == "universal", + Override(new(dtypes.ColdBlockstore), From(new(dtypes.UniversalBlockstore)))), + 
If(cfg.Splitstore.ColdStoreType == "discard", + Override(new(dtypes.ColdBlockstore), modules.DiscardColdBlockstore)), If(cfg.Splitstore.HotStoreType == "badger", Override(new(dtypes.HotBlockstore), modules.BadgerHotBlockstore)), Override(new(dtypes.SplitBlockstore), modules.SplitBlockstore(cfg)), Override(new(dtypes.BasicChainBlockstore), modules.ChainSplitBlockstore), Override(new(dtypes.BasicStateBlockstore), modules.StateSplitBlockstore), Override(new(dtypes.BaseBlockstore), From(new(dtypes.SplitBlockstore))), - Override(new(dtypes.ExposedBlockstore), From(new(dtypes.SplitBlockstore))), + Override(new(dtypes.ExposedBlockstore), modules.ExposedSplitBlockstore), + Override(new(dtypes.GCReferenceProtector), modules.SplitBlockstoreGCReferenceProtector), ), If(!cfg.EnableSplitstore, Override(new(dtypes.BasicChainBlockstore), modules.ChainFlatBlockstore), Override(new(dtypes.BasicStateBlockstore), modules.StateFlatBlockstore), Override(new(dtypes.BaseBlockstore), From(new(dtypes.UniversalBlockstore))), Override(new(dtypes.ExposedBlockstore), From(new(dtypes.UniversalBlockstore))), + Override(new(dtypes.GCReferenceProtector), modules.NoopGCReferenceProtector), ), Override(new(dtypes.ChainBlockstore), From(new(dtypes.BasicChainBlockstore))), @@ -649,37 +344,12 @@ func Repo(r repo.Repo) Option { Override(new(*dtypes.APIAlg), modules.APISecret), - ApplyIf(isType(repo.FullNode), ConfigFullNode(c)), - ApplyIf(isType(repo.StorageMiner), ConfigStorageMiner(c)), + ApplyIf(IsType(repo.FullNode), ConfigFullNode(c)), + ApplyIf(IsType(repo.StorageMiner), ConfigStorageMiner(c)), )(settings) } } -type FullOption = Option - -func Lite(enable bool) FullOption { - return func(s *Settings) error { - s.Lite = enable - return nil - } -} - -func FullAPI(out *api.FullNode, fopts ...FullOption) Option { - return Options( - func(s *Settings) error { - s.nodeType = repo.FullNode - return nil - }, - Options(fopts...), - func(s *Settings) error { - resAPI := &impl.FullNodeAPI{} - 
s.invokes[ExtractApiKey] = fx.Populate(resAPI) - *out = resAPI - return nil - }, - ) -} - type StopFunc func(context.Context) error // New builds and starts new Filecoin node diff --git a/node/builder_chain.go b/node/builder_chain.go new file mode 100644 index 000000000..1447a4df7 --- /dev/null +++ b/node/builder_chain.go @@ -0,0 +1,218 @@ +package node + +import ( + "go.uber.org/fx" + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-fil-markets/discovery" + discoveryimpl "github.com/filecoin-project/go-fil-markets/discovery/impl" + "github.com/filecoin-project/go-fil-markets/retrievalmarket" + "github.com/filecoin-project/go-fil-markets/storagemarket" + + "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/chain" + "github.com/filecoin-project/lotus/chain/beacon" + "github.com/filecoin-project/lotus/chain/exchange" + "github.com/filecoin-project/lotus/chain/gen/slashfilter" + "github.com/filecoin-project/lotus/chain/market" + "github.com/filecoin-project/lotus/chain/messagepool" + "github.com/filecoin-project/lotus/chain/messagesigner" + "github.com/filecoin-project/lotus/chain/metrics" + "github.com/filecoin-project/lotus/chain/stmgr" + rpcstmgr "github.com/filecoin-project/lotus/chain/stmgr/rpc" + "github.com/filecoin-project/lotus/chain/store" + "github.com/filecoin-project/lotus/chain/vm" + "github.com/filecoin-project/lotus/chain/wallet" + ledgerwallet "github.com/filecoin-project/lotus/chain/wallet/ledger" + "github.com/filecoin-project/lotus/chain/wallet/remotewallet" + "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper" + "github.com/filecoin-project/lotus/lib/peermgr" + "github.com/filecoin-project/lotus/markets/storageadapter" + "github.com/filecoin-project/lotus/node/config" + "github.com/filecoin-project/lotus/node/hello" + "github.com/filecoin-project/lotus/node/impl" + "github.com/filecoin-project/lotus/node/impl/full" + "github.com/filecoin-project/lotus/node/modules" + 
"github.com/filecoin-project/lotus/node/modules/dtypes" + "github.com/filecoin-project/lotus/node/repo" + "github.com/filecoin-project/lotus/paychmgr" + "github.com/filecoin-project/lotus/paychmgr/settler" +) + +// Chain node provides access to the Filecoin blockchain, by setting up a full +// validator node, or by delegating some actions to other nodes (lite mode) +var ChainNode = Options( + // Full node or lite node + // TODO: Fix offline mode + + // Consensus settings + Override(new(dtypes.DrandSchedule), modules.BuiltinDrandConfig), + Override(new(stmgr.UpgradeSchedule), stmgr.DefaultUpgradeSchedule()), + Override(new(dtypes.NetworkName), modules.NetworkName), + Override(new(modules.Genesis), modules.ErrorGenesis), + Override(new(dtypes.AfterGenesisSet), modules.SetGenesis), + Override(SetGenesisKey, modules.DoSetGenesis), + Override(new(beacon.Schedule), modules.RandomSchedule), + + // Network bootstrap + Override(new(dtypes.BootstrapPeers), modules.BuiltinBootstrap), + Override(new(dtypes.DrandBootstrap), modules.DrandBootstrap), + + // Consensus: crypto dependencies + Override(new(ffiwrapper.Verifier), ffiwrapper.ProofVerifier), + Override(new(ffiwrapper.Prover), ffiwrapper.ProofProver), + + // Consensus: VM + Override(new(vm.SyscallBuilder), vm.Syscalls), + + // Consensus: Chain storage/access + Override(new(*store.ChainStore), modules.ChainStore), + Override(new(*stmgr.StateManager), modules.StateManager), + Override(new(dtypes.ChainBitswap), modules.ChainBitswap), + Override(new(dtypes.ChainBlockService), modules.ChainBlockService), // todo: unused + + // Consensus: Chain sync + + // We don't want the SyncManagerCtor to be used as an fx constructor, but rather as a value. + // It will be called implicitly by the Syncer constructor. 
+ Override(new(chain.SyncManagerCtor), func() chain.SyncManagerCtor { return chain.NewSyncManager }), + Override(new(*chain.Syncer), modules.NewSyncer), + Override(new(exchange.Client), exchange.NewClient), + + // Chain networking + Override(new(*hello.Service), hello.NewHelloService), + Override(new(exchange.Server), exchange.NewServer), + Override(new(*peermgr.PeerMgr), peermgr.NewPeerMgr), + + // Chain mining API dependencies + Override(new(*slashfilter.SlashFilter), modules.NewSlashFilter), + + // Service: Message Pool + Override(new(dtypes.DefaultMaxFeeFunc), modules.NewDefaultMaxFeeFunc), + Override(new(*messagepool.MessagePool), modules.MessagePool), + Override(new(*dtypes.MpoolLocker), new(dtypes.MpoolLocker)), + + // Shared graphsync (markets, serving chain) + Override(new(dtypes.Graphsync), modules.Graphsync(config.DefaultFullNode().Client.SimultaneousTransfers)), + + // Service: Wallet + Override(new(*messagesigner.MessageSigner), messagesigner.NewMessageSigner), + Override(new(*wallet.LocalWallet), wallet.NewWallet), + Override(new(wallet.Default), From(new(*wallet.LocalWallet))), + Override(new(api.Wallet), From(new(wallet.MultiWallet))), + + // Service: Payment channels + Override(new(paychmgr.PaychAPI), From(new(modules.PaychAPI))), + Override(new(*paychmgr.Store), modules.NewPaychStore), + Override(new(*paychmgr.Manager), modules.NewManager), + Override(HandlePaymentChannelManagerKey, modules.HandlePaychManager), + Override(SettlePaymentChannelsKey, settler.SettlePaymentChannels), + + // Markets (common) + Override(new(*discoveryimpl.Local), modules.NewLocalDiscovery), + + // Markets (retrieval) + Override(new(discovery.PeerResolver), modules.RetrievalResolver), + Override(new(retrievalmarket.RetrievalClient), modules.RetrievalClient), + Override(new(dtypes.ClientDataTransfer), modules.NewClientGraphsyncDataTransfer), + + // Markets (storage) + Override(new(*market.FundManager), market.NewFundManager), + Override(new(dtypes.ClientDatastore), 
modules.NewClientDatastore), + Override(new(storagemarket.StorageClient), modules.StorageClient), + Override(new(storagemarket.StorageClientNode), storageadapter.NewClientNodeAdapter), + Override(HandleMigrateClientFundsKey, modules.HandleMigrateClientFunds), + + Override(new(*full.GasPriceCache), full.NewGasPriceCache), + + // Lite node API + ApplyIf(isLiteNode, + Override(new(messagepool.Provider), messagepool.NewProviderLite), + Override(new(messagesigner.MpoolNonceAPI), From(new(modules.MpoolNonceAPI))), + Override(new(full.ChainModuleAPI), From(new(api.Gateway))), + Override(new(full.GasModuleAPI), From(new(api.Gateway))), + Override(new(full.MpoolModuleAPI), From(new(api.Gateway))), + Override(new(full.StateModuleAPI), From(new(api.Gateway))), + Override(new(stmgr.StateManagerAPI), rpcstmgr.NewRPCStateManager), + ), + + // Full node API / service startup + ApplyIf(isFullNode, + Override(new(messagepool.Provider), messagepool.NewProvider), + Override(new(messagesigner.MpoolNonceAPI), From(new(*messagepool.MessagePool))), + Override(new(full.ChainModuleAPI), From(new(full.ChainModule))), + Override(new(full.GasModuleAPI), From(new(full.GasModule))), + Override(new(full.MpoolModuleAPI), From(new(full.MpoolModule))), + Override(new(full.StateModuleAPI), From(new(full.StateModule))), + Override(new(stmgr.StateManagerAPI), From(new(*stmgr.StateManager))), + + Override(RunHelloKey, modules.RunHello), + Override(RunChainExchangeKey, modules.RunChainExchange), + Override(RunPeerMgrKey, modules.RunPeerMgr), + Override(HandleIncomingMessagesKey, modules.HandleIncomingMessages), + Override(HandleIncomingBlocksKey, modules.HandleIncomingBlocks), + ), +) + +func ConfigFullNode(c interface{}) Option { + cfg, ok := c.(*config.FullNode) + if !ok { + return Error(xerrors.Errorf("invalid config from repo, got: %T", c)) + } + + enableLibp2pNode := true // always enable libp2p for full nodes + + ipfsMaddr := cfg.Client.IpfsMAddr + return Options( + ConfigCommon(&cfg.Common, 
enableLibp2pNode), + + If(cfg.Client.UseIpfs, + Override(new(dtypes.ClientBlockstore), modules.IpfsClientBlockstore(ipfsMaddr, cfg.Client.IpfsOnlineMode)), + If(cfg.Client.IpfsUseForRetrieval, + Override(new(dtypes.ClientRetrievalStoreManager), modules.ClientBlockstoreRetrievalStoreManager), + ), + ), + Override(new(dtypes.Graphsync), modules.Graphsync(cfg.Client.SimultaneousTransfers)), + + If(cfg.Metrics.HeadNotifs, + Override(HeadMetricsKey, metrics.SendHeadNotifs(cfg.Metrics.Nickname)), + ), + + If(cfg.Wallet.RemoteBackend != "", + Override(new(*remotewallet.RemoteWallet), remotewallet.SetupRemoteWallet(cfg.Wallet.RemoteBackend)), + ), + If(cfg.Wallet.EnableLedger, + Override(new(*ledgerwallet.LedgerWallet), ledgerwallet.NewWallet), + ), + If(cfg.Wallet.DisableLocal, + Unset(new(*wallet.LocalWallet)), + Override(new(wallet.Default), wallet.NilDefault), + ), + ) +} + +type FullOption = Option + +func Lite(enable bool) FullOption { + return func(s *Settings) error { + s.Lite = enable + return nil + } +} + +func FullAPI(out *api.FullNode, fopts ...FullOption) Option { + return Options( + func(s *Settings) error { + s.nodeType = repo.FullNode + s.enableLibp2pNode = true + return nil + }, + Options(fopts...), + func(s *Settings) error { + resAPI := &impl.FullNodeAPI{} + s.invokes[ExtractApiKey] = fx.Populate(resAPI) + *out = resAPI + return nil + }, + ) +} diff --git a/node/builder_miner.go b/node/builder_miner.go new file mode 100644 index 000000000..0c0f9d15a --- /dev/null +++ b/node/builder_miner.go @@ -0,0 +1,223 @@ +package node + +import ( + "errors" + "time" + + "go.uber.org/fx" + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-fil-markets/retrievalmarket" + rmnet "github.com/filecoin-project/go-fil-markets/retrievalmarket/network" + "github.com/filecoin-project/go-fil-markets/storagemarket" + "github.com/filecoin-project/go-fil-markets/storagemarket/impl/storedask" + "github.com/filecoin-project/go-state-types/abi" + 
"github.com/filecoin-project/lotus/markets/retrievaladapter" + storage2 "github.com/filecoin-project/specs-storage/storage" + + "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/chain/gen" + "github.com/filecoin-project/lotus/chain/gen/slashfilter" + sectorstorage "github.com/filecoin-project/lotus/extern/sector-storage" + "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper" + "github.com/filecoin-project/lotus/extern/sector-storage/stores" + "github.com/filecoin-project/lotus/extern/sector-storage/storiface" + sealing "github.com/filecoin-project/lotus/extern/storage-sealing" + "github.com/filecoin-project/lotus/markets/dealfilter" + "github.com/filecoin-project/lotus/markets/storageadapter" + "github.com/filecoin-project/lotus/miner" + "github.com/filecoin-project/lotus/node/config" + "github.com/filecoin-project/lotus/node/impl" + "github.com/filecoin-project/lotus/node/modules" + "github.com/filecoin-project/lotus/node/modules/dtypes" + "github.com/filecoin-project/lotus/node/repo" + "github.com/filecoin-project/lotus/storage" + "github.com/filecoin-project/lotus/storage/sectorblocks" +) + +var MinerNode = Options( + Override(new(sectorstorage.StorageAuth), modules.StorageAuth), + + // Actor config + Override(new(dtypes.MinerAddress), modules.MinerAddress), + Override(new(dtypes.MinerID), modules.MinerID), + Override(new(abi.RegisteredSealProof), modules.SealProofType), + Override(new(dtypes.NetworkName), modules.StorageNetworkName), + + // Mining / proving + Override(new(*storage.AddressSelector), modules.AddressSelector(nil)), +) + +func ConfigStorageMiner(c interface{}) Option { + cfg, ok := c.(*config.StorageMiner) + if !ok { + return Error(xerrors.Errorf("invalid config from repo, got: %T", c)) + } + + pricingConfig := cfg.Dealmaking.RetrievalPricing + if pricingConfig.Strategy == config.RetrievalPricingExternalMode { + if pricingConfig.External == nil { + return Error(xerrors.New("retrieval pricing policy has 
been to set to external but external policy config is nil")) + } + + if pricingConfig.External.Path == "" { + return Error(xerrors.New("retrieval pricing policy has been to set to external but external script path is empty")) + } + } else if pricingConfig.Strategy != config.RetrievalPricingDefaultMode { + return Error(xerrors.New("retrieval pricing policy must be either default or external")) + } + + enableLibp2pNode := cfg.Subsystems.EnableMarkets // we enable libp2p nodes if the storage market subsystem is enabled, otherwise we don't + + return Options( + ConfigCommon(&cfg.Common, enableLibp2pNode), + + Override(new(stores.LocalStorage), From(new(repo.LockedRepo))), + Override(new(*stores.Local), modules.LocalStorage), + Override(new(*stores.Remote), modules.RemoteStorage), + Override(new(dtypes.RetrievalPricingFunc), modules.RetrievalPricingFunc(cfg.Dealmaking)), + + If(!cfg.Subsystems.EnableMining, + If(cfg.Subsystems.EnableSealing, Error(xerrors.Errorf("sealing can only be enabled on a mining node"))), + If(cfg.Subsystems.EnableSectorStorage, Error(xerrors.Errorf("sealing can only be enabled on a mining node"))), + ), + If(cfg.Subsystems.EnableMining, + If(!cfg.Subsystems.EnableSealing, Error(xerrors.Errorf("sealing can't be disabled on a mining node yet"))), + If(!cfg.Subsystems.EnableSectorStorage, Error(xerrors.Errorf("sealing can't be disabled on a mining node yet"))), + + // Sector storage: Proofs + Override(new(ffiwrapper.Verifier), ffiwrapper.ProofVerifier), + Override(new(ffiwrapper.Prover), ffiwrapper.ProofProver), + Override(new(storage2.Prover), From(new(sectorstorage.SectorManager))), + + // Sealing (todo should be under EnableSealing, but storagefsm is currently bundled with storage.Miner) + Override(new(sealing.SectorIDCounter), modules.SectorIDCounter), + Override(GetParamsKey, modules.GetParams), + + Override(new(dtypes.SetSealingConfigFunc), modules.NewSetSealConfigFunc), + Override(new(dtypes.GetSealingConfigFunc), 
modules.NewGetSealConfigFunc), + + // Mining / proving + Override(new(*slashfilter.SlashFilter), modules.NewSlashFilter), + Override(new(*storage.Miner), modules.StorageMiner(config.DefaultStorageMiner().Fees)), + Override(new(*miner.Miner), modules.SetupBlockProducer), + Override(new(gen.WinningPoStProver), storage.NewWinningPoStProver), + Override(new(*storage.Miner), modules.StorageMiner(cfg.Fees)), + Override(new(sectorblocks.SectorBuilder), From(new(*storage.Miner))), + ), + + If(cfg.Subsystems.EnableSectorStorage, + // Sector storage + Override(new(*stores.Index), stores.NewIndex), + Override(new(stores.SectorIndex), From(new(*stores.Index))), + Override(new(*sectorstorage.Manager), modules.SectorStorage), + Override(new(sectorstorage.Unsealer), From(new(*sectorstorage.Manager))), + Override(new(sectorstorage.SectorManager), From(new(*sectorstorage.Manager))), + Override(new(storiface.WorkerReturn), From(new(sectorstorage.SectorManager))), + ), + + If(!cfg.Subsystems.EnableSectorStorage, + Override(new(sectorstorage.StorageAuth), modules.StorageAuthWithURL(cfg.Subsystems.SectorIndexApiInfo)), + Override(new(modules.MinerStorageService), modules.ConnectStorageService(cfg.Subsystems.SectorIndexApiInfo)), + Override(new(sectorstorage.Unsealer), From(new(modules.MinerStorageService))), + Override(new(sectorblocks.SectorBuilder), From(new(modules.MinerStorageService))), + ), + If(!cfg.Subsystems.EnableSealing, + Override(new(modules.MinerSealingService), modules.ConnectSealingService(cfg.Subsystems.SealerApiInfo)), + Override(new(stores.SectorIndex), From(new(modules.MinerSealingService))), + ), + + If(cfg.Subsystems.EnableMarkets, + // Markets + Override(new(dtypes.StagingMultiDstore), modules.StagingMultiDatastore), + Override(new(dtypes.StagingBlockstore), modules.StagingBlockstore), + Override(new(dtypes.StagingDAG), modules.StagingDAG), + Override(new(dtypes.StagingGraphsync), modules.StagingGraphsync(cfg.Dealmaking.SimultaneousTransfers)), + 
Override(new(dtypes.ProviderPieceStore), modules.NewProviderPieceStore), + Override(new(*sectorblocks.SectorBlocks), sectorblocks.NewSectorBlocks), + + // Markets (retrieval deps) + Override(new(sectorstorage.PieceProvider), sectorstorage.NewPieceProvider), + Override(new(dtypes.RetrievalPricingFunc), modules.RetrievalPricingFunc(config.DealmakingConfig{ + RetrievalPricing: &config.RetrievalPricing{ + Strategy: config.RetrievalPricingDefaultMode, + Default: &config.RetrievalPricingDefault{}, + }, + })), + Override(new(dtypes.RetrievalPricingFunc), modules.RetrievalPricingFunc(cfg.Dealmaking)), + + // Markets (retrieval) + Override(new(retrievalmarket.RetrievalProviderNode), retrievaladapter.NewRetrievalProviderNode), + Override(new(rmnet.RetrievalMarketNetwork), modules.RetrievalNetwork), + Override(new(retrievalmarket.RetrievalProvider), modules.RetrievalProvider), + Override(new(dtypes.RetrievalDealFilter), modules.RetrievalDealFilter(nil)), + Override(HandleRetrievalKey, modules.HandleRetrieval), + + // Markets (storage) + Override(new(dtypes.ProviderDataTransfer), modules.NewProviderDAGServiceDataTransfer), + Override(new(*storedask.StoredAsk), modules.NewStorageAsk), + Override(new(dtypes.StorageDealFilter), modules.BasicDealFilter(nil)), + Override(new(storagemarket.StorageProvider), modules.StorageProvider), + Override(new(*storageadapter.DealPublisher), storageadapter.NewDealPublisher(nil, storageadapter.PublishMsgConfig{})), + Override(HandleMigrateProviderFundsKey, modules.HandleMigrateProviderFunds), + Override(HandleDealsKey, modules.HandleDeals), + + // Config (todo: get a real property system) + Override(new(dtypes.ConsiderOnlineStorageDealsConfigFunc), modules.NewConsiderOnlineStorageDealsConfigFunc), + Override(new(dtypes.SetConsiderOnlineStorageDealsConfigFunc), modules.NewSetConsideringOnlineStorageDealsFunc), + Override(new(dtypes.ConsiderOnlineRetrievalDealsConfigFunc), modules.NewConsiderOnlineRetrievalDealsConfigFunc), + 
Override(new(dtypes.SetConsiderOnlineRetrievalDealsConfigFunc), modules.NewSetConsiderOnlineRetrievalDealsConfigFunc), + Override(new(dtypes.StorageDealPieceCidBlocklistConfigFunc), modules.NewStorageDealPieceCidBlocklistConfigFunc), + Override(new(dtypes.SetStorageDealPieceCidBlocklistConfigFunc), modules.NewSetStorageDealPieceCidBlocklistConfigFunc), + Override(new(dtypes.ConsiderOfflineStorageDealsConfigFunc), modules.NewConsiderOfflineStorageDealsConfigFunc), + Override(new(dtypes.SetConsiderOfflineStorageDealsConfigFunc), modules.NewSetConsideringOfflineStorageDealsFunc), + Override(new(dtypes.ConsiderOfflineRetrievalDealsConfigFunc), modules.NewConsiderOfflineRetrievalDealsConfigFunc), + Override(new(dtypes.SetConsiderOfflineRetrievalDealsConfigFunc), modules.NewSetConsiderOfflineRetrievalDealsConfigFunc), + Override(new(dtypes.ConsiderVerifiedStorageDealsConfigFunc), modules.NewConsiderVerifiedStorageDealsConfigFunc), + Override(new(dtypes.SetConsiderVerifiedStorageDealsConfigFunc), modules.NewSetConsideringVerifiedStorageDealsFunc), + Override(new(dtypes.ConsiderUnverifiedStorageDealsConfigFunc), modules.NewConsiderUnverifiedStorageDealsConfigFunc), + Override(new(dtypes.SetConsiderUnverifiedStorageDealsConfigFunc), modules.NewSetConsideringUnverifiedStorageDealsFunc), + Override(new(dtypes.SetExpectedSealDurationFunc), modules.NewSetExpectedSealDurationFunc), + Override(new(dtypes.GetExpectedSealDurationFunc), modules.NewGetExpectedSealDurationFunc), + Override(new(dtypes.SetMaxDealStartDelayFunc), modules.NewSetMaxDealStartDelayFunc), + Override(new(dtypes.GetMaxDealStartDelayFunc), modules.NewGetMaxDealStartDelayFunc), + + If(cfg.Dealmaking.Filter != "", + Override(new(dtypes.StorageDealFilter), modules.BasicDealFilter(dealfilter.CliStorageDealFilter(cfg.Dealmaking.Filter))), + ), + + If(cfg.Dealmaking.RetrievalFilter != "", + Override(new(dtypes.RetrievalDealFilter), 
modules.RetrievalDealFilter(dealfilter.CliRetrievalDealFilter(cfg.Dealmaking.RetrievalFilter))), + ), + Override(new(*storageadapter.DealPublisher), storageadapter.NewDealPublisher(&cfg.Fees, storageadapter.PublishMsgConfig{ + Period: time.Duration(cfg.Dealmaking.PublishMsgPeriod), + MaxDealsPerMsg: cfg.Dealmaking.MaxDealsPerPublishMsg, + })), + Override(new(storagemarket.StorageProviderNode), storageadapter.NewProviderNodeAdapter(&cfg.Fees, &cfg.Dealmaking)), + ), + + Override(new(sectorstorage.SealerConfig), cfg.Storage), + Override(new(*storage.AddressSelector), modules.AddressSelector(&cfg.Addresses)), + ) +} + +func StorageMiner(out *api.StorageMiner, subsystemsCfg config.MinerSubsystemConfig) Option { + return Options( + ApplyIf(func(s *Settings) bool { return s.Config }, + Error(errors.New("the StorageMiner option must be set before Config option")), + ), + + func(s *Settings) error { + s.nodeType = repo.StorageMiner + s.enableLibp2pNode = subsystemsCfg.EnableMarkets + return nil + }, + + func(s *Settings) error { + resAPI := &impl.StorageMinerAPI{} + s.invokes[ExtractApiKey] = fx.Populate(resAPI) + *out = resAPI + return nil + }, + ) +} diff --git a/node/config/def.go b/node/config/def.go index a108402e3..2ba70d892 100644 --- a/node/config/def.go +++ b/node/config/def.go @@ -14,6 +14,14 @@ import ( sectorstorage "github.com/filecoin-project/lotus/extern/sector-storage" ) +const ( + // RetrievalPricingDefault configures the node to use the default retrieval pricing policy. + RetrievalPricingDefaultMode = "default" + // RetrievalPricingExternal configures the node to use the external retrieval pricing script + // configured by the user. 
+ RetrievalPricingExternalMode = "external" +) + // Common is common config between full node and miner type Common struct { API API @@ -42,6 +50,7 @@ type Backup struct { type StorageMiner struct { Common + Subsystems MinerSubsystemConfig Dealmaking DealmakingConfig Sealing SealingConfig Storage sectorstorage.SealerConfig @@ -49,6 +58,16 @@ type StorageMiner struct { Addresses MinerAddressConfig } +type MinerSubsystemConfig struct { + EnableMining bool + EnableSealing bool + EnableSectorStorage bool + EnableMarkets bool + + SealerApiInfo string // if EnableSealing == false + SectorIndexApiInfo string // if EnableSectorStorage == false +} + type DealmakingConfig struct { ConsiderOnlineStorageDeals bool ConsiderOfflineStorageDeals bool @@ -70,8 +89,34 @@ type DealmakingConfig struct { // as a multiplier of the minimum collateral bound MaxProviderCollateralMultiplier uint64 + // The maximum number of parallel online data transfers (storage+retrieval) + SimultaneousTransfers uint64 + Filter string RetrievalFilter string + + RetrievalPricing *RetrievalPricing +} + +type RetrievalPricing struct { + Strategy string // possible values: "default", "external" + + Default *RetrievalPricingDefault + External *RetrievalPricingExternal +} + +type RetrievalPricingExternal struct { + // Path of the external script that will be run to price a retrieval deal. + // This parameter is ONLY applicable if the retrieval pricing policy strategy has been configured to "external". + Path string +} + +type RetrievalPricingDefault struct { + // VerifiedDealsFreeTransfer configures zero fees for data transfer for a retrieval deal + // of a payloadCid that belongs to a verified storage deal. + // This parameter is ONLY applicable if the retrieval pricing policy strategy has been configured to "default". 
+ // default value is true + VerifiedDealsFreeTransfer bool } type SealingConfig struct { @@ -91,6 +136,13 @@ type SealingConfig struct { // Run sector finalization before submitting sector proof to the chain FinalizeEarly bool + // Whether to use available miner balance for sector collateral instead of sending it with each message + CollateralFromMinerBalance bool + // Minimum available balance to keep in the miner actor before sending it with messages + AvailableBalanceBuffer types.FIL + // Don't send collateral with messages even if there is no available balance in the miner actor + DisableCollateralFallback bool + // enable / disable precommit batching (takes effect after nv13) BatchPreCommits bool // maximum precommit batch size - batches will be sent immediately above this size @@ -148,9 +200,10 @@ type MinerFeeConfig struct { } type MinerAddressConfig struct { - PreCommitControl []string - CommitControl []string - TerminateControl []string + PreCommitControl []string + CommitControl []string + TerminateControl []string + DealPublishControl []string // DisableOwnerFallback disables usage of the owner address for messages // sent automatically @@ -195,12 +248,9 @@ type Chainstore struct { } type Splitstore struct { - HotStoreType string - TrackingStoreType string - MarkSetType string - EnableFullCompaction bool - EnableGC bool // EXPERIMENTAL - Archival bool + ColdStoreType string + HotStoreType string + MarkSetType string } // // Full Node @@ -271,7 +321,9 @@ func DefaultFullNode() *FullNode { Chainstore: Chainstore{ EnableSplitstore: false, Splitstore: Splitstore{ - HotStoreType: "badger", + ColdStoreType: "universal", + HotStoreType: "badger", + MarkSetType: "map", }, }, } @@ -289,6 +341,10 @@ func DefaultStorageMiner() *StorageMiner { AlwaysKeepUnsealedCopy: true, FinalizeEarly: false, + CollateralFromMinerBalance: false, + AvailableBalanceBuffer: types.FIL(big.Zero()), + DisableCollateralFallback: false, + BatchPreCommits: true, MaxPreCommitBatch: 
miner5.PreCommitSectorBatchMaxSize, // up to 256 sectors PreCommitBatchWait: Duration(24 * time.Hour), // this should be less than 31.5 hours, which is the expiration of a precommit ticket @@ -317,6 +373,9 @@ func DefaultStorageMiner() *StorageMiner { // Default to 10 - tcp should still be able to figure this out, and // it's the ratio between 10gbit / 1gbit ParallelFetchLimit: 10, + + // By default use the hardware resource filtering strategy. + ResourceFiltering: sectorstorage.ResourceFilteringHardware, }, Dealmaking: DealmakingConfig{ @@ -333,6 +392,25 @@ func DefaultStorageMiner() *StorageMiner { PublishMsgPeriod: Duration(time.Hour), MaxDealsPerPublishMsg: 8, MaxProviderCollateralMultiplier: 2, + + SimultaneousTransfers: DefaultSimultaneousTransfers, + + RetrievalPricing: &RetrievalPricing{ + Strategy: RetrievalPricingDefaultMode, + Default: &RetrievalPricingDefault{ + VerifiedDealsFreeTransfer: true, + }, + External: &RetrievalPricingExternal{ + Path: "", + }, + }, + }, + + Subsystems: MinerSubsystemConfig{ + EnableMining: true, + EnableSealing: true, + EnableSectorStorage: true, + EnableMarkets: true, }, Fees: MinerFeeConfig{ @@ -355,8 +433,10 @@ func DefaultStorageMiner() *StorageMiner { }, Addresses: MinerAddressConfig{ - PreCommitControl: []string{}, - CommitControl: []string{}, + PreCommitControl: []string{}, + CommitControl: []string{}, + TerminateControl: []string{}, + DealPublishControl: []string{}, }, } cfg.Common.API.ListenAddress = "/ip4/127.0.0.1/tcp/2345/http" diff --git a/node/hello/hello.go b/node/hello/hello.go index d4c631206..e31b7d25b 100644 --- a/node/hello/hello.go +++ b/node/hello/hello.go @@ -5,7 +5,7 @@ import ( "time" "github.com/filecoin-project/go-state-types/abi" - xerrors "golang.org/x/xerrors" + "golang.org/x/xerrors" "github.com/filecoin-project/go-state-types/big" "github.com/ipfs/go-cid" @@ -13,7 +13,7 @@ import ( "github.com/libp2p/go-libp2p-core/host" inet "github.com/libp2p/go-libp2p-core/network" 
"github.com/libp2p/go-libp2p-core/peer" - protocol "github.com/libp2p/go-libp2p-core/protocol" + "github.com/libp2p/go-libp2p-core/protocol" cborutil "github.com/filecoin-project/go-cbor-util" "github.com/filecoin-project/lotus/build" @@ -23,6 +23,8 @@ import ( "github.com/filecoin-project/lotus/lib/peermgr" ) +// TODO(TEST): missing test coverage. + const ProtocolID = "/fil/hello/1.0.0" var log = logging.Logger("hello") @@ -33,12 +35,14 @@ type HelloMessage struct { HeaviestTipSetWeight big.Int GenesisHash cid.Cid } + type LatencyMessage struct { TArrival int64 TSent int64 } type NewStreamFunc func(context.Context, peer.ID, ...protocol.ID) (inet.Stream, error) + type Service struct { h host.Host @@ -62,7 +66,6 @@ func NewHelloService(h host.Host, cs *store.ChainStore, syncer *chain.Syncer, pm } func (hs *Service) HandleStream(s inet.Stream) { - var hmsg HelloMessage if err := cborutil.ReadCborRPC(s, &hmsg); err != nil { log.Infow("failed to read hello message, disconnecting", "error", err) @@ -77,7 +80,7 @@ func (hs *Service) HandleStream(s inet.Stream) { "hash", hmsg.GenesisHash) if hmsg.GenesisHash != hs.syncer.Genesis.Cids()[0] { - log.Warnf("other peer has different genesis! (%s)", hmsg.GenesisHash) + log.Debugf("other peer has different genesis! 
(%s)", hmsg.GenesisHash) _ = s.Conn().Close() return } @@ -121,7 +124,6 @@ func (hs *Service) HandleStream(s inet.Stream) { log.Debugf("Got new tipset through Hello: %s from %s", ts.Cids(), s.Conn().RemotePeer()) hs.syncer.InformNewHead(s.Conn().RemotePeer(), ts) } - } func (hs *Service) SayHello(ctx context.Context, pid peer.ID) error { diff --git a/node/impl/client/client.go b/node/impl/client/client.go index 441bdccc2..7ba6463e6 100644 --- a/node/impl/client/client.go +++ b/node/impl/client/client.go @@ -6,6 +6,8 @@ import ( "fmt" "io" "os" + "sort" + "time" "github.com/filecoin-project/lotus/chain/actors/builtin/miner" @@ -31,10 +33,12 @@ import ( "github.com/ipld/go-ipld-prime/traversal/selector/builder" "github.com/libp2p/go-libp2p-core/host" "github.com/libp2p/go-libp2p-core/peer" + "github.com/multiformats/go-multibase" mh "github.com/multiformats/go-multihash" "go.uber.org/fx" "github.com/filecoin-project/go-address" + cborutil "github.com/filecoin-project/go-cbor-util" "github.com/filecoin-project/go-commp-utils/ffiwrapper" "github.com/filecoin-project/go-commp-utils/writer" datatransfer "github.com/filecoin-project/go-data-transfer" @@ -43,8 +47,10 @@ import ( rm "github.com/filecoin-project/go-fil-markets/retrievalmarket" "github.com/filecoin-project/go-fil-markets/shared" "github.com/filecoin-project/go-fil-markets/storagemarket" + "github.com/filecoin-project/go-fil-markets/storagemarket/network" "github.com/filecoin-project/go-multistore" "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/specs-actors/v3/actors/builtin/market" marketevents "github.com/filecoin-project/lotus/markets/loggers" @@ -92,7 +98,13 @@ func calcDealExpiration(minDuration uint64, md *dline.Info, startEpoch abi.Chain minExp := startEpoch + abi.ChainEpoch(minDuration) // Align on miners ProvingPeriodBoundary - return minExp + md.WPoStProvingPeriod - (minExp % md.WPoStProvingPeriod) + (md.PeriodStart % md.WPoStProvingPeriod) - 1 + exp := minExp + 
md.WPoStProvingPeriod - (minExp % md.WPoStProvingPeriod) + (md.PeriodStart % md.WPoStProvingPeriod) - 1 + // Should only be possible for miners created around genesis + for exp < minExp { + exp += md.WPoStProvingPeriod + } + + return exp } func (a *API) imgr() *importmgr.Mgr { @@ -100,8 +112,23 @@ func (a *API) imgr() *importmgr.Mgr { } func (a *API) ClientStartDeal(ctx context.Context, params *api.StartDealParams) (*cid.Cid, error) { + return a.dealStarter(ctx, params, false) +} + +func (a *API) ClientStatelessDeal(ctx context.Context, params *api.StartDealParams) (*cid.Cid, error) { + return a.dealStarter(ctx, params, true) +} + +func (a *API) dealStarter(ctx context.Context, params *api.StartDealParams, isStateless bool) (*cid.Cid, error) { var storeID *multistore.StoreID - if params.Data.TransferType == storagemarket.TTGraphsync { + if isStateless { + if params.Data.TransferType != storagemarket.TTManual { + return nil, xerrors.Errorf("invalid transfer type %s for stateless storage deal", params.Data.TransferType) + } + if !params.EpochPrice.IsZero() { + return nil, xerrors.New("stateless storage deals can only be initiated with storage price of 0") + } + } else if params.Data.TransferType == storagemarket.TTGraphsync { importIDs := a.imgr().List() for _, importID := range importIDs { info, err := a.imgr().Info(importID) @@ -124,12 +151,12 @@ func (a *API) ClientStartDeal(ctx context.Context, params *api.StartDealParams) walletKey, err := a.StateAccountKey(ctx, params.Wallet, types.EmptyTSK) if err != nil { - return nil, xerrors.Errorf("failed resolving params.Wallet addr: %w", params.Wallet) + return nil, xerrors.Errorf("failed resolving params.Wallet addr (%s): %w", params.Wallet, err) } exist, err := a.WalletHas(ctx, walletKey) if err != nil { - return nil, xerrors.Errorf("failed getting addr from wallet: %w", params.Wallet) + return nil, xerrors.Errorf("failed getting addr from wallet (%s): %w", params.Wallet, err) } if !exist { return nil, 
xerrors.Errorf("provided address doesn't exist in wallet") @@ -149,8 +176,6 @@ func (a *API) ClientStartDeal(ctx context.Context, params *api.StartDealParams) return nil, xerrors.New("data doesn't fit in a sector") } - providerInfo := utils.NewStorageProviderInfo(params.Miner, mi.Worker, mi.SectorSize, *mi.PeerId, mi.Multiaddrs) - dealStart := params.DealStartEpoch if dealStart <= 0 { // unset, or explicitly 'epoch undefined' ts, err := a.ChainHead(ctx) @@ -172,25 +197,112 @@ func (a *API) ClientStartDeal(ctx context.Context, params *api.StartDealParams) return nil, xerrors.Errorf("failed to get seal proof type: %w", err) } - result, err := a.SMDealClient.ProposeStorageDeal(ctx, storagemarket.ProposeStorageDealParams{ - Addr: params.Wallet, - Info: &providerInfo, - Data: params.Data, - StartEpoch: dealStart, - EndEpoch: calcDealExpiration(params.MinBlocksDuration, md, dealStart), - Price: params.EpochPrice, - Collateral: params.ProviderCollateral, - Rt: st, - FastRetrieval: params.FastRetrieval, - VerifiedDeal: params.VerifiedDeal, - StoreID: storeID, - }) + // regular flow + if !isStateless { + providerInfo := utils.NewStorageProviderInfo(params.Miner, mi.Worker, mi.SectorSize, *mi.PeerId, mi.Multiaddrs) - if err != nil { - return nil, xerrors.Errorf("failed to start deal: %w", err) + result, err := a.SMDealClient.ProposeStorageDeal(ctx, storagemarket.ProposeStorageDealParams{ + Addr: params.Wallet, + Info: &providerInfo, + Data: params.Data, + StartEpoch: dealStart, + EndEpoch: calcDealExpiration(params.MinBlocksDuration, md, dealStart), + Price: params.EpochPrice, + Collateral: params.ProviderCollateral, + Rt: st, + FastRetrieval: params.FastRetrieval, + VerifiedDeal: params.VerifiedDeal, + StoreID: storeID, + }) + + if err != nil { + return nil, xerrors.Errorf("failed to start deal: %w", err) + } + + return &result.ProposalCid, nil } - return &result.ProposalCid, nil + // + // stateless flow from here to the end + // + + dealProposal := &market.DealProposal{ + 
PieceCID: *params.Data.PieceCid, + PieceSize: params.Data.PieceSize.Padded(), + Client: walletKey, + Provider: params.Miner, + Label: params.Data.Root.Encode(multibase.MustNewEncoder('u')), + StartEpoch: dealStart, + EndEpoch: calcDealExpiration(params.MinBlocksDuration, md, dealStart), + StoragePricePerEpoch: big.Zero(), + ProviderCollateral: params.ProviderCollateral, + ClientCollateral: big.Zero(), + VerifiedDeal: params.VerifiedDeal, + } + + if dealProposal.ProviderCollateral.IsZero() { + networkCollateral, err := a.StateDealProviderCollateralBounds(ctx, params.Data.PieceSize.Padded(), params.VerifiedDeal, types.EmptyTSK) + if err != nil { + return nil, xerrors.Errorf("failed to determine minimum provider collateral: %w", err) + } + dealProposal.ProviderCollateral = networkCollateral.Min + } + + dealProposalSerialized, err := cborutil.Dump(dealProposal) + if err != nil { + return nil, xerrors.Errorf("failed to serialize deal proposal: %w", err) + } + + dealProposalSig, err := a.WalletSign(ctx, walletKey, dealProposalSerialized) + if err != nil { + return nil, xerrors.Errorf("failed to sign proposal : %w", err) + } + + dealProposalSigned := &market.ClientDealProposal{ + Proposal: *dealProposal, + ClientSignature: *dealProposalSig, + } + dStream, err := network.NewFromLibp2pHost(a.Host, + // params duplicated from .../node/modules/client.go + // https://github.com/filecoin-project/lotus/pull/5961#discussion_r629768011 + network.RetryParameters(time.Second, 5*time.Minute, 15, 5), + ).NewDealStream(ctx, *mi.PeerId) + if err != nil { + return nil, xerrors.Errorf("opening dealstream to %s/%s failed: %w", params.Miner, *mi.PeerId, err) + } + + if err = dStream.WriteDealProposal(network.Proposal{ + FastRetrieval: true, + DealProposal: dealProposalSigned, + Piece: &storagemarket.DataRef{ + TransferType: storagemarket.TTManual, + Root: params.Data.Root, + PieceCid: params.Data.PieceCid, + PieceSize: params.Data.PieceSize, + }, + }); err != nil { + return nil, 
xerrors.Errorf("sending deal proposal failed: %w", err) + } + + resp, _, err := dStream.ReadDealResponse() + if err != nil { + return nil, xerrors.Errorf("reading proposal response failed: %w", err) + } + + dealProposalIpld, err := cborutil.AsIpld(dealProposalSigned) + if err != nil { + return nil, xerrors.Errorf("serializing proposal node failed: %w", err) + } + + if !dealProposalIpld.Cid().Equals(resp.Response.Proposal) { + return nil, xerrors.Errorf("provider returned proposal cid %s but we expected %s", resp.Response.Proposal, dealProposalIpld.Cid()) + } + + if resp.Response.State != storagemarket.StorageDealWaitingForData { + return nil, xerrors.Errorf("provider returned unexpected state %d for proposal %s, with message: %s", resp.Response.State, resp.Response.Proposal, resp.Response.Message) + } + + return &resp.Response.Proposal, nil } func (a *API) ClientListDeals(ctx context.Context) ([]api.DealInfo, error) { @@ -324,7 +436,19 @@ func (a *API) ClientFindData(ctx context.Context, root cid.Cid, piece *cid.Cid) if piece != nil && !piece.Equals(*p.PieceCID) { continue } - out = append(out, a.makeRetrievalQuery(ctx, p, root, piece, rm.QueryParams{})) + + // do not rely on local data with respect to peer id + // fetch an up-to-date miner peer id from chain + mi, err := a.StateMinerInfo(ctx, p.Address, types.EmptyTSK) + if err != nil { + return nil, err + } + pp := rm.RetrievalPeer{ + Address: p.Address, + ID: *mi.PeerId, + } + + out = append(out, a.makeRetrievalQuery(ctx, pp, root, piece, rm.QueryParams{})) } return out, nil @@ -568,6 +692,8 @@ func readSubscribeEvents(ctx context.Context, dealID retrievalmarket.DealID, sub return nil case rm.DealStatusRejected: return xerrors.Errorf("Retrieval Proposal Rejected: %s", state.Message) + case rm.DealStatusCancelled: + return xerrors.Errorf("Retrieval was cancelled externally: %s", state.Message) case rm.DealStatusDealNotFound, rm.DealStatusErrored: @@ -725,6 +851,83 @@ func (a *API) clientRetrieve(ctx 
context.Context, order api.RetrievalOrder, ref return } +func (a *API) ClientListRetrievals(ctx context.Context) ([]api.RetrievalInfo, error) { + deals, err := a.Retrieval.ListDeals() + if err != nil { + return nil, err + } + dataTransfersByID, err := a.transfersByID(ctx) + if err != nil { + return nil, err + } + out := make([]api.RetrievalInfo, 0, len(deals)) + for _, v := range deals { + // Find the data transfer associated with this deal + var transferCh *api.DataTransferChannel + if v.ChannelID != nil { + if ch, ok := dataTransfersByID[*v.ChannelID]; ok { + transferCh = &ch + } + } + out = append(out, a.newRetrievalInfoWithTransfer(transferCh, v)) + } + sort.Slice(out, func(a, b int) bool { + return out[a].ID < out[b].ID + }) + return out, nil +} + +func (a *API) ClientGetRetrievalUpdates(ctx context.Context) (<-chan api.RetrievalInfo, error) { + updates := make(chan api.RetrievalInfo) + + unsub := a.Retrieval.SubscribeToEvents(func(_ rm.ClientEvent, deal rm.ClientDealState) { + updates <- a.newRetrievalInfo(ctx, deal) + }) + + go func() { + defer unsub() + <-ctx.Done() + }() + + return updates, nil +} + +func (a *API) newRetrievalInfoWithTransfer(ch *api.DataTransferChannel, deal rm.ClientDealState) api.RetrievalInfo { + return api.RetrievalInfo{ + PayloadCID: deal.PayloadCID, + ID: deal.ID, + PieceCID: deal.PieceCID, + PricePerByte: deal.PricePerByte, + UnsealPrice: deal.UnsealPrice, + Status: deal.Status, + Message: deal.Message, + Provider: deal.Sender, + BytesReceived: deal.TotalReceived, + BytesPaidFor: deal.BytesPaidFor, + TotalPaid: deal.FundsSpent, + TransferChannelID: deal.ChannelID, + DataTransfer: ch, + } +} + +func (a *API) newRetrievalInfo(ctx context.Context, v rm.ClientDealState) api.RetrievalInfo { + // Find the data transfer associated with this deal + var transferCh *api.DataTransferChannel + if v.ChannelID != nil { + state, err := a.DataTransfer.ChannelState(ctx, *v.ChannelID) + + // Note: If there was an error just ignore it, as the data 
transfer may + // be not found if it's no longer active + if err == nil { + ch := api.NewDataTransferChannel(a.Host.ID(), state) + ch.Stages = state.Stages() + transferCh = &ch + } + } + + return a.newRetrievalInfoWithTransfer(transferCh, v) +} + type multiStoreRetrievalStore struct { storeID multistore.StoreID store *multistore.Store diff --git a/node/impl/common/common.go b/node/impl/common/common.go index 7d99fb42a..7d7210eed 100644 --- a/node/impl/common/common.go +++ b/node/impl/common/common.go @@ -2,32 +2,18 @@ package common import ( "context" - "sort" - "strings" "github.com/gbrlsnchs/jwt/v3" "github.com/google/uuid" + logging "github.com/ipfs/go-log/v2" "go.uber.org/fx" "golang.org/x/xerrors" - logging "github.com/ipfs/go-log/v2" - "github.com/libp2p/go-libp2p-core/host" - metrics "github.com/libp2p/go-libp2p-core/metrics" - "github.com/libp2p/go-libp2p-core/network" - "github.com/libp2p/go-libp2p-core/peer" - protocol "github.com/libp2p/go-libp2p-core/protocol" - swarm "github.com/libp2p/go-libp2p-swarm" - basichost "github.com/libp2p/go-libp2p/p2p/host/basic" - "github.com/libp2p/go-libp2p/p2p/net/conngater" - ma "github.com/multiformats/go-multiaddr" - "github.com/filecoin-project/go-jsonrpc/auth" "github.com/filecoin-project/lotus/api" - apitypes "github.com/filecoin-project/lotus/api/types" "github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/node/modules/dtypes" - "github.com/filecoin-project/lotus/node/modules/lp2p" ) var session = uuid.New() @@ -35,13 +21,9 @@ var session = uuid.New() type CommonAPI struct { fx.In + NetAPI + APISecret *dtypes.APIAlg - RawHost lp2p.RawHost - Host host.Host - Router lp2p.BaseIpfsRouting - ConnGater *conngater.BasicConnectionGater - Reporter metrics.Reporter - Sk *dtypes.ScoreKeeper ShutdownChan dtypes.ShutdownChan } @@ -66,170 +48,6 @@ func (a *CommonAPI) AuthNew(ctx context.Context, perms []auth.Permission) ([]byt return jwt.Sign(&p, (*jwt.HMACSHA)(a.APISecret)) } -func (a *CommonAPI) 
NetConnectedness(ctx context.Context, pid peer.ID) (network.Connectedness, error) { - return a.Host.Network().Connectedness(pid), nil -} -func (a *CommonAPI) NetPubsubScores(context.Context) ([]api.PubsubScore, error) { - scores := a.Sk.Get() - out := make([]api.PubsubScore, len(scores)) - i := 0 - for k, v := range scores { - out[i] = api.PubsubScore{ID: k, Score: v} - i++ - } - - sort.Slice(out, func(i, j int) bool { - return strings.Compare(string(out[i].ID), string(out[j].ID)) > 0 - }) - - return out, nil -} - -func (a *CommonAPI) NetPeers(context.Context) ([]peer.AddrInfo, error) { - conns := a.Host.Network().Conns() - out := make([]peer.AddrInfo, len(conns)) - - for i, conn := range conns { - out[i] = peer.AddrInfo{ - ID: conn.RemotePeer(), - Addrs: []ma.Multiaddr{ - conn.RemoteMultiaddr(), - }, - } - } - - return out, nil -} - -func (a *CommonAPI) NetPeerInfo(_ context.Context, p peer.ID) (*api.ExtendedPeerInfo, error) { - info := &api.ExtendedPeerInfo{ID: p} - - agent, err := a.Host.Peerstore().Get(p, "AgentVersion") - if err == nil { - info.Agent = agent.(string) - } - - for _, a := range a.Host.Peerstore().Addrs(p) { - info.Addrs = append(info.Addrs, a.String()) - } - sort.Strings(info.Addrs) - - protocols, err := a.Host.Peerstore().GetProtocols(p) - if err == nil { - sort.Strings(protocols) - info.Protocols = protocols - } - - if cm := a.Host.ConnManager().GetTagInfo(p); cm != nil { - info.ConnMgrMeta = &api.ConnMgrInfo{ - FirstSeen: cm.FirstSeen, - Value: cm.Value, - Tags: cm.Tags, - Conns: cm.Conns, - } - } - - return info, nil -} - -func (a *CommonAPI) NetConnect(ctx context.Context, p peer.AddrInfo) error { - if swrm, ok := a.Host.Network().(*swarm.Swarm); ok { - swrm.Backoff().Clear(p.ID) - } - - return a.Host.Connect(ctx, p) -} - -func (a *CommonAPI) NetAddrsListen(context.Context) (peer.AddrInfo, error) { - return peer.AddrInfo{ - ID: a.Host.ID(), - Addrs: a.Host.Addrs(), - }, nil -} - -func (a *CommonAPI) NetDisconnect(ctx context.Context, p 
peer.ID) error { - return a.Host.Network().ClosePeer(p) -} - -func (a *CommonAPI) NetFindPeer(ctx context.Context, p peer.ID) (peer.AddrInfo, error) { - return a.Router.FindPeer(ctx, p) -} - -func (a *CommonAPI) NetAutoNatStatus(ctx context.Context) (i api.NatInfo, err error) { - autonat := a.RawHost.(*basichost.BasicHost).AutoNat - - if autonat == nil { - return api.NatInfo{ - Reachability: network.ReachabilityUnknown, - }, nil - } - - var maddr string - if autonat.Status() == network.ReachabilityPublic { - pa, err := autonat.PublicAddr() - if err != nil { - return api.NatInfo{}, err - } - maddr = pa.String() - } - - return api.NatInfo{ - Reachability: autonat.Status(), - PublicAddr: maddr, - }, nil -} - -func (a *CommonAPI) NetAgentVersion(ctx context.Context, p peer.ID) (string, error) { - ag, err := a.Host.Peerstore().Get(p, "AgentVersion") - if err != nil { - return "", err - } - - if ag == nil { - return "unknown", nil - } - - return ag.(string), nil -} - -func (a *CommonAPI) NetBandwidthStats(ctx context.Context) (metrics.Stats, error) { - return a.Reporter.GetBandwidthTotals(), nil -} - -func (a *CommonAPI) NetBandwidthStatsByPeer(ctx context.Context) (map[string]metrics.Stats, error) { - out := make(map[string]metrics.Stats) - for p, s := range a.Reporter.GetBandwidthByPeer() { - out[p.String()] = s - } - return out, nil -} - -func (a *CommonAPI) NetBandwidthStatsByProtocol(ctx context.Context) (map[protocol.ID]metrics.Stats, error) { - return a.Reporter.GetBandwidthByProtocol(), nil -} - -func (a *CommonAPI) Discover(ctx context.Context) (apitypes.OpenRPCDocument, error) { - return build.OpenRPCDiscoverJSON_Full(), nil -} - -func (a *CommonAPI) ID(context.Context) (peer.ID, error) { - return a.Host.ID(), nil -} - -func (a *CommonAPI) Version(context.Context) (api.APIVersion, error) { - v, err := api.VersionForType(api.RunningNodeType) - if err != nil { - return api.APIVersion{}, err - } - - return api.APIVersion{ - Version: build.UserVersion(), - 
APIVersion: v, - - BlockDelay: build.BlockDelaySecs, - }, nil -} - func (a *CommonAPI) LogList(context.Context) ([]string, error) { return logging.GetSubsystems(), nil } @@ -251,4 +69,16 @@ func (a *CommonAPI) Closing(ctx context.Context) (<-chan struct{}, error) { return make(chan struct{}), nil // relies on jsonrpc closing } -var _ api.Common = &CommonAPI{} +func (a *CommonAPI) Version(context.Context) (api.APIVersion, error) { + v, err := api.VersionForType(api.RunningNodeType) + if err != nil { + return api.APIVersion{}, err + } + + return api.APIVersion{ + Version: build.UserVersion(), + APIVersion: v, + + BlockDelay: build.BlockDelaySecs, + }, nil +} diff --git a/node/impl/common/mock/net.go b/node/impl/common/mock/net.go new file mode 100644 index 000000000..010688d8a --- /dev/null +++ b/node/impl/common/mock/net.go @@ -0,0 +1,101 @@ +package mock + +import ( + "context" + "errors" + + "github.com/filecoin-project/lotus/api" + apitypes "github.com/filecoin-project/lotus/api/types" + "github.com/libp2p/go-libp2p-core/metrics" + "github.com/libp2p/go-libp2p-core/network" + "github.com/libp2p/go-libp2p-core/peer" + "github.com/libp2p/go-libp2p-core/protocol" + "go.uber.org/fx" +) + +var ( + errNotImplemented = errors.New("not implemented") +) + +type MockNetAPI struct { + fx.In +} + +func (a *MockNetAPI) NetAgentVersion(ctx context.Context, p peer.ID) (string, error) { + return "", errNotImplemented +} + +func (a *MockNetAPI) NetConnectedness(ctx context.Context, pid peer.ID) (conn network.Connectedness, err error) { + err = errNotImplemented + return +} + +func (a *MockNetAPI) NetPubsubScores(context.Context) ([]api.PubsubScore, error) { + return nil, errNotImplemented +} + +func (a *MockNetAPI) NetPeers(context.Context) ([]peer.AddrInfo, error) { + return nil, errNotImplemented +} + +func (a *MockNetAPI) NetPeerInfo(_ context.Context, p peer.ID) (*api.ExtendedPeerInfo, error) { + return nil, errNotImplemented +} + +func (a *MockNetAPI) NetConnect(ctx 
context.Context, p peer.AddrInfo) error { + return errNotImplemented +} + +func (a *MockNetAPI) NetAddrsListen(context.Context) (ai peer.AddrInfo, err error) { + err = errNotImplemented + return +} + +func (a *MockNetAPI) NetDisconnect(ctx context.Context, p peer.ID) error { + return errNotImplemented +} + +func (a *MockNetAPI) NetFindPeer(ctx context.Context, p peer.ID) (ai peer.AddrInfo, err error) { + err = errNotImplemented + return +} + +func (a *MockNetAPI) NetAutoNatStatus(ctx context.Context) (i api.NatInfo, err error) { + err = errNotImplemented + return +} + +func (a *MockNetAPI) NetBandwidthStats(ctx context.Context) (s metrics.Stats, err error) { + err = errNotImplemented + return +} + +func (a *MockNetAPI) NetBandwidthStatsByPeer(ctx context.Context) (map[string]metrics.Stats, error) { + return nil, errNotImplemented +} + +func (a *MockNetAPI) NetBandwidthStatsByProtocol(ctx context.Context) (map[protocol.ID]metrics.Stats, error) { + return nil, errNotImplemented +} + +func (a *MockNetAPI) Discover(ctx context.Context) (apitypes.OpenRPCDocument, error) { + return nil, errNotImplemented +} + +func (a *MockNetAPI) ID(context.Context) (p peer.ID, err error) { + err = errNotImplemented + return +} + +func (a *MockNetAPI) NetBlockAdd(ctx context.Context, acl api.NetBlockList) error { + return errNotImplemented +} + +func (a *MockNetAPI) NetBlockRemove(ctx context.Context, acl api.NetBlockList) error { + return errNotImplemented +} + +func (a *MockNetAPI) NetBlockList(ctx context.Context) (result api.NetBlockList, err error) { + err = errNotImplemented + return +} diff --git a/node/impl/common/net.go b/node/impl/common/net.go new file mode 100644 index 000000000..7e9c506ca --- /dev/null +++ b/node/impl/common/net.go @@ -0,0 +1,34 @@ +package common + +import ( + "context" + + metrics "github.com/libp2p/go-libp2p-core/metrics" + "github.com/libp2p/go-libp2p-core/network" + "github.com/libp2p/go-libp2p-core/peer" + protocol 
"github.com/libp2p/go-libp2p-core/protocol" + + "github.com/filecoin-project/lotus/api" + apitypes "github.com/filecoin-project/lotus/api/types" +) + +type NetAPI interface { + NetConnectedness(ctx context.Context, pid peer.ID) (network.Connectedness, error) + NetPubsubScores(context.Context) ([]api.PubsubScore, error) + NetPeers(context.Context) ([]peer.AddrInfo, error) + NetPeerInfo(_ context.Context, p peer.ID) (*api.ExtendedPeerInfo, error) + NetConnect(ctx context.Context, p peer.AddrInfo) error + NetAddrsListen(context.Context) (peer.AddrInfo, error) + NetDisconnect(ctx context.Context, p peer.ID) error + NetFindPeer(ctx context.Context, p peer.ID) (peer.AddrInfo, error) + NetAutoNatStatus(ctx context.Context) (i api.NatInfo, err error) + NetAgentVersion(ctx context.Context, p peer.ID) (string, error) + NetBandwidthStats(ctx context.Context) (metrics.Stats, error) + NetBandwidthStatsByPeer(ctx context.Context) (map[string]metrics.Stats, error) + NetBandwidthStatsByProtocol(ctx context.Context) (map[protocol.ID]metrics.Stats, error) + Discover(ctx context.Context) (apitypes.OpenRPCDocument, error) + ID(context.Context) (peer.ID, error) + NetBlockAdd(ctx context.Context, acl api.NetBlockList) error + NetBlockRemove(ctx context.Context, acl api.NetBlockList) error + NetBlockList(ctx context.Context) (api.NetBlockList, error) +} diff --git a/node/impl/common/conngater.go b/node/impl/common/net_conngater.go similarity index 92% rename from node/impl/common/conngater.go rename to node/impl/common/net_conngater.go index ab387631c..581e655dd 100644 --- a/node/impl/common/conngater.go +++ b/node/impl/common/net_conngater.go @@ -14,7 +14,7 @@ import ( var cLog = logging.Logger("conngater") -func (a *CommonAPI) NetBlockAdd(ctx context.Context, acl api.NetBlockList) error { +func (a *Libp2pNetAPI) NetBlockAdd(ctx context.Context, acl api.NetBlockList) error { for _, p := range acl.Peers { err := a.ConnGater.BlockPeer(p) if err != nil { @@ -89,7 +89,7 @@ func (a 
*CommonAPI) NetBlockAdd(ctx context.Context, acl api.NetBlockList) error return nil } -func (a *CommonAPI) NetBlockRemove(ctx context.Context, acl api.NetBlockList) error { +func (a *Libp2pNetAPI) NetBlockRemove(ctx context.Context, acl api.NetBlockList) error { for _, p := range acl.Peers { err := a.ConnGater.UnblockPeer(p) if err != nil { @@ -124,7 +124,7 @@ func (a *CommonAPI) NetBlockRemove(ctx context.Context, acl api.NetBlockList) er return nil } -func (a *CommonAPI) NetBlockList(ctx context.Context) (result api.NetBlockList, err error) { +func (a *Libp2pNetAPI) NetBlockList(ctx context.Context) (result api.NetBlockList, err error) { result.Peers = a.ConnGater.ListBlockedPeers() for _, ip := range a.ConnGater.ListBlockedAddrs() { result.IPAddrs = append(result.IPAddrs, ip.String()) diff --git a/node/impl/common/net_libp2p.go b/node/impl/common/net_libp2p.go new file mode 100644 index 000000000..9848feaf8 --- /dev/null +++ b/node/impl/common/net_libp2p.go @@ -0,0 +1,186 @@ +package common + +import ( + "context" + "sort" + "strings" + + "github.com/libp2p/go-libp2p-core/host" + metrics "github.com/libp2p/go-libp2p-core/metrics" + "github.com/libp2p/go-libp2p-core/network" + "github.com/libp2p/go-libp2p-core/peer" + protocol "github.com/libp2p/go-libp2p-core/protocol" + swarm "github.com/libp2p/go-libp2p-swarm" + basichost "github.com/libp2p/go-libp2p/p2p/host/basic" + "github.com/libp2p/go-libp2p/p2p/net/conngater" + ma "github.com/multiformats/go-multiaddr" + "go.uber.org/fx" + + "github.com/filecoin-project/lotus/api" + apitypes "github.com/filecoin-project/lotus/api/types" + "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/node/modules/dtypes" + "github.com/filecoin-project/lotus/node/modules/lp2p" +) + +type Libp2pNetAPI struct { + fx.In + + RawHost lp2p.RawHost + Host host.Host + Router lp2p.BaseIpfsRouting + ConnGater *conngater.BasicConnectionGater + Reporter metrics.Reporter + Sk *dtypes.ScoreKeeper +} + +func (a 
*Libp2pNetAPI) NetConnectedness(ctx context.Context, pid peer.ID) (network.Connectedness, error) { + return a.Host.Network().Connectedness(pid), nil +} + +func (a *Libp2pNetAPI) NetPubsubScores(context.Context) ([]api.PubsubScore, error) { + scores := a.Sk.Get() + out := make([]api.PubsubScore, len(scores)) + i := 0 + for k, v := range scores { + out[i] = api.PubsubScore{ID: k, Score: v} + i++ + } + + sort.Slice(out, func(i, j int) bool { + return strings.Compare(string(out[i].ID), string(out[j].ID)) > 0 + }) + + return out, nil +} + +func (a *Libp2pNetAPI) NetPeers(context.Context) ([]peer.AddrInfo, error) { + conns := a.Host.Network().Conns() + out := make([]peer.AddrInfo, len(conns)) + + for i, conn := range conns { + out[i] = peer.AddrInfo{ + ID: conn.RemotePeer(), + Addrs: []ma.Multiaddr{ + conn.RemoteMultiaddr(), + }, + } + } + + return out, nil +} + +func (a *Libp2pNetAPI) NetPeerInfo(_ context.Context, p peer.ID) (*api.ExtendedPeerInfo, error) { + info := &api.ExtendedPeerInfo{ID: p} + + agent, err := a.Host.Peerstore().Get(p, "AgentVersion") + if err == nil { + info.Agent = agent.(string) + } + + for _, a := range a.Host.Peerstore().Addrs(p) { + info.Addrs = append(info.Addrs, a.String()) + } + sort.Strings(info.Addrs) + + protocols, err := a.Host.Peerstore().GetProtocols(p) + if err == nil { + sort.Strings(protocols) + info.Protocols = protocols + } + + if cm := a.Host.ConnManager().GetTagInfo(p); cm != nil { + info.ConnMgrMeta = &api.ConnMgrInfo{ + FirstSeen: cm.FirstSeen, + Value: cm.Value, + Tags: cm.Tags, + Conns: cm.Conns, + } + } + + return info, nil +} + +func (a *Libp2pNetAPI) NetConnect(ctx context.Context, p peer.AddrInfo) error { + if swrm, ok := a.Host.Network().(*swarm.Swarm); ok { + swrm.Backoff().Clear(p.ID) + } + + return a.Host.Connect(ctx, p) +} + +func (a *Libp2pNetAPI) NetAddrsListen(context.Context) (peer.AddrInfo, error) { + return peer.AddrInfo{ + ID: a.Host.ID(), + Addrs: a.Host.Addrs(), + }, nil +} + +func (a *Libp2pNetAPI) 
NetDisconnect(ctx context.Context, p peer.ID) error { + return a.Host.Network().ClosePeer(p) +} + +func (a *Libp2pNetAPI) NetFindPeer(ctx context.Context, p peer.ID) (peer.AddrInfo, error) { + return a.Router.FindPeer(ctx, p) +} + +func (a *Libp2pNetAPI) NetAutoNatStatus(ctx context.Context) (i api.NatInfo, err error) { + autonat := a.RawHost.(*basichost.BasicHost).GetAutoNat() + + if autonat == nil { + return api.NatInfo{ + Reachability: network.ReachabilityUnknown, + }, nil + } + + var maddr string + if autonat.Status() == network.ReachabilityPublic { + pa, err := autonat.PublicAddr() + if err != nil { + return api.NatInfo{}, err + } + maddr = pa.String() + } + + return api.NatInfo{ + Reachability: autonat.Status(), + PublicAddr: maddr, + }, nil +} + +func (a *Libp2pNetAPI) NetAgentVersion(ctx context.Context, p peer.ID) (string, error) { + ag, err := a.Host.Peerstore().Get(p, "AgentVersion") + if err != nil { + return "", err + } + + if ag == nil { + return "unknown", nil + } + + return ag.(string), nil +} + +func (a *Libp2pNetAPI) NetBandwidthStats(ctx context.Context) (metrics.Stats, error) { + return a.Reporter.GetBandwidthTotals(), nil +} + +func (a *Libp2pNetAPI) NetBandwidthStatsByPeer(ctx context.Context) (map[string]metrics.Stats, error) { + out := make(map[string]metrics.Stats) + for p, s := range a.Reporter.GetBandwidthByPeer() { + out[p.String()] = s + } + return out, nil +} + +func (a *Libp2pNetAPI) NetBandwidthStatsByProtocol(ctx context.Context) (map[protocol.ID]metrics.Stats, error) { + return a.Reporter.GetBandwidthByProtocol(), nil +} + +func (a *Libp2pNetAPI) Discover(ctx context.Context) (apitypes.OpenRPCDocument, error) { + return build.OpenRPCDiscoverJSON_Full(), nil +} + +func (a *Libp2pNetAPI) ID(context.Context) (peer.ID, error) { + return a.Host.ID(), nil +} diff --git a/node/impl/full.go b/node/impl/full.go index add40917c..50fd09cdf 100644 --- a/node/impl/full.go +++ b/node/impl/full.go @@ -2,16 +2,21 @@ package impl import ( "context" 
+ "time" + + "github.com/libp2p/go-libp2p-core/peer" logging "github.com/ipfs/go-log/v2" "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/node/impl/client" "github.com/filecoin-project/lotus/node/impl/common" "github.com/filecoin-project/lotus/node/impl/full" "github.com/filecoin-project/lotus/node/impl/market" "github.com/filecoin-project/lotus/node/impl/paych" "github.com/filecoin-project/lotus/node/modules/dtypes" + "github.com/filecoin-project/lotus/node/modules/lp2p" ) var log = logging.Logger("node") @@ -30,11 +35,86 @@ type FullNodeAPI struct { full.SyncAPI full.BeaconAPI - DS dtypes.MetadataDS + DS dtypes.MetadataDS + NetworkName dtypes.NetworkName } func (n *FullNodeAPI) CreateBackup(ctx context.Context, fpath string) error { return backup(n.DS, fpath) } +func (n *FullNodeAPI) NodeStatus(ctx context.Context, inclChainStatus bool) (status api.NodeStatus, err error) { + curTs, err := n.ChainHead(ctx) + if err != nil { + return status, err + } + + status.SyncStatus.Epoch = uint64(curTs.Height()) + timestamp := time.Unix(int64(curTs.MinTimestamp()), 0) + delta := time.Since(timestamp).Seconds() + status.SyncStatus.Behind = uint64(delta / 30) + + // get peers in the messages and blocks topics + peersMsgs := make(map[peer.ID]struct{}) + peersBlocks := make(map[peer.ID]struct{}) + + for _, p := range n.PubSub.ListPeers(build.MessagesTopic(n.NetworkName)) { + peersMsgs[p] = struct{}{} + } + + for _, p := range n.PubSub.ListPeers(build.BlocksTopic(n.NetworkName)) { + peersBlocks[p] = struct{}{} + } + + // get scores for all connected and recent peers + scores, err := n.NetPubsubScores(ctx) + if err != nil { + return status, err + } + + for _, score := range scores { + if score.Score.Score > lp2p.PublishScoreThreshold { + _, inMsgs := peersMsgs[score.ID] + if inMsgs { + status.PeerStatus.PeersToPublishMsgs++ + } + + _, inBlocks := peersBlocks[score.ID] + if inBlocks { + 
status.PeerStatus.PeersToPublishBlocks++ + } + } + } + + if inclChainStatus && status.SyncStatus.Epoch > uint64(build.Finality) { + blockCnt := 0 + ts := curTs + + for i := 0; i < 100; i++ { + blockCnt += len(ts.Blocks()) + tsk := ts.Parents() + ts, err = n.ChainGetTipSet(ctx, tsk) + if err != nil { + return status, err + } + } + + status.ChainStatus.BlocksPerTipsetLast100 = float64(blockCnt) / 100 + + for i := 100; i < int(build.Finality); i++ { + blockCnt += len(ts.Blocks()) + tsk := ts.Parents() + ts, err = n.ChainGetTipSet(ctx, tsk) + if err != nil { + return status, err + } + } + + status.ChainStatus.BlocksPerTipsetLastFinality = float64(blockCnt) / float64(build.Finality) + + } + + return status, nil +} + var _ api.FullNode = &FullNodeAPI{} diff --git a/node/impl/full/chain.go b/node/impl/full/chain.go index d26c2d7ea..33d14d3ba 100644 --- a/node/impl/full/chain.go +++ b/node/impl/full/chain.go @@ -228,6 +228,33 @@ func (a *ChainAPI) ChainGetParentReceipts(ctx context.Context, bcid cid.Cid) ([] return out, nil } +func (a *ChainAPI) ChainGetMessagesInTipset(ctx context.Context, tsk types.TipSetKey) ([]api.Message, error) { + ts, err := a.Chain.GetTipSetFromKey(tsk) + if err != nil { + return nil, err + } + + // genesis block has no parent messages... 
+ if ts.Height() == 0 { + return nil, nil + } + + cm, err := a.Chain.MessagesForTipset(ts) + if err != nil { + return nil, err + } + + var out []api.Message + for _, m := range cm { + out = append(out, api.Message{ + Cid: m.Cid(), + Message: m.VMMessage(), + }) + } + + return out, nil +} + func (m *ChainModule) ChainGetTipSetByHeight(ctx context.Context, h abi.ChainEpoch, tsk types.TipSetKey) (*types.TipSet, error) { ts, err := m.Chain.GetTipSetFromKey(tsk) if err != nil { diff --git a/node/impl/full/gas.go b/node/impl/full/gas.go index c7fd7c286..edf53ff63 100644 --- a/node/impl/full/gas.go +++ b/node/impl/full/gas.go @@ -324,7 +324,7 @@ func gasEstimateGasLimit( func (m *GasModule) GasEstimateMessageGas(ctx context.Context, msg *types.Message, spec *api.MessageSendSpec, _ types.TipSetKey) (*types.Message, error) { if msg.GasLimit == 0 { - gasLimit, err := m.GasEstimateGasLimit(ctx, msg, types.TipSetKey{}) + gasLimit, err := m.GasEstimateGasLimit(ctx, msg, types.EmptyTSK) if err != nil { return nil, xerrors.Errorf("estimating gas used: %w", err) } @@ -332,7 +332,7 @@ func (m *GasModule) GasEstimateMessageGas(ctx context.Context, msg *types.Messag } if msg.GasPremium == types.EmptyInt || types.BigCmp(msg.GasPremium, types.NewInt(0)) == 0 { - gasPremium, err := m.GasEstimateGasPremium(ctx, 10, msg.From, msg.GasLimit, types.TipSetKey{}) + gasPremium, err := m.GasEstimateGasPremium(ctx, 10, msg.From, msg.GasLimit, types.EmptyTSK) if err != nil { return nil, xerrors.Errorf("estimating gas price: %w", err) } diff --git a/node/impl/full/mpool.go b/node/impl/full/mpool.go index b95ae3405..bd91387a2 100644 --- a/node/impl/full/mpool.go +++ b/node/impl/full/mpool.go @@ -225,6 +225,18 @@ func (a *MpoolAPI) MpoolBatchPushMessage(ctx context.Context, msgs []*types.Mess return smsgs, nil } +func (a *MpoolAPI) MpoolCheckMessages(ctx context.Context, protos []*api.MessagePrototype) ([][]api.MessageCheckStatus, error) { + return a.Mpool.CheckMessages(ctx, protos) +} + +func (a 
*MpoolAPI) MpoolCheckPendingMessages(ctx context.Context, from address.Address) ([][]api.MessageCheckStatus, error) { + return a.Mpool.CheckPendingMessages(ctx, from) +} + +func (a *MpoolAPI) MpoolCheckReplaceMessages(ctx context.Context, msgs []*types.Message) ([][]api.MessageCheckStatus, error) { + return a.Mpool.CheckReplaceMessages(ctx, msgs) +} + func (a *MpoolAPI) MpoolGetNonce(ctx context.Context, addr address.Address) (uint64, error) { return a.Mpool.GetNonce(ctx, addr, types.EmptyTSK) } diff --git a/node/impl/full/multisig.go b/node/impl/full/multisig.go index 9c5f683c4..e44509d7c 100644 --- a/node/impl/full/multisig.go +++ b/node/impl/full/multisig.go @@ -14,7 +14,6 @@ import ( multisig2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/multisig" - "github.com/ipfs/go-cid" "go.uber.org/fx" "golang.org/x/xerrors" ) @@ -37,134 +36,129 @@ func (a *MsigAPI) messageBuilder(ctx context.Context, from address.Address) (mul // TODO: remove gp (gasPrice) from arguments // TODO: Add "vesting start" to arguments. 
-func (a *MsigAPI) MsigCreate(ctx context.Context, req uint64, addrs []address.Address, duration abi.ChainEpoch, val types.BigInt, src address.Address, gp types.BigInt) (cid.Cid, error) { +func (a *MsigAPI) MsigCreate(ctx context.Context, req uint64, addrs []address.Address, duration abi.ChainEpoch, val types.BigInt, src address.Address, gp types.BigInt) (*api.MessagePrototype, error) { mb, err := a.messageBuilder(ctx, src) if err != nil { - return cid.Undef, err + return nil, err } msg, err := mb.Create(addrs, req, 0, duration, val) if err != nil { - return cid.Undef, err + return nil, err } - // send the message out to the network - smsg, err := a.MpoolAPI.MpoolPushMessage(ctx, msg, nil) - if err != nil { - return cid.Undef, err - } - - return smsg.Cid(), nil + return &api.MessagePrototype{ + Message: *msg, + ValidNonce: false, + }, nil } -func (a *MsigAPI) MsigPropose(ctx context.Context, msig address.Address, to address.Address, amt types.BigInt, src address.Address, method uint64, params []byte) (cid.Cid, error) { +func (a *MsigAPI) MsigPropose(ctx context.Context, msig address.Address, to address.Address, amt types.BigInt, src address.Address, method uint64, params []byte) (*api.MessagePrototype, error) { mb, err := a.messageBuilder(ctx, src) if err != nil { - return cid.Undef, err + return nil, err } msg, err := mb.Propose(msig, to, amt, abi.MethodNum(method), params) if err != nil { - return cid.Undef, xerrors.Errorf("failed to create proposal: %w", err) + return nil, xerrors.Errorf("failed to create proposal: %w", err) } - smsg, err := a.MpoolAPI.MpoolPushMessage(ctx, msg, nil) - if err != nil { - return cid.Undef, xerrors.Errorf("failed to push message: %w", err) - } - - return smsg.Cid(), nil + return &api.MessagePrototype{ + Message: *msg, + ValidNonce: false, + }, nil } -func (a *MsigAPI) MsigAddPropose(ctx context.Context, msig address.Address, src address.Address, newAdd address.Address, inc bool) (cid.Cid, error) { +func (a *MsigAPI) 
MsigAddPropose(ctx context.Context, msig address.Address, src address.Address, newAdd address.Address, inc bool) (*api.MessagePrototype, error) { enc, actErr := serializeAddParams(newAdd, inc) if actErr != nil { - return cid.Undef, actErr + return nil, actErr } return a.MsigPropose(ctx, msig, msig, big.Zero(), src, uint64(multisig.Methods.AddSigner), enc) } -func (a *MsigAPI) MsigAddApprove(ctx context.Context, msig address.Address, src address.Address, txID uint64, proposer address.Address, newAdd address.Address, inc bool) (cid.Cid, error) { +func (a *MsigAPI) MsigAddApprove(ctx context.Context, msig address.Address, src address.Address, txID uint64, proposer address.Address, newAdd address.Address, inc bool) (*api.MessagePrototype, error) { enc, actErr := serializeAddParams(newAdd, inc) if actErr != nil { - return cid.Undef, actErr + return nil, actErr } return a.MsigApproveTxnHash(ctx, msig, txID, proposer, msig, big.Zero(), src, uint64(multisig.Methods.AddSigner), enc) } -func (a *MsigAPI) MsigAddCancel(ctx context.Context, msig address.Address, src address.Address, txID uint64, newAdd address.Address, inc bool) (cid.Cid, error) { +func (a *MsigAPI) MsigAddCancel(ctx context.Context, msig address.Address, src address.Address, txID uint64, newAdd address.Address, inc bool) (*api.MessagePrototype, error) { enc, actErr := serializeAddParams(newAdd, inc) if actErr != nil { - return cid.Undef, actErr + return nil, actErr } return a.MsigCancel(ctx, msig, txID, msig, big.Zero(), src, uint64(multisig.Methods.AddSigner), enc) } -func (a *MsigAPI) MsigSwapPropose(ctx context.Context, msig address.Address, src address.Address, oldAdd address.Address, newAdd address.Address) (cid.Cid, error) { +func (a *MsigAPI) MsigSwapPropose(ctx context.Context, msig address.Address, src address.Address, oldAdd address.Address, newAdd address.Address) (*api.MessagePrototype, error) { enc, actErr := serializeSwapParams(oldAdd, newAdd) if actErr != nil { - return cid.Undef, actErr + 
return nil, actErr } return a.MsigPropose(ctx, msig, msig, big.Zero(), src, uint64(multisig.Methods.SwapSigner), enc) } -func (a *MsigAPI) MsigSwapApprove(ctx context.Context, msig address.Address, src address.Address, txID uint64, proposer address.Address, oldAdd address.Address, newAdd address.Address) (cid.Cid, error) { +func (a *MsigAPI) MsigSwapApprove(ctx context.Context, msig address.Address, src address.Address, txID uint64, proposer address.Address, oldAdd address.Address, newAdd address.Address) (*api.MessagePrototype, error) { enc, actErr := serializeSwapParams(oldAdd, newAdd) if actErr != nil { - return cid.Undef, actErr + return nil, actErr } return a.MsigApproveTxnHash(ctx, msig, txID, proposer, msig, big.Zero(), src, uint64(multisig.Methods.SwapSigner), enc) } -func (a *MsigAPI) MsigSwapCancel(ctx context.Context, msig address.Address, src address.Address, txID uint64, oldAdd address.Address, newAdd address.Address) (cid.Cid, error) { +func (a *MsigAPI) MsigSwapCancel(ctx context.Context, msig address.Address, src address.Address, txID uint64, oldAdd address.Address, newAdd address.Address) (*api.MessagePrototype, error) { enc, actErr := serializeSwapParams(oldAdd, newAdd) if actErr != nil { - return cid.Undef, actErr + return nil, actErr } return a.MsigCancel(ctx, msig, txID, msig, big.Zero(), src, uint64(multisig.Methods.SwapSigner), enc) } -func (a *MsigAPI) MsigApprove(ctx context.Context, msig address.Address, txID uint64, src address.Address) (cid.Cid, error) { +func (a *MsigAPI) MsigApprove(ctx context.Context, msig address.Address, txID uint64, src address.Address) (*api.MessagePrototype, error) { return a.msigApproveOrCancelSimple(ctx, api.MsigApprove, msig, txID, src) } -func (a *MsigAPI) MsigApproveTxnHash(ctx context.Context, msig address.Address, txID uint64, proposer address.Address, to address.Address, amt types.BigInt, src address.Address, method uint64, params []byte) (cid.Cid, error) { +func (a *MsigAPI) MsigApproveTxnHash(ctx 
context.Context, msig address.Address, txID uint64, proposer address.Address, to address.Address, amt types.BigInt, src address.Address, method uint64, params []byte) (*api.MessagePrototype, error) { return a.msigApproveOrCancelTxnHash(ctx, api.MsigApprove, msig, txID, proposer, to, amt, src, method, params) } -func (a *MsigAPI) MsigCancel(ctx context.Context, msig address.Address, txID uint64, to address.Address, amt types.BigInt, src address.Address, method uint64, params []byte) (cid.Cid, error) { +func (a *MsigAPI) MsigCancel(ctx context.Context, msig address.Address, txID uint64, to address.Address, amt types.BigInt, src address.Address, method uint64, params []byte) (*api.MessagePrototype, error) { return a.msigApproveOrCancelTxnHash(ctx, api.MsigCancel, msig, txID, src, to, amt, src, method, params) } -func (a *MsigAPI) MsigRemoveSigner(ctx context.Context, msig address.Address, proposer address.Address, toRemove address.Address, decrease bool) (cid.Cid, error) { +func (a *MsigAPI) MsigRemoveSigner(ctx context.Context, msig address.Address, proposer address.Address, toRemove address.Address, decrease bool) (*api.MessagePrototype, error) { enc, actErr := serializeRemoveParams(toRemove, decrease) if actErr != nil { - return cid.Undef, actErr + return nil, actErr } return a.MsigPropose(ctx, msig, msig, types.NewInt(0), proposer, uint64(multisig.Methods.RemoveSigner), enc) } -func (a *MsigAPI) msigApproveOrCancelSimple(ctx context.Context, operation api.MsigProposeResponse, msig address.Address, txID uint64, src address.Address) (cid.Cid, error) { +func (a *MsigAPI) msigApproveOrCancelSimple(ctx context.Context, operation api.MsigProposeResponse, msig address.Address, txID uint64, src address.Address) (*api.MessagePrototype, error) { if msig == address.Undef { - return cid.Undef, xerrors.Errorf("must provide multisig address") + return nil, xerrors.Errorf("must provide multisig address") } if src == address.Undef { - return cid.Undef, xerrors.Errorf("must 
provide source address") + return nil, xerrors.Errorf("must provide source address") } mb, err := a.messageBuilder(ctx, src) if err != nil { - return cid.Undef, err + return nil, err } var msg *types.Message @@ -174,34 +168,31 @@ func (a *MsigAPI) msigApproveOrCancelSimple(ctx context.Context, operation api.M case api.MsigCancel: msg, err = mb.Cancel(msig, txID, nil) default: - return cid.Undef, xerrors.Errorf("Invalid operation for msigApproveOrCancel") + return nil, xerrors.Errorf("Invalid operation for msigApproveOrCancel") } if err != nil { - return cid.Undef, err + return nil, err } - smsg, err := a.MpoolAPI.MpoolPushMessage(ctx, msg, nil) - if err != nil { - return cid.Undef, err - } - - return smsg.Cid(), nil - + return &api.MessagePrototype{ + Message: *msg, + ValidNonce: false, + }, nil } -func (a *MsigAPI) msigApproveOrCancelTxnHash(ctx context.Context, operation api.MsigProposeResponse, msig address.Address, txID uint64, proposer address.Address, to address.Address, amt types.BigInt, src address.Address, method uint64, params []byte) (cid.Cid, error) { +func (a *MsigAPI) msigApproveOrCancelTxnHash(ctx context.Context, operation api.MsigProposeResponse, msig address.Address, txID uint64, proposer address.Address, to address.Address, amt types.BigInt, src address.Address, method uint64, params []byte) (*api.MessagePrototype, error) { if msig == address.Undef { - return cid.Undef, xerrors.Errorf("must provide multisig address") + return nil, xerrors.Errorf("must provide multisig address") } if src == address.Undef { - return cid.Undef, xerrors.Errorf("must provide source address") + return nil, xerrors.Errorf("must provide source address") } if proposer.Protocol() != address.ID { proposerID, err := a.StateAPI.StateLookupID(ctx, proposer, types.EmptyTSK) if err != nil { - return cid.Undef, err + return nil, err } proposer = proposerID } @@ -216,7 +207,7 @@ func (a *MsigAPI) msigApproveOrCancelTxnHash(ctx context.Context, operation api. 
mb, err := a.messageBuilder(ctx, src) if err != nil { - return cid.Undef, err + return nil, err } var msg *types.Message @@ -226,18 +217,16 @@ func (a *MsigAPI) msigApproveOrCancelTxnHash(ctx context.Context, operation api. case api.MsigCancel: msg, err = mb.Cancel(msig, txID, &p) default: - return cid.Undef, xerrors.Errorf("Invalid operation for msigApproveOrCancel") + return nil, xerrors.Errorf("Invalid operation for msigApproveOrCancel") } if err != nil { - return cid.Undef, err + return nil, err } - smsg, err := a.MpoolAPI.MpoolPushMessage(ctx, msg, nil) - if err != nil { - return cid.Undef, err - } - - return smsg.Cid(), nil + return &api.MessagePrototype{ + Message: *msg, + ValidNonce: false, + }, nil } func serializeAddParams(new address.Address, inc bool) ([]byte, error) { diff --git a/node/impl/full/state.go b/node/impl/full/state.go index de1b77b4f..d8545ae13 100644 --- a/node/impl/full/state.go +++ b/node/impl/full/state.go @@ -426,7 +426,7 @@ func (a *StateAPI) StateReplay(ctx context.Context, tsk types.TipSetKey, mc cid. }, nil } -func (m *StateModule) StateGetActor(ctx context.Context, actor address.Address, tsk types.TipSetKey) (*types.Actor, error) { +func (m *StateModule) StateGetActor(ctx context.Context, actor address.Address, tsk types.TipSetKey) (a *types.Actor, err error) { ts, err := m.Chain.GetTipSetFromKey(tsk) if err != nil { return nil, xerrors.Errorf("loading tipset %s: %w", tsk, err) @@ -705,7 +705,7 @@ func (a *StateAPI) StateChangedActors(ctx context.Context, old cid.Cid, new cid. 
return nil, xerrors.Errorf("failed to load new state tree: %w", err) } - return state.Diff(oldTree, newTree) + return state.Diff(ctx, oldTree, newTree) } func (a *StateAPI) StateMinerSectorCount(ctx context.Context, addr address.Address, tsk types.TipSetKey) (api.MinerSectors, error) { @@ -808,8 +808,31 @@ func (a *StateAPI) StateListMessages(ctx context.Context, match *api.MessageMatc if match.To == address.Undef && match.From == address.Undef { return nil, xerrors.Errorf("must specify at least To or From in message filter") + } else if match.To != address.Undef { + _, err := a.StateLookupID(ctx, match.To, tsk) + + // if the recipient doesn't exist at the start point, we're not gonna find any matches + if xerrors.Is(err, types.ErrActorNotFound) { + return nil, nil + } + + if err != nil { + return nil, xerrors.Errorf("looking up match.To: %w", err) + } + } else if match.From != address.Undef { + _, err := a.StateLookupID(ctx, match.From, tsk) + + // if the sender doesn't exist at the start point, we're not gonna find any matches + if xerrors.Is(err, types.ErrActorNotFound) { + return nil, nil + } + + if err != nil { + return nil, xerrors.Errorf("looking up match.From: %w", err) + } } + // TODO: This should probably match on both ID and robust address, no? 
matchFunc := func(msg *types.Message) bool { if match.From != address.Undef && match.From != msg.From { return false diff --git a/node/impl/full/sync.go b/node/impl/full/sync.go index 1a088fb77..2c697483b 100644 --- a/node/impl/full/sync.go +++ b/node/impl/full/sync.go @@ -104,7 +104,7 @@ func (a *SyncAPI) SyncIncomingBlocks(ctx context.Context) (<-chan *types.BlockHe func (a *SyncAPI) SyncCheckpoint(ctx context.Context, tsk types.TipSetKey) error { log.Warnf("Marking tipset %s as checkpoint", tsk) - return a.Syncer.SetCheckpoint(tsk) + return a.Syncer.SyncCheckpoint(ctx, tsk) } func (a *SyncAPI) SyncMarkBad(ctx context.Context, bcid cid.Cid) error { diff --git a/node/impl/remoteworker.go b/node/impl/remoteworker.go index 8dc7510b4..d27b3baff 100644 --- a/node/impl/remoteworker.go +++ b/node/impl/remoteworker.go @@ -38,6 +38,16 @@ func connectRemoteWorker(ctx context.Context, fa api.Common, url string) (*remot return nil, xerrors.Errorf("creating jsonrpc client: %w", err) } + wver, err := wapi.Version(ctx) + if err != nil { + closer() + return nil, err + } + + if !wver.EqMajorMinor(api.WorkerAPIVersion0) { + return nil, xerrors.Errorf("unsupported worker api version: %s (expected %s)", wver, api.WorkerAPIVersion0) + } + return &remoteWorker{wapi, closer}, nil } diff --git a/node/impl/storminer.go b/node/impl/storminer.go index 61c69b2ba..96761ecb5 100644 --- a/node/impl/storminer.go +++ b/node/impl/storminer.go @@ -8,6 +8,7 @@ import ( "strconv" "time" + "github.com/filecoin-project/go-jsonrpc/auth" "github.com/filecoin-project/lotus/chain/actors/builtin" "github.com/filecoin-project/lotus/chain/gen" @@ -23,9 +24,7 @@ import ( "github.com/filecoin-project/go-fil-markets/piecestore" retrievalmarket "github.com/filecoin-project/go-fil-markets/retrievalmarket" storagemarket "github.com/filecoin-project/go-fil-markets/storagemarket" - "github.com/filecoin-project/go-jsonrpc/auth" "github.com/filecoin-project/go-state-types/abi" - 
"github.com/filecoin-project/go-state-types/big" sectorstorage "github.com/filecoin-project/lotus/extern/sector-storage" "github.com/filecoin-project/lotus/extern/sector-storage/fsutil" @@ -49,54 +48,63 @@ import ( type StorageMinerAPI struct { common.CommonAPI - SectorBlocks *sectorblocks.SectorBlocks + Full api.FullNode + LocalStore *stores.Local + RemoteStore *stores.Remote - PieceStore dtypes.ProviderPieceStore - StorageProvider storagemarket.StorageProvider - RetrievalProvider retrievalmarket.RetrievalProvider - Miner *storage.Miner - BlockMiner *miner.Miner - Full api.FullNode - StorageMgr *sectorstorage.Manager `optional:"true"` - IStorageMgr sectorstorage.SectorManager - *stores.Index - storiface.WorkerReturn - DataTransfer dtypes.ProviderDataTransfer - Host host.Host - AddrSel *storage.AddressSelector - DealPublisher *storageadapter.DealPublisher + // Markets + PieceStore dtypes.ProviderPieceStore `optional:"true"` + StorageProvider storagemarket.StorageProvider `optional:"true"` + RetrievalProvider retrievalmarket.RetrievalProvider `optional:"true"` + DataTransfer dtypes.ProviderDataTransfer `optional:"true"` + DealPublisher *storageadapter.DealPublisher `optional:"true"` + SectorBlocks *sectorblocks.SectorBlocks `optional:"true"` + Host host.Host `optional:"true"` - Epp gen.WinningPoStProver + // Miner / storage + Miner *storage.Miner `optional:"true"` + BlockMiner *miner.Miner `optional:"true"` + StorageMgr *sectorstorage.Manager `optional:"true"` + IStorageMgr sectorstorage.SectorManager `optional:"true"` + stores.SectorIndex + storiface.WorkerReturn `optional:"true"` + AddrSel *storage.AddressSelector + + Epp gen.WinningPoStProver `optional:"true"` DS dtypes.MetadataDS - ConsiderOnlineStorageDealsConfigFunc dtypes.ConsiderOnlineStorageDealsConfigFunc - SetConsiderOnlineStorageDealsConfigFunc dtypes.SetConsiderOnlineStorageDealsConfigFunc - ConsiderOnlineRetrievalDealsConfigFunc dtypes.ConsiderOnlineRetrievalDealsConfigFunc - 
SetConsiderOnlineRetrievalDealsConfigFunc dtypes.SetConsiderOnlineRetrievalDealsConfigFunc - StorageDealPieceCidBlocklistConfigFunc dtypes.StorageDealPieceCidBlocklistConfigFunc - SetStorageDealPieceCidBlocklistConfigFunc dtypes.SetStorageDealPieceCidBlocklistConfigFunc - ConsiderOfflineStorageDealsConfigFunc dtypes.ConsiderOfflineStorageDealsConfigFunc - SetConsiderOfflineStorageDealsConfigFunc dtypes.SetConsiderOfflineStorageDealsConfigFunc - ConsiderOfflineRetrievalDealsConfigFunc dtypes.ConsiderOfflineRetrievalDealsConfigFunc - SetConsiderOfflineRetrievalDealsConfigFunc dtypes.SetConsiderOfflineRetrievalDealsConfigFunc - ConsiderVerifiedStorageDealsConfigFunc dtypes.ConsiderVerifiedStorageDealsConfigFunc - SetConsiderVerifiedStorageDealsConfigFunc dtypes.SetConsiderVerifiedStorageDealsConfigFunc - ConsiderUnverifiedStorageDealsConfigFunc dtypes.ConsiderUnverifiedStorageDealsConfigFunc - SetConsiderUnverifiedStorageDealsConfigFunc dtypes.SetConsiderUnverifiedStorageDealsConfigFunc - SetSealingConfigFunc dtypes.SetSealingConfigFunc - GetSealingConfigFunc dtypes.GetSealingConfigFunc - GetExpectedSealDurationFunc dtypes.GetExpectedSealDurationFunc - SetExpectedSealDurationFunc dtypes.SetExpectedSealDurationFunc + ConsiderOnlineStorageDealsConfigFunc dtypes.ConsiderOnlineStorageDealsConfigFunc `optional:"true"` + SetConsiderOnlineStorageDealsConfigFunc dtypes.SetConsiderOnlineStorageDealsConfigFunc `optional:"true"` + ConsiderOnlineRetrievalDealsConfigFunc dtypes.ConsiderOnlineRetrievalDealsConfigFunc `optional:"true"` + SetConsiderOnlineRetrievalDealsConfigFunc dtypes.SetConsiderOnlineRetrievalDealsConfigFunc `optional:"true"` + StorageDealPieceCidBlocklistConfigFunc dtypes.StorageDealPieceCidBlocklistConfigFunc `optional:"true"` + SetStorageDealPieceCidBlocklistConfigFunc dtypes.SetStorageDealPieceCidBlocklistConfigFunc `optional:"true"` + ConsiderOfflineStorageDealsConfigFunc dtypes.ConsiderOfflineStorageDealsConfigFunc `optional:"true"` + 
SetConsiderOfflineStorageDealsConfigFunc dtypes.SetConsiderOfflineStorageDealsConfigFunc `optional:"true"` + ConsiderOfflineRetrievalDealsConfigFunc dtypes.ConsiderOfflineRetrievalDealsConfigFunc `optional:"true"` + SetConsiderOfflineRetrievalDealsConfigFunc dtypes.SetConsiderOfflineRetrievalDealsConfigFunc `optional:"true"` + ConsiderVerifiedStorageDealsConfigFunc dtypes.ConsiderVerifiedStorageDealsConfigFunc `optional:"true"` + SetConsiderVerifiedStorageDealsConfigFunc dtypes.SetConsiderVerifiedStorageDealsConfigFunc `optional:"true"` + ConsiderUnverifiedStorageDealsConfigFunc dtypes.ConsiderUnverifiedStorageDealsConfigFunc `optional:"true"` + SetConsiderUnverifiedStorageDealsConfigFunc dtypes.SetConsiderUnverifiedStorageDealsConfigFunc `optional:"true"` + SetSealingConfigFunc dtypes.SetSealingConfigFunc `optional:"true"` + GetSealingConfigFunc dtypes.GetSealingConfigFunc `optional:"true"` + GetExpectedSealDurationFunc dtypes.GetExpectedSealDurationFunc `optional:"true"` + SetExpectedSealDurationFunc dtypes.SetExpectedSealDurationFunc `optional:"true"` } -func (sm *StorageMinerAPI) ServeRemote(w http.ResponseWriter, r *http.Request) { - if !auth.HasPerm(r.Context(), nil, api.PermAdmin) { - w.WriteHeader(401) - _ = json.NewEncoder(w).Encode(struct{ Error string }{"unauthorized: missing write permission"}) - return - } +func (sm *StorageMinerAPI) ServeRemote(perm bool) func(w http.ResponseWriter, r *http.Request) { + return func(w http.ResponseWriter, r *http.Request) { + if perm == true { + if !auth.HasPerm(r.Context(), nil, api.PermAdmin) { + w.WriteHeader(401) + _ = json.NewEncoder(w).Encode(struct{ Error string }{"unauthorized: missing write permission"}) + return + } + } - sm.StorageMgr.ServeHTTP(w, r) + sm.StorageMgr.ServeHTTP(w, r) + } } func (sm *StorageMinerAPI) WorkerStats(context.Context) (map[uuid.UUID]storiface.WorkerStats, error) { @@ -136,12 +144,12 @@ func (sm *StorageMinerAPI) PledgeSector(ctx context.Context) (abi.SectorID, erro // wait for the 
sector to enter the Packing state // TODO: instead of polling implement some pubsub-type thing in storagefsm for { - info, err := sm.Miner.GetSectorInfo(sr.ID.Number) + info, err := sm.Miner.SectorsStatus(ctx, sr.ID.Number, false) if err != nil { return abi.SectorID{}, xerrors.Errorf("getting pledged sector info: %w", err) } - if info.State != sealing.UndefinedSectorState { + if info.State != api.SectorState(sealing.UndefinedSectorState) { return sr.ID, nil } @@ -154,62 +162,11 @@ func (sm *StorageMinerAPI) PledgeSector(ctx context.Context) (abi.SectorID, erro } func (sm *StorageMinerAPI) SectorsStatus(ctx context.Context, sid abi.SectorNumber, showOnChainInfo bool) (api.SectorInfo, error) { - info, err := sm.Miner.GetSectorInfo(sid) + sInfo, err := sm.Miner.SectorsStatus(ctx, sid, false) if err != nil { return api.SectorInfo{}, err } - deals := make([]abi.DealID, len(info.Pieces)) - for i, piece := range info.Pieces { - if piece.DealInfo == nil { - continue - } - deals[i] = piece.DealInfo.DealID - } - - log := make([]api.SectorLog, len(info.Log)) - for i, l := range info.Log { - log[i] = api.SectorLog{ - Kind: l.Kind, - Timestamp: l.Timestamp, - Trace: l.Trace, - Message: l.Message, - } - } - - sInfo := api.SectorInfo{ - SectorID: sid, - State: api.SectorState(info.State), - CommD: info.CommD, - CommR: info.CommR, - Proof: info.Proof, - Deals: deals, - Ticket: api.SealTicket{ - Value: info.TicketValue, - Epoch: info.TicketEpoch, - }, - Seed: api.SealSeed{ - Value: info.SeedValue, - Epoch: info.SeedEpoch, - }, - PreCommitMsg: info.PreCommitMessage, - CommitMsg: info.CommitMessage, - Retries: info.InvalidProofs, - ToUpgrade: sm.Miner.IsMarkedForUpgrade(sid), - - LastErr: info.LastErr, - Log: log, - // on chain info - SealProof: 0, - Activation: 0, - Expiration: 0, - DealWeight: big.Zero(), - VerifiedDealWeight: big.Zero(), - InitialPledge: big.Zero(), - OnTime: 0, - Early: 0, - } - if !showOnChainInfo { return sInfo, nil } @@ -238,6 +195,14 @@ func (sm 
*StorageMinerAPI) SectorsStatus(ctx context.Context, sid abi.SectorNumb return sInfo, nil } +func (sm *StorageMinerAPI) SectorAddPieceToAny(ctx context.Context, size abi.UnpaddedPieceSize, r sto.Data, d api.PieceDealInfo) (api.SectorOffset, error) { + return sm.Miner.SectorAddPieceToAny(ctx, size, r, d) +} + +func (sm *StorageMinerAPI) SectorsUnsealPiece(ctx context.Context, sector sto.SectorRef, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, randomness abi.SealRandomness, commd *cid.Cid) error { + return sm.StorageMgr.SectorsUnsealPiece(ctx, sector, offset, size, randomness, commd) +} + // List all staged sectors func (sm *StorageMinerAPI) SectorsList(context.Context) ([]abi.SectorNumber, error) { sectors, err := sm.Miner.ListSectors() @@ -300,7 +265,17 @@ func (sm *StorageMinerAPI) SectorsSummary(ctx context.Context) (map[api.SectorSt } func (sm *StorageMinerAPI) StorageLocal(ctx context.Context) (map[stores.ID]string, error) { - return sm.StorageMgr.StorageLocal(ctx) + l, err := sm.LocalStore.Local(ctx) + if err != nil { + return nil, err + } + + out := map[stores.ID]string{} + for _, st := range l { + out[st.ID] = st.LocalPath + } + + return out, nil } func (sm *StorageMinerAPI) SectorsRefs(context.Context) (map[string][]api.SealedRef, error) { @@ -320,7 +295,7 @@ func (sm *StorageMinerAPI) SectorsRefs(context.Context) (map[string][]api.Sealed } func (sm *StorageMinerAPI) StorageStat(ctx context.Context, id stores.ID) (fsutil.FsStat, error) { - return sm.StorageMgr.FsStat(ctx, id) + return sm.RemoteStore.FsStat(ctx, id) } func (sm *StorageMinerAPI) SectorStartSealing(ctx context.Context, number abi.SectorNumber) error { @@ -454,6 +429,11 @@ func (sm *StorageMinerAPI) MarketListRetrievalDeals(ctx context.Context) ([]retr deals := sm.RetrievalProvider.ListDeals() for _, deal := range deals { + if deal.ChannelID != nil { + if deal.ChannelID.Initiator == "" || deal.ChannelID.Responder == "" { + deal.ChannelID = nil // don't try to push unparsable 
peer IDs over jsonrpc + } + } out = append(out, deal) } @@ -683,7 +663,7 @@ func (sm *StorageMinerAPI) CheckProvable(ctx context.Context, pp abi.RegisteredP var rg storiface.RGetter if expensive { rg = func(ctx context.Context, id abi.SectorID) (cid.Cid, error) { - si, err := sm.Miner.GetSectorInfo(id.Number) + si, err := sm.Miner.SectorsStatus(ctx, id.Number, false) if err != nil { return cid.Undef, err } diff --git a/node/modules/blockstore.go b/node/modules/blockstore.go index 787d782b7..d113e9037 100644 --- a/node/modules/blockstore.go +++ b/node/modules/blockstore.go @@ -37,6 +37,10 @@ func UniversalBlockstore(lc fx.Lifecycle, mctx helpers.MetricsCtx, r repo.Locked return bs, err } +func DiscardColdBlockstore(lc fx.Lifecycle, bs dtypes.UniversalBlockstore) (dtypes.ColdBlockstore, error) { + return blockstore.NewDiscardStore(bs), nil +} + func BadgerHotBlockstore(lc fx.Lifecycle, r repo.LockedRepo) (dtypes.HotBlockstore, error) { path, err := r.SplitstorePath() if err != nil { @@ -66,19 +70,16 @@ func BadgerHotBlockstore(lc fx.Lifecycle, r repo.LockedRepo) (dtypes.HotBlocksto return bs, nil } -func SplitBlockstore(cfg *config.Chainstore) func(lc fx.Lifecycle, r repo.LockedRepo, ds dtypes.MetadataDS, cold dtypes.UniversalBlockstore, hot dtypes.HotBlockstore) (dtypes.SplitBlockstore, error) { - return func(lc fx.Lifecycle, r repo.LockedRepo, ds dtypes.MetadataDS, cold dtypes.UniversalBlockstore, hot dtypes.HotBlockstore) (dtypes.SplitBlockstore, error) { +func SplitBlockstore(cfg *config.Chainstore) func(lc fx.Lifecycle, r repo.LockedRepo, ds dtypes.MetadataDS, cold dtypes.ColdBlockstore, hot dtypes.HotBlockstore) (dtypes.SplitBlockstore, error) { + return func(lc fx.Lifecycle, r repo.LockedRepo, ds dtypes.MetadataDS, cold dtypes.ColdBlockstore, hot dtypes.HotBlockstore) (dtypes.SplitBlockstore, error) { path, err := r.SplitstorePath() if err != nil { return nil, err } cfg := &splitstore.Config{ - TrackingStoreType: cfg.Splitstore.TrackingStoreType, - 
MarkSetType: cfg.Splitstore.MarkSetType, - EnableFullCompaction: cfg.Splitstore.EnableFullCompaction, - EnableGC: cfg.Splitstore.EnableGC, - Archival: cfg.Splitstore.Archival, + MarkSetType: cfg.Splitstore.MarkSetType, + DiscardColdBlocks: cfg.Splitstore.ColdStoreType == "discard", } ss, err := splitstore.Open(path, ds, hot, cold, cfg) if err != nil { @@ -94,6 +95,18 @@ func SplitBlockstore(cfg *config.Chainstore) func(lc fx.Lifecycle, r repo.Locked } } +func SplitBlockstoreGCReferenceProtector(_ fx.Lifecycle, s dtypes.SplitBlockstore) dtypes.GCReferenceProtector { + return s.(dtypes.GCReferenceProtector) +} + +func NoopGCReferenceProtector(_ fx.Lifecycle) dtypes.GCReferenceProtector { + return dtypes.NoopGCReferenceProtector{} +} + +func ExposedSplitBlockstore(_ fx.Lifecycle, s dtypes.SplitBlockstore) dtypes.ExposedBlockstore { + return s.(*splitstore.SplitStore).Expose() +} + func StateFlatBlockstore(_ fx.Lifecycle, _ helpers.MetricsCtx, bs dtypes.UniversalBlockstore) (dtypes.BasicStateBlockstore, error) { return bs, nil } diff --git a/node/modules/chain.go b/node/modules/chain.go index ffdf3aa3a..c4017b8c0 100644 --- a/node/modules/chain.go +++ b/node/modules/chain.go @@ -9,7 +9,6 @@ import ( "github.com/ipfs/go-blockservice" "github.com/libp2p/go-libp2p-core/host" "github.com/libp2p/go-libp2p-core/routing" - pubsub "github.com/libp2p/go-libp2p-pubsub" "go.uber.org/fx" "golang.org/x/xerrors" @@ -59,8 +58,7 @@ func ChainBlockService(bs dtypes.ExposedBlockstore, rem dtypes.ChainBitswap) dty return blockservice.New(bs, rem) } -func MessagePool(lc fx.Lifecycle, sm *stmgr.StateManager, ps *pubsub.PubSub, ds dtypes.MetadataDS, nn dtypes.NetworkName, j journal.Journal) (*messagepool.MessagePool, error) { - mpp := messagepool.NewProvider(sm, ps) +func MessagePool(lc fx.Lifecycle, mpp messagepool.Provider, ds dtypes.MetadataDS, nn dtypes.NetworkName, j journal.Journal, protector dtypes.GCReferenceProtector) (*messagepool.MessagePool, error) { mp, err := 
messagepool.New(mpp, ds, nn, j) if err != nil { return nil, xerrors.Errorf("constructing mpool: %w", err) @@ -70,6 +68,7 @@ func MessagePool(lc fx.Lifecycle, sm *stmgr.StateManager, ps *pubsub.PubSub, ds return mp.Close() }, }) + protector.AddProtector(mp.ForEachPendingMessage) return mp, nil } diff --git a/node/modules/client.go b/node/modules/client.go index c5dbff9bd..e0bcc13c7 100644 --- a/node/modules/client.go +++ b/node/modules/client.go @@ -134,26 +134,21 @@ func NewClientGraphsyncDataTransfer(lc fx.Lifecycle, h host.Host, gs dtypes.Grap // data-transfer push / pull channel restart configuration: dtRestartConfig := dtimpl.ChannelRestartConfig(channelmonitor.Config{ - // For now only monitor push channels (for storage deals) - MonitorPushChannels: true, - // TODO: Enable pull channel monitoring (for retrievals) when the - // following issue has been fixed: - // https://github.com/filecoin-project/go-data-transfer/issues/172 - MonitorPullChannels: false, - // Wait up to 30s for the other side to respond to an Open channel message - AcceptTimeout: 30 * time.Second, - // Send a restart message if the data rate falls below 1024 bytes / minute - Interval: time.Minute, - MinBytesTransferred: 1024, - // Perform check 10 times / minute - ChecksPerInterval: 10, + // Disable Accept and Complete timeouts until this issue is resolved: + // https://github.com/filecoin-project/lotus/issues/6343# + // Wait for the other side to respond to an Open channel message + AcceptTimeout: 0, + // Wait for the other side to send a Complete message once all + // data has been sent / received + CompleteTimeout: 0, + + // When an error occurs, wait a little while until all related errors + // have fired before sending a restart message + RestartDebounce: 10 * time.Second, // After sending a restart, wait for at least 1 minute before sending another RestartBackoff: time.Minute, // After trying to restart 3 times, give up and fail the transfer MaxConsecutiveRestarts: 3, - // Wait up to 
30s for the other side to send a Complete message once all - // data has been sent / received - CompleteTimeout: 30 * time.Second, }) dt, err := dtimpl.NewDataTransfer(dtDs, filepath.Join(r.Path(), "data-transfer"), net, transport, dtRestartConfig) if err != nil { diff --git a/node/modules/dtypes/miner.go b/node/modules/dtypes/miner.go index 5c8abf3ff..9a391223d 100644 --- a/node/modules/dtypes/miner.go +++ b/node/modules/dtypes/miner.go @@ -74,10 +74,12 @@ type ConsiderUnverifiedStorageDealsConfigFunc func() (bool, error) // disable or enable unverified storage deal acceptance. type SetConsiderUnverifiedStorageDealsConfigFunc func(bool) error -// SetSealingDelay sets how long a sector waits for more deals before sealing begins. +// SetSealingConfigFunc is a function which is used to +// sets the sealing config. type SetSealingConfigFunc func(sealiface.Config) error -// GetSealingDelay returns how long a sector waits for more deals before sealing begins. +// GetSealingConfigFunc is a function which is used to +// get the sealing config. type GetSealingConfigFunc func() (sealiface.Config, error) // SetExpectedSealDurationFunc is a function which is used to set how long sealing is expected to take. 
@@ -93,3 +95,5 @@ type GetMaxDealStartDelayFunc func() (time.Duration, error) type StorageDealFilter func(ctx context.Context, deal storagemarket.MinerDeal) (bool, string, error) type RetrievalDealFilter func(ctx context.Context, deal retrievalmarket.ProviderDealState) (bool, string, error) + +type RetrievalPricingFunc func(ctx context.Context, dealPricingParams retrievalmarket.PricingInput) (retrievalmarket.Ask, error) diff --git a/node/modules/dtypes/protector.go b/node/modules/dtypes/protector.go new file mode 100644 index 000000000..0d9625fc1 --- /dev/null +++ b/node/modules/dtypes/protector.go @@ -0,0 +1,13 @@ +package dtypes + +import ( + cid "github.com/ipfs/go-cid" +) + +type GCReferenceProtector interface { + AddProtector(func(func(cid.Cid) error) error) +} + +type NoopGCReferenceProtector struct{} + +func (p NoopGCReferenceProtector) AddProtector(func(func(cid.Cid) error) error) {} diff --git a/node/modules/dtypes/storage.go b/node/modules/dtypes/storage.go index e35d02811..b4420f701 100644 --- a/node/modules/dtypes/storage.go +++ b/node/modules/dtypes/storage.go @@ -24,9 +24,12 @@ import ( type MetadataDS datastore.Batching type ( - // UniversalBlockstore is the cold blockstore. + // UniversalBlockstore is the universal blockstore backend. 
UniversalBlockstore blockstore.Blockstore + // ColdBlockstore is the Cold blockstore abstraction for the splitstore + ColdBlockstore blockstore.Blockstore + // HotBlockstore is the Hot blockstore abstraction for the splitstore HotBlockstore blockstore.Blockstore @@ -83,6 +86,7 @@ type ClientDataTransfer datatransfer.Manager type ProviderDealStore *statestore.StateStore type ProviderPieceStore piecestore.PieceStore + type ProviderRequestValidator *requestvalidation.UnifiedRequestValidator // ProviderDataTransfer is a data transfer manager for the provider diff --git a/node/modules/lp2p/pubsub.go b/node/modules/lp2p/pubsub.go index 748167d95..32b85daf3 100644 --- a/node/modules/lp2p/pubsub.go +++ b/node/modules/lp2p/pubsub.go @@ -36,6 +36,15 @@ func init() { pubsub.GossipSubHistoryLength = 10 pubsub.GossipSubGossipFactor = 0.1 } + +const ( + GossipScoreThreshold = -500 + PublishScoreThreshold = -1000 + GraylistScoreThreshold = -2500 + AcceptPXScoreThreshold = 1000 + OpportunisticGraftScoreThreshold = 3.5 +) + func ScoreKeeper() *dtypes.ScoreKeeper { return new(dtypes.ScoreKeeper) } @@ -256,11 +265,11 @@ func GossipSub(in GossipIn) (service *pubsub.PubSub, err error) { Topics: topicParams, }, &pubsub.PeerScoreThresholds{ - GossipThreshold: -500, - PublishThreshold: -1000, - GraylistThreshold: -2500, - AcceptPXThreshold: 1000, - OpportunisticGraftThreshold: 3.5, + GossipThreshold: GossipScoreThreshold, + PublishThreshold: PublishScoreThreshold, + GraylistThreshold: GraylistScoreThreshold, + AcceptPXThreshold: AcceptPXScoreThreshold, + OpportunisticGraftThreshold: OpportunisticGraftScoreThreshold, }, ), pubsub.WithPeerScoreInspect(in.Sk.Update, 10*time.Second), diff --git a/node/modules/mpoolnonceapi.go b/node/modules/mpoolnonceapi.go index 61b38e821..67f512960 100644 --- a/node/modules/mpoolnonceapi.go +++ b/node/modules/mpoolnonceapi.go @@ -63,7 +63,7 @@ func (a *MpoolNonceAPI) GetNonce(ctx context.Context, addr address.Address, tsk act, err := 
a.StateModule.StateGetActor(ctx, keyAddr, ts.Key()) if err != nil { if strings.Contains(err.Error(), types.ErrActorNotFound.Error()) { - return 0, types.ErrActorNotFound + return 0, xerrors.Errorf("getting actor converted: %w", types.ErrActorNotFound) } return 0, xerrors.Errorf("getting actor: %w", err) } @@ -96,4 +96,13 @@ func (a *MpoolNonceAPI) GetNonce(ctx context.Context, addr address.Address, tsk return highestNonce, nil } +func (a *MpoolNonceAPI) GetActor(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*types.Actor, error) { + act, err := a.StateModule.StateGetActor(ctx, addr, tsk) + if err != nil { + return nil, xerrors.Errorf("calling StateGetActor: %w", err) + } + + return act, nil +} + var _ messagesigner.MpoolNonceAPI = (*MpoolNonceAPI)(nil) diff --git a/node/modules/storageminer.go b/node/modules/storageminer.go index 44b93a49e..3a3914e0c 100644 --- a/node/modules/storageminer.go +++ b/node/modules/storageminer.go @@ -8,8 +8,10 @@ import ( "net/http" "os" "path/filepath" + "strings" "time" + "github.com/filecoin-project/lotus/markets/pricing" "go.uber.org/fx" "go.uber.org/multierr" "golang.org/x/xerrors" @@ -43,7 +45,7 @@ import ( smnet "github.com/filecoin-project/go-fil-markets/storagemarket/network" "github.com/filecoin-project/go-jsonrpc/auth" "github.com/filecoin-project/go-multistore" - paramfetch "github.com/filecoin-project/go-paramfetch" + "github.com/filecoin-project/go-paramfetch" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-statestore" "github.com/filecoin-project/go-storedcounter" @@ -66,7 +68,6 @@ import ( "github.com/filecoin-project/lotus/journal" "github.com/filecoin-project/lotus/markets" marketevents "github.com/filecoin-project/lotus/markets/loggers" - "github.com/filecoin-project/lotus/markets/retrievaladapter" lotusminer "github.com/filecoin-project/lotus/miner" "github.com/filecoin-project/lotus/node/config" "github.com/filecoin-project/lotus/node/modules/dtypes" @@ -187,6 
+188,15 @@ func AddressSelector(addrConf *config.MinerAddressConfig) func() (*storage.Addre as.TerminateControl = append(as.TerminateControl, addr) } + for _, s := range addrConf.DealPublishControl { + addr, err := address.NewFromString(s) + if err != nil { + return nil, xerrors.Errorf("parsing deal publishing control address: %w", err) + } + + as.DealPublishControl = append(as.DealPublishControl, addr) + } + return as, nil } } @@ -197,7 +207,6 @@ type StorageMinerParams struct { Lifecycle fx.Lifecycle MetricsCtx helpers.MetricsCtx API v1api.FullNode - Host host.Host MetadataDS dtypes.MetadataDS Sealer sectorstorage.SectorManager SectorIDCounter sealing.SectorIDCounter @@ -216,7 +225,6 @@ func StorageMiner(fc config.MinerFeeConfig) func(params StorageMinerParams) (*st lc = params.Lifecycle api = params.API sealer = params.Sealer - h = params.Host sc = params.SectorIDCounter verif = params.Verifier prover = params.Prover @@ -237,7 +245,7 @@ func StorageMiner(fc config.MinerFeeConfig) func(params StorageMinerParams) (*st return nil, err } - sm, err := storage.NewMiner(api, maddr, h, ds, sealer, sc, verif, prover, gsd, fc, j, as) + sm, err := storage.NewMiner(api, maddr, ds, sealer, sc, verif, prover, gsd, fc, j, as) if err != nil { return nil, err } @@ -430,13 +438,15 @@ func StagingDAG(mctx helpers.MetricsCtx, lc fx.Lifecycle, ibs dtypes.StagingBloc // StagingGraphsync creates a graphsync instance which reads and writes blocks // to the StagingBlockstore -func StagingGraphsync(mctx helpers.MetricsCtx, lc fx.Lifecycle, ibs dtypes.StagingBlockstore, h host.Host) dtypes.StagingGraphsync { - graphsyncNetwork := gsnet.NewFromLibp2pHost(h) - loader := storeutil.LoaderForBlockstore(ibs) - storer := storeutil.StorerForBlockstore(ibs) - gs := graphsync.New(helpers.LifecycleCtx(mctx, lc), graphsyncNetwork, loader, storer, graphsync.RejectAllRequestsByDefault()) +func StagingGraphsync(parallelTransfers uint64) func(mctx helpers.MetricsCtx, lc fx.Lifecycle, ibs 
dtypes.StagingBlockstore, h host.Host) dtypes.StagingGraphsync { + return func(mctx helpers.MetricsCtx, lc fx.Lifecycle, ibs dtypes.StagingBlockstore, h host.Host) dtypes.StagingGraphsync { + graphsyncNetwork := gsnet.NewFromLibp2pHost(h) + loader := storeutil.LoaderForBlockstore(ibs) + storer := storeutil.StorerForBlockstore(ibs) + gs := graphsync.New(helpers.LifecycleCtx(mctx, lc), graphsyncNetwork, loader, storer, graphsync.RejectAllRequestsByDefault(), graphsync.MaxInProgressRequests(parallelTransfers)) - return gs + return gs + } } func SetupBlockProducer(lc fx.Lifecycle, ds dtypes.MetadataDS, api v1api.FullNode, epp gen.WinningPoStProver, sf *slashfilter.SlashFilter, j journal.Journal) (*lotusminer.Miner, error) { @@ -640,42 +650,60 @@ func RetrievalDealFilter(userFilter dtypes.RetrievalDealFilter) func(onlineOk dt } } +func RetrievalNetwork(h host.Host) rmnet.RetrievalMarketNetwork { + return rmnet.NewFromLibp2pHost(h) +} + +// RetrievalPricingFunc configures the pricing function to use for retrieval deals. 
+func RetrievalPricingFunc(cfg config.DealmakingConfig) func(_ dtypes.ConsiderOnlineRetrievalDealsConfigFunc, + _ dtypes.ConsiderOfflineRetrievalDealsConfigFunc) dtypes.RetrievalPricingFunc { + + return func(_ dtypes.ConsiderOnlineRetrievalDealsConfigFunc, + _ dtypes.ConsiderOfflineRetrievalDealsConfigFunc) dtypes.RetrievalPricingFunc { + if cfg.RetrievalPricing.Strategy == config.RetrievalPricingExternalMode { + return pricing.ExternalRetrievalPricingFunc(cfg.RetrievalPricing.External.Path) + } + + return retrievalimpl.DefaultPricingFunc(cfg.RetrievalPricing.Default.VerifiedDealsFreeTransfer) + } +} + // RetrievalProvider creates a new retrieval provider attached to the provider blockstore -func RetrievalProvider(h host.Host, - miner *storage.Miner, - sealer sectorstorage.SectorManager, - full v1api.FullNode, +func RetrievalProvider( + maddr dtypes.MinerAddress, + adapter retrievalmarket.RetrievalProviderNode, + netwk rmnet.RetrievalMarketNetwork, ds dtypes.MetadataDS, pieceStore dtypes.ProviderPieceStore, mds dtypes.StagingMultiDstore, dt dtypes.ProviderDataTransfer, - onlineOk dtypes.ConsiderOnlineRetrievalDealsConfigFunc, - offlineOk dtypes.ConsiderOfflineRetrievalDealsConfigFunc, + pricingFnc dtypes.RetrievalPricingFunc, userFilter dtypes.RetrievalDealFilter, ) (retrievalmarket.RetrievalProvider, error) { - adapter := retrievaladapter.NewRetrievalProviderNode(miner, sealer, full) - - maddr, err := minerAddrFromDS(ds) - if err != nil { - return nil, err - } - - netwk := rmnet.NewFromLibp2pHost(h) opt := retrievalimpl.DealDeciderOpt(retrievalimpl.DealDecider(userFilter)) - - return retrievalimpl.NewProvider(maddr, adapter, netwk, pieceStore, mds, dt, namespace.Wrap(ds, datastore.NewKey("/retrievals/provider")), opt) + return retrievalimpl.NewProvider(address.Address(maddr), adapter, netwk, pieceStore, mds, dt, namespace.Wrap(ds, datastore.NewKey("/retrievals/provider")), + retrievalimpl.RetrievalPricingFunc(pricingFnc), opt) } var WorkerCallsPrefix = 
datastore.NewKey("/worker/calls") var ManagerWorkPrefix = datastore.NewKey("/stmgr/calls") -func SectorStorage(mctx helpers.MetricsCtx, lc fx.Lifecycle, ls stores.LocalStorage, si stores.SectorIndex, sc sectorstorage.SealerConfig, urls sectorstorage.URLs, sa sectorstorage.StorageAuth, ds dtypes.MetadataDS) (*sectorstorage.Manager, error) { +func LocalStorage(mctx helpers.MetricsCtx, lc fx.Lifecycle, ls stores.LocalStorage, si stores.SectorIndex, urls stores.URLs) (*stores.Local, error) { + ctx := helpers.LifecycleCtx(mctx, lc) + return stores.NewLocal(ctx, ls, si, urls) +} + +func RemoteStorage(lstor *stores.Local, si stores.SectorIndex, sa sectorstorage.StorageAuth, sc sectorstorage.SealerConfig) *stores.Remote { + return stores.NewRemote(lstor, si, http.Header(sa), sc.ParallelFetchLimit, &stores.DefaultPartialFileHandler{}) +} + +func SectorStorage(mctx helpers.MetricsCtx, lc fx.Lifecycle, lstor *stores.Local, stor *stores.Remote, ls stores.LocalStorage, si stores.SectorIndex, sc sectorstorage.SealerConfig, ds dtypes.MetadataDS) (*sectorstorage.Manager, error) { ctx := helpers.LifecycleCtx(mctx, lc) wsts := statestore.New(namespace.Wrap(ds, WorkerCallsPrefix)) smsts := statestore.New(namespace.Wrap(ds, ManagerWorkPrefix)) - sst, err := sectorstorage.New(ctx, ls, si, sc, urls, sa, wsts, smsts) + sst, err := sectorstorage.New(ctx, lstor, stor, ls, si, sc, wsts, smsts) if err != nil { return nil, err } @@ -698,6 +726,18 @@ func StorageAuth(ctx helpers.MetricsCtx, ca v0api.Common) (sectorstorage.Storage return sectorstorage.StorageAuth(headers), nil } +func StorageAuthWithURL(apiInfo string) func(ctx helpers.MetricsCtx, ca v0api.Common) (sectorstorage.StorageAuth, error) { + return func(ctx helpers.MetricsCtx, ca v0api.Common) (sectorstorage.StorageAuth, error) { + s := strings.Split(apiInfo, ":") + if len(s) != 2 { + return nil, errors.New("unexpected format of `apiInfo`") + } + headers := http.Header{} + headers.Add("Authorization", "Bearer "+s[0]) + return 
sectorstorage.StorageAuth(headers), nil + } +} + func NewConsiderOnlineStorageDealsConfigFunc(r repo.LockedRepo) (dtypes.ConsiderOnlineStorageDealsConfigFunc, error) { return func() (out bool, err error) { err = readCfg(r, func(cfg *config.StorageMiner) { @@ -835,6 +875,10 @@ func NewSetSealConfigFunc(r repo.LockedRepo) (dtypes.SetSealingConfigFunc, error AlwaysKeepUnsealedCopy: cfg.AlwaysKeepUnsealedCopy, FinalizeEarly: cfg.FinalizeEarly, + CollateralFromMinerBalance: cfg.CollateralFromMinerBalance, + AvailableBalanceBuffer: types.FIL(cfg.AvailableBalanceBuffer), + DisableCollateralFallback: cfg.DisableCollateralFallback, + BatchPreCommits: cfg.BatchPreCommits, MaxPreCommitBatch: cfg.MaxPreCommitBatch, PreCommitBatchWait: config.Duration(cfg.PreCommitBatchWait), @@ -865,6 +909,10 @@ func ToSealingConfig(cfg *config.StorageMiner) sealiface.Config { AlwaysKeepUnsealedCopy: cfg.Sealing.AlwaysKeepUnsealedCopy, FinalizeEarly: cfg.Sealing.FinalizeEarly, + CollateralFromMinerBalance: cfg.Sealing.CollateralFromMinerBalance, + AvailableBalanceBuffer: types.BigInt(cfg.Sealing.AvailableBalanceBuffer), + DisableCollateralFallback: cfg.Sealing.DisableCollateralFallback, + BatchPreCommits: cfg.Sealing.BatchPreCommits, MaxPreCommitBatch: cfg.Sealing.MaxPreCommitBatch, PreCommitBatchWait: time.Duration(cfg.Sealing.PreCommitBatchWait), diff --git a/node/modules/storageminer_svc.go b/node/modules/storageminer_svc.go new file mode 100644 index 000000000..0a4be2192 --- /dev/null +++ b/node/modules/storageminer_svc.go @@ -0,0 +1,71 @@ +package modules + +import ( + "context" + + "github.com/filecoin-project/lotus/storage/sectorblocks" + + "go.uber.org/fx" + "golang.org/x/xerrors" + + "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/api/client" + cliutil "github.com/filecoin-project/lotus/cli/util" + "github.com/filecoin-project/lotus/node/modules/helpers" +) + +type MinerSealingService api.StorageMiner +type MinerStorageService api.StorageMiner + +var _ 
sectorblocks.SectorBuilder = *new(MinerSealingService) + +func connectMinerService(apiInfo string) func(mctx helpers.MetricsCtx, lc fx.Lifecycle) (api.StorageMiner, error) { + return func(mctx helpers.MetricsCtx, lc fx.Lifecycle) (api.StorageMiner, error) { + ctx := helpers.LifecycleCtx(mctx, lc) + info := cliutil.ParseApiInfo(apiInfo) + addr, err := info.DialArgs("v0") + if err != nil { + return nil, xerrors.Errorf("could not get DialArgs: %w", err) + } + + log.Infof("Checking (svc) api version of %s", addr) + + mapi, closer, err := client.NewStorageMinerRPCV0(ctx, addr, info.AuthHeader()) + if err != nil { + return nil, err + } + lc.Append(fx.Hook{ + OnStart: func(ctx context.Context) error { + v, err := mapi.Version(ctx) + if err != nil { + return xerrors.Errorf("checking version: %w", err) + } + + if !v.APIVersion.EqMajorMinor(api.MinerAPIVersion0) { + return xerrors.Errorf("remote service API version didn't match (expected %s, remote %s)", api.MinerAPIVersion0, v.APIVersion) + } + + return nil + }, + OnStop: func(context.Context) error { + closer() + return nil + }}) + + return mapi, nil + } +} + +func ConnectSealingService(apiInfo string) func(mctx helpers.MetricsCtx, lc fx.Lifecycle) (MinerSealingService, error) { + return func(mctx helpers.MetricsCtx, lc fx.Lifecycle) (MinerSealingService, error) { + log.Info("Connecting sealing service to miner") + return connectMinerService(apiInfo)(mctx, lc) + } +} + +func ConnectStorageService(apiInfo string) func(mctx helpers.MetricsCtx, lc fx.Lifecycle) (MinerStorageService, error) { + return func(mctx helpers.MetricsCtx, lc fx.Lifecycle) (MinerStorageService, error) { + log.Info("Connecting storage service to miner") + return connectMinerService(apiInfo)(mctx, lc) + } +} diff --git a/node/node_test.go b/node/node_test.go deleted file mode 100644 index 85b3f2e6d..000000000 --- a/node/node_test.go +++ /dev/null @@ -1,306 +0,0 @@ -package node_test - -import ( - "os" - "testing" - "time" - - 
"github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/lotus/api/test" - "github.com/filecoin-project/lotus/chain/actors/policy" - "github.com/filecoin-project/lotus/lib/lotuslog" - builder "github.com/filecoin-project/lotus/node/test" - logging "github.com/ipfs/go-log/v2" -) - -func init() { - _ = logging.SetLogLevel("*", "INFO") - - policy.SetConsensusMinerMinPower(abi.NewStoragePower(2048)) - policy.SetSupportedProofTypes(abi.RegisteredSealProof_StackedDrg2KiBV1) - policy.SetMinVerifiedDealSize(abi.NewStoragePower(256)) -} - -func TestAPI(t *testing.T) { - test.TestApis(t, builder.Builder) -} - -func TestAPIRPC(t *testing.T) { - test.TestApis(t, builder.RPCBuilder) -} - -func TestAPIDealFlow(t *testing.T) { - logging.SetLogLevel("miner", "ERROR") - logging.SetLogLevel("chainstore", "ERROR") - logging.SetLogLevel("chain", "ERROR") - logging.SetLogLevel("sub", "ERROR") - logging.SetLogLevel("storageminer", "ERROR") - - blockTime := 10 * time.Millisecond - - // For these tests where the block time is artificially short, just use - // a deal start epoch that is guaranteed to be far enough in the future - // so that the deal starts sealing in time - dealStartEpoch := abi.ChainEpoch(2 << 12) - - t.Run("TestDealFlow", func(t *testing.T) { - test.TestDealFlow(t, builder.MockSbBuilder, blockTime, false, false, dealStartEpoch) - }) - t.Run("WithExportedCAR", func(t *testing.T) { - test.TestDealFlow(t, builder.MockSbBuilder, blockTime, true, false, dealStartEpoch) - }) - t.Run("TestDoubleDealFlow", func(t *testing.T) { - test.TestDoubleDealFlow(t, builder.MockSbBuilder, blockTime, dealStartEpoch) - }) - t.Run("TestFastRetrievalDealFlow", func(t *testing.T) { - test.TestFastRetrievalDealFlow(t, builder.MockSbBuilder, blockTime, dealStartEpoch) - }) - t.Run("TestPublishDealsBatching", func(t *testing.T) { - test.TestPublishDealsBatching(t, builder.MockSbBuilder, blockTime, dealStartEpoch) - }) - t.Run("TestBatchDealInput", func(t *testing.T) { - 
test.TestBatchDealInput(t, builder.MockSbBuilder, blockTime, dealStartEpoch) - }) -} - -func TestBatchDealInput(t *testing.T) { - logging.SetLogLevel("miner", "ERROR") - logging.SetLogLevel("chainstore", "ERROR") - logging.SetLogLevel("chain", "ERROR") - logging.SetLogLevel("sub", "ERROR") - logging.SetLogLevel("storageminer", "ERROR") - logging.SetLogLevel("sectors", "DEBUG") - - blockTime := 10 * time.Millisecond - - // For these tests where the block time is artificially short, just use - // a deal start epoch that is guaranteed to be far enough in the future - // so that the deal starts sealing in time - dealStartEpoch := abi.ChainEpoch(2 << 12) - - test.TestBatchDealInput(t, builder.MockSbBuilder, blockTime, dealStartEpoch) -} - -func TestAPIDealFlowReal(t *testing.T) { - if testing.Short() { - t.Skip("skipping test in short mode") - } - lotuslog.SetupLogLevels() - logging.SetLogLevel("miner", "ERROR") - logging.SetLogLevel("chainstore", "ERROR") - logging.SetLogLevel("chain", "ERROR") - logging.SetLogLevel("sub", "ERROR") - logging.SetLogLevel("storageminer", "ERROR") - - // TODO: just set this globally? 
- oldDelay := policy.GetPreCommitChallengeDelay() - policy.SetPreCommitChallengeDelay(5) - t.Cleanup(func() { - policy.SetPreCommitChallengeDelay(oldDelay) - }) - - t.Run("basic", func(t *testing.T) { - test.TestDealFlow(t, builder.Builder, time.Second, false, false, 0) - }) - - t.Run("fast-retrieval", func(t *testing.T) { - test.TestDealFlow(t, builder.Builder, time.Second, false, true, 0) - }) - - t.Run("retrieval-second", func(t *testing.T) { - test.TestSecondDealRetrieval(t, builder.Builder, time.Second) - }) -} - -func TestDealMining(t *testing.T) { - if testing.Short() { - t.Skip("skipping test in short mode") - } - - logging.SetLogLevel("miner", "ERROR") - logging.SetLogLevel("chainstore", "ERROR") - logging.SetLogLevel("chain", "ERROR") - logging.SetLogLevel("sub", "ERROR") - logging.SetLogLevel("storageminer", "ERROR") - - test.TestDealMining(t, builder.MockSbBuilder, 50*time.Millisecond, false) -} - -func TestSDRUpgrade(t *testing.T) { - logging.SetLogLevel("miner", "ERROR") - logging.SetLogLevel("chainstore", "ERROR") - logging.SetLogLevel("chain", "ERROR") - logging.SetLogLevel("sub", "ERROR") - logging.SetLogLevel("storageminer", "ERROR") - - oldDelay := policy.GetPreCommitChallengeDelay() - policy.SetPreCommitChallengeDelay(5) - t.Cleanup(func() { - policy.SetPreCommitChallengeDelay(oldDelay) - }) - - test.TestSDRUpgrade(t, builder.MockSbBuilder, 50*time.Millisecond) -} - -func TestPledgeSectors(t *testing.T) { - logging.SetLogLevel("miner", "ERROR") - logging.SetLogLevel("chainstore", "ERROR") - logging.SetLogLevel("chain", "ERROR") - logging.SetLogLevel("sub", "ERROR") - logging.SetLogLevel("storageminer", "ERROR") - - t.Run("1", func(t *testing.T) { - test.TestPledgeSector(t, builder.MockSbBuilder, 50*time.Millisecond, 1) - }) - - t.Run("100", func(t *testing.T) { - test.TestPledgeSector(t, builder.MockSbBuilder, 50*time.Millisecond, 100) - }) - - t.Run("1000", func(t *testing.T) { - if testing.Short() { // takes ~16s - t.Skip("skipping test in 
short mode") - } - - test.TestPledgeSector(t, builder.MockSbBuilder, 50*time.Millisecond, 1000) - }) -} - -func TestPledgeBatching(t *testing.T) { - t.Run("100", func(t *testing.T) { - test.TestPledgeBatching(t, builder.MockSbBuilder, 50*time.Millisecond, 100) - }) - t.Run("100-before-nv13", func(t *testing.T) { - test.TestPledgeBeforeNv13(t, builder.MockSbBuilder, 50*time.Millisecond, 100) - }) -} - -func TestTapeFix(t *testing.T) { - logging.SetLogLevel("miner", "ERROR") - logging.SetLogLevel("chainstore", "ERROR") - logging.SetLogLevel("chain", "ERROR") - logging.SetLogLevel("sub", "ERROR") - logging.SetLogLevel("storageminer", "ERROR") - - test.TestTapeFix(t, builder.MockSbBuilder, 2*time.Millisecond) -} - -func TestWindowedPost(t *testing.T) { - if os.Getenv("LOTUS_TEST_WINDOW_POST") != "1" { - t.Skip("this takes a few minutes, set LOTUS_TEST_WINDOW_POST=1 to run") - } - - logging.SetLogLevel("miner", "ERROR") - logging.SetLogLevel("gen", "ERROR") - logging.SetLogLevel("chainstore", "ERROR") - logging.SetLogLevel("chain", "ERROR") - logging.SetLogLevel("sub", "ERROR") - logging.SetLogLevel("storageminer", "ERROR") - - test.TestWindowPost(t, builder.MockSbBuilder, 2*time.Millisecond, 10) -} - -func TestTerminate(t *testing.T) { - if os.Getenv("LOTUS_TEST_WINDOW_POST") != "1" { - t.Skip("this takes a few minutes, set LOTUS_TEST_WINDOW_POST=1 to run") - } - - logging.SetLogLevel("miner", "ERROR") - logging.SetLogLevel("chainstore", "ERROR") - logging.SetLogLevel("chain", "ERROR") - logging.SetLogLevel("sub", "ERROR") - logging.SetLogLevel("storageminer", "ERROR") - - test.TestTerminate(t, builder.MockSbBuilder, 2*time.Millisecond) -} - -func TestCCUpgrade(t *testing.T) { - logging.SetLogLevel("miner", "ERROR") - logging.SetLogLevel("chainstore", "ERROR") - logging.SetLogLevel("chain", "ERROR") - logging.SetLogLevel("sub", "ERROR") - logging.SetLogLevel("storageminer", "ERROR") - - test.TestCCUpgrade(t, builder.MockSbBuilder, 5*time.Millisecond) -} - -func 
TestPaymentChannels(t *testing.T) { - logging.SetLogLevel("miner", "ERROR") - logging.SetLogLevel("chainstore", "ERROR") - logging.SetLogLevel("chain", "ERROR") - logging.SetLogLevel("sub", "ERROR") - logging.SetLogLevel("pubsub", "ERROR") - logging.SetLogLevel("storageminer", "ERROR") - - test.TestPaymentChannels(t, builder.MockSbBuilder, 5*time.Millisecond) -} - -func TestWindowPostDispute(t *testing.T) { - if os.Getenv("LOTUS_TEST_WINDOW_POST") != "1" { - t.Skip("this takes a few minutes, set LOTUS_TEST_WINDOW_POST=1 to run") - } - logging.SetLogLevel("miner", "ERROR") - logging.SetLogLevel("gen", "ERROR") - logging.SetLogLevel("chainstore", "ERROR") - logging.SetLogLevel("chain", "ERROR") - logging.SetLogLevel("sub", "ERROR") - logging.SetLogLevel("storageminer", "ERROR") - - test.TestWindowPostDispute(t, builder.MockSbBuilder, 2*time.Millisecond) -} - -func TestWindowPostDisputeFails(t *testing.T) { - if os.Getenv("LOTUS_TEST_WINDOW_POST") != "1" { - t.Skip("this takes a few minutes, set LOTUS_TEST_WINDOW_POST=1 to run") - } - logging.SetLogLevel("miner", "ERROR") - logging.SetLogLevel("gen", "ERROR") - logging.SetLogLevel("chainstore", "ERROR") - logging.SetLogLevel("chain", "ERROR") - logging.SetLogLevel("sub", "ERROR") - logging.SetLogLevel("storageminer", "ERROR") - - test.TestWindowPostDisputeFails(t, builder.MockSbBuilder, 2*time.Millisecond) -} - -func TestWindowPostBaseFeeNoBurn(t *testing.T) { - if os.Getenv("LOTUS_TEST_WINDOW_POST") != "1" { - t.Skip("this takes a few minutes, set LOTUS_TEST_WINDOW_POST=1 to run") - } - logging.SetLogLevel("miner", "ERROR") - logging.SetLogLevel("gen", "ERROR") - logging.SetLogLevel("chainstore", "ERROR") - logging.SetLogLevel("chain", "ERROR") - logging.SetLogLevel("sub", "ERROR") - logging.SetLogLevel("storageminer", "ERROR") - - test.TestWindowPostBaseFeeNoBurn(t, builder.MockSbBuilder, 2*time.Millisecond) -} - -func TestWindowPostBaseFeeBurn(t *testing.T) { - if os.Getenv("LOTUS_TEST_WINDOW_POST") != "1" { - 
t.Skip("this takes a few minutes, set LOTUS_TEST_WINDOW_POST=1 to run") - } - logging.SetLogLevel("miner", "ERROR") - logging.SetLogLevel("gen", "ERROR") - logging.SetLogLevel("chainstore", "ERROR") - logging.SetLogLevel("chain", "ERROR") - logging.SetLogLevel("sub", "ERROR") - logging.SetLogLevel("storageminer", "ERROR") - - test.TestWindowPostBaseFeeBurn(t, builder.MockSbBuilder, 2*time.Millisecond) -} - -func TestDeadlineToggling(t *testing.T) { - if os.Getenv("LOTUS_TEST_DEADLINE_TOGGLING") != "1" { - t.Skip("this takes a few minutes, set LOTUS_TEST_DEADLINE_TOGGLING=1 to run") - } - logging.SetLogLevel("miner", "ERROR") - logging.SetLogLevel("gen", "ERROR") - logging.SetLogLevel("chainstore", "ERROR") - logging.SetLogLevel("chain", "ERROR") - logging.SetLogLevel("sub", "ERROR") - logging.SetLogLevel("storageminer", "FATAL") - - test.TestDeadlineToggling(t, builder.MockSbBuilder, 2*time.Millisecond) -} diff --git a/node/rpc.go b/node/rpc.go new file mode 100644 index 000000000..b283f6ac1 --- /dev/null +++ b/node/rpc.go @@ -0,0 +1,196 @@ +package node + +import ( + "context" + "encoding/json" + "net" + "net/http" + _ "net/http/pprof" + "runtime" + "strconv" + + "github.com/gorilla/mux" + "github.com/ipfs/go-cid" + logging "github.com/ipfs/go-log/v2" + "github.com/multiformats/go-multiaddr" + manet "github.com/multiformats/go-multiaddr/net" + "go.opencensus.io/tag" + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-jsonrpc" + "github.com/filecoin-project/go-jsonrpc/auth" + + "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/api/v0api" + "github.com/filecoin-project/lotus/api/v1api" + "github.com/filecoin-project/lotus/lib/rpcenc" + "github.com/filecoin-project/lotus/metrics" + "github.com/filecoin-project/lotus/node/impl" +) + +var rpclog = logging.Logger("rpc") + +// ServeRPC serves an HTTP handler over the supplied listen multiaddr. +// +// This function spawns a goroutine to run the server, and returns immediately. 
+// It returns the stop function to be called to terminate the endpoint. +// +// The supplied ID is used in tracing, by inserting a tag in the context. +func ServeRPC(h http.Handler, id string, addr multiaddr.Multiaddr) (StopFunc, error) { + // Start listening to the addr; if invalid or occupied, we will fail early. + lst, err := manet.Listen(addr) + if err != nil { + return nil, xerrors.Errorf("could not listen: %w", err) + } + + // Instantiate the server and start listening. + srv := &http.Server{ + Handler: h, + BaseContext: func(listener net.Listener) context.Context { + ctx, _ := tag.New(context.Background(), tag.Upsert(metrics.APIInterface, id)) + return ctx + }, + } + + go func() { + err = srv.Serve(manet.NetListener(lst)) + if err != http.ErrServerClosed { + rpclog.Warnf("rpc server failed: %s", err) + } + }() + + return srv.Shutdown, err +} + +// FullNodeHandler returns a full node handler, to be mounted as-is on the server. +func FullNodeHandler(a v1api.FullNode, permissioned bool, opts ...jsonrpc.ServerOption) (http.Handler, error) { + m := mux.NewRouter() + + serveRpc := func(path string, hnd interface{}) { + rpcServer := jsonrpc.NewServer(opts...) 
+ rpcServer.Register("Filecoin", hnd) + + var handler http.Handler = rpcServer + if permissioned { + handler = &auth.Handler{Verify: a.AuthVerify, Next: rpcServer.ServeHTTP} + } + + m.Handle(path, handler) + } + + fnapi := metrics.MetricedFullAPI(a) + if permissioned { + fnapi = api.PermissionedFullAPI(fnapi) + } + + serveRpc("/rpc/v1", fnapi) + serveRpc("/rpc/v0", &v0api.WrapperV1Full{FullNode: fnapi}) + + // Import handler + handleImportFunc := handleImport(a.(*impl.FullNodeAPI)) + if permissioned { + importAH := &auth.Handler{ + Verify: a.AuthVerify, + Next: handleImportFunc, + } + m.Handle("/rest/v0/import", importAH) + } else { + m.HandleFunc("/rest/v0/import", handleImportFunc) + } + + // debugging + m.Handle("/debug/metrics", metrics.Exporter()) + m.Handle("/debug/pprof-set/block", handleFractionOpt("BlockProfileRate", runtime.SetBlockProfileRate)) + m.Handle("/debug/pprof-set/mutex", handleFractionOpt("MutexProfileFraction", func(x int) { + runtime.SetMutexProfileFraction(x) + })) + m.PathPrefix("/").Handler(http.DefaultServeMux) // pprof + + return m, nil +} + +// MinerHandler returns a miner handler, to be mounted as-is on the server. 
+func MinerHandler(a api.StorageMiner, permissioned bool) (http.Handler, error) { + m := mux.NewRouter() + + mapi := metrics.MetricedStorMinerAPI(a) + if permissioned { + mapi = api.PermissionedStorMinerAPI(mapi) + } + + readerHandler, readerServerOpt := rpcenc.ReaderParamDecoder() + rpcServer := jsonrpc.NewServer(readerServerOpt) + rpcServer.Register("Filecoin", mapi) + + m.Handle("/rpc/v0", rpcServer) + m.Handle("/rpc/streams/v0/push/{uuid}", readerHandler) + m.PathPrefix("/remote").HandlerFunc(a.(*impl.StorageMinerAPI).ServeRemote(permissioned)) + + // debugging + m.Handle("/debug/metrics", metrics.Exporter()) + m.PathPrefix("/").Handler(http.DefaultServeMux) // pprof + + if !permissioned { + return m, nil + } + + ah := &auth.Handler{ + Verify: a.AuthVerify, + Next: m.ServeHTTP, + } + return ah, nil +} + +func handleImport(a *impl.FullNodeAPI) func(w http.ResponseWriter, r *http.Request) { + return func(w http.ResponseWriter, r *http.Request) { + if r.Method != "PUT" { + w.WriteHeader(404) + return + } + if !auth.HasPerm(r.Context(), nil, api.PermWrite) { + w.WriteHeader(401) + _ = json.NewEncoder(w).Encode(struct{ Error string }{"unauthorized: missing write permission"}) + return + } + + c, err := a.ClientImportLocal(r.Context(), r.Body) + if err != nil { + w.WriteHeader(500) + _ = json.NewEncoder(w).Encode(struct{ Error string }{err.Error()}) + return + } + w.WriteHeader(200) + err = json.NewEncoder(w).Encode(struct{ Cid cid.Cid }{c}) + if err != nil { + rpclog.Errorf("/rest/v0/import: Writing response failed: %+v", err) + return + } + } +} + +func handleFractionOpt(name string, setter func(int)) http.HandlerFunc { + return func(rw http.ResponseWriter, r *http.Request) { + if r.Method != http.MethodPost { + http.Error(rw, "only POST allowed", http.StatusMethodNotAllowed) + return + } + if err := r.ParseForm(); err != nil { + http.Error(rw, err.Error(), http.StatusBadRequest) + return + } + + asfr := r.Form.Get("x") + if len(asfr) == 0 { + http.Error(rw, 
"parameter 'x' must be set", http.StatusBadRequest) + return + } + + fr, err := strconv.Atoi(asfr) + if err != nil { + http.Error(rw, err.Error(), http.StatusBadRequest) + return + } + rpclog.Infof("setting %s to %d", name, fr) + setter(fr) + } +} diff --git a/node/shutdown.go b/node/shutdown.go new file mode 100644 index 000000000..e630031da --- /dev/null +++ b/node/shutdown.go @@ -0,0 +1,56 @@ +package node + +import ( + "context" + "os" + "os/signal" + "syscall" +) + +type ShutdownHandler struct { + Component string + StopFunc StopFunc +} + +// MonitorShutdown manages shutdown requests, by watching signals and invoking +// the supplied handlers in order. +// +// It watches SIGTERM and SIGINT OS signals, as well as the trigger channel. +// When any of them fire, it calls the supplied handlers in order. If any of +// them errors, it merely logs the error. +// +// Once the shutdown has completed, it closes the returned channel. The caller +// can watch this channel +func MonitorShutdown(triggerCh <-chan struct{}, handlers ...ShutdownHandler) <-chan struct{} { + sigCh := make(chan os.Signal, 2) + out := make(chan struct{}) + + go func() { + select { + case sig := <-sigCh: + log.Warnw("received shutdown", "signal", sig) + case <-triggerCh: + log.Warn("received shutdown") + } + + log.Warn("Shutting down...") + + // Call all the handlers, logging on failure and success. + for _, h := range handlers { + if err := h.StopFunc(context.TODO()); err != nil { + log.Errorf("shutting down %s failed: %s", h.Component, err) + continue + } + log.Infof("%s shut down successfully ", h.Component) + } + + log.Warn("Graceful shutdown successful") + + // Sync all loggers. 
+ _ = log.Sync() //nolint:errcheck + close(out) + }() + + signal.Notify(sigCh, syscall.SIGTERM, syscall.SIGINT) + return out +} diff --git a/node/shutdown_test.go b/node/shutdown_test.go new file mode 100644 index 000000000..15e2af93e --- /dev/null +++ b/node/shutdown_test.go @@ -0,0 +1,36 @@ +package node + +import ( + "context" + "sync" + "testing" + "time" + + "github.com/stretchr/testify/require" +) + +func TestMonitorShutdown(t *testing.T) { + signalCh := make(chan struct{}) + + // Three shutdown handlers. + var wg sync.WaitGroup + wg.Add(3) + h := ShutdownHandler{ + Component: "handler", + StopFunc: func(_ context.Context) error { + wg.Done() + return nil + }, + } + + finishCh := MonitorShutdown(signalCh, h, h, h) + + // Nothing here after 10ms. + time.Sleep(10 * time.Millisecond) + require.Len(t, finishCh, 0) + + // Now trigger the shutdown. + close(signalCh) + wg.Wait() + <-finishCh +} diff --git a/node/test/builder.go b/node/test/builder.go deleted file mode 100644 index 3024e9696..000000000 --- a/node/test/builder.go +++ /dev/null @@ -1,641 +0,0 @@ -package test - -import ( - "bytes" - "context" - "crypto/rand" - "io/ioutil" - "net" - "net/http/httptest" - "strings" - "sync" - "testing" - "time" - - "github.com/gorilla/mux" - "golang.org/x/xerrors" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-jsonrpc" - "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/go-state-types/big" - "github.com/filecoin-project/go-state-types/exitcode" - "github.com/filecoin-project/go-storedcounter" - "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/api/client" - "github.com/filecoin-project/lotus/api/test" - "github.com/filecoin-project/lotus/api/v0api" - "github.com/filecoin-project/lotus/api/v1api" - "github.com/filecoin-project/lotus/build" - "github.com/filecoin-project/lotus/chain" - "github.com/filecoin-project/lotus/chain/actors" - 
"github.com/filecoin-project/lotus/chain/actors/builtin/miner" - "github.com/filecoin-project/lotus/chain/actors/builtin/power" - "github.com/filecoin-project/lotus/chain/gen" - genesis2 "github.com/filecoin-project/lotus/chain/gen/genesis" - "github.com/filecoin-project/lotus/chain/messagepool" - "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/chain/wallet" - "github.com/filecoin-project/lotus/cmd/lotus-seed/seed" - sectorstorage "github.com/filecoin-project/lotus/extern/sector-storage" - "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper" - "github.com/filecoin-project/lotus/extern/sector-storage/mock" - "github.com/filecoin-project/lotus/genesis" - lotusminer "github.com/filecoin-project/lotus/miner" - "github.com/filecoin-project/lotus/node" - "github.com/filecoin-project/lotus/node/modules" - "github.com/filecoin-project/lotus/node/modules/dtypes" - testing2 "github.com/filecoin-project/lotus/node/modules/testing" - "github.com/filecoin-project/lotus/node/repo" - "github.com/filecoin-project/lotus/storage/mockstorage" - miner2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/miner" - power2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/power" - "github.com/ipfs/go-datastore" - "github.com/libp2p/go-libp2p-core/crypto" - "github.com/libp2p/go-libp2p-core/peer" - mocknet "github.com/libp2p/go-libp2p/p2p/net/mock" - "github.com/multiformats/go-multiaddr" - "github.com/stretchr/testify/require" -) - -func init() { - chain.BootstrapPeerThreshold = 1 - messagepool.HeadChangeCoalesceMinDelay = time.Microsecond - messagepool.HeadChangeCoalesceMaxDelay = 2 * time.Microsecond - messagepool.HeadChangeCoalesceMergeInterval = 100 * time.Nanosecond -} - -func CreateTestStorageNode(ctx context.Context, t *testing.T, waddr address.Address, act address.Address, pk crypto.PrivKey, tnd test.TestNode, mn mocknet.Mocknet, opts node.Option) test.TestStorageNode { - r := repo.NewMemory(nil) - - lr, err := 
r.Lock(repo.StorageMiner) - require.NoError(t, err) - - ks, err := lr.KeyStore() - require.NoError(t, err) - - kbytes, err := pk.Bytes() - require.NoError(t, err) - - err = ks.Put("libp2p-host", types.KeyInfo{ - Type: "libp2p-host", - PrivateKey: kbytes, - }) - require.NoError(t, err) - - ds, err := lr.Datastore(context.TODO(), "/metadata") - require.NoError(t, err) - err = ds.Put(datastore.NewKey("miner-address"), act.Bytes()) - require.NoError(t, err) - - nic := storedcounter.New(ds, datastore.NewKey(modules.StorageCounterDSPrefix)) - for i := 0; i < test.GenesisPreseals; i++ { - _, err := nic.Next() - require.NoError(t, err) - } - _, err = nic.Next() - require.NoError(t, err) - - err = lr.Close() - require.NoError(t, err) - - peerid, err := peer.IDFromPrivateKey(pk) - require.NoError(t, err) - - enc, err := actors.SerializeParams(&miner2.ChangePeerIDParams{NewID: abi.PeerID(peerid)}) - require.NoError(t, err) - - msg := &types.Message{ - To: act, - From: waddr, - Method: miner.Methods.ChangePeerID, - Params: enc, - Value: types.NewInt(0), - } - - _, err = tnd.MpoolPushMessage(ctx, msg, nil) - require.NoError(t, err) - - // start node - var minerapi api.StorageMiner - - mineBlock := make(chan lotusminer.MineReq) - stop, err := node.New(ctx, - node.StorageMiner(&minerapi), - node.Online(), - node.Repo(r), - node.Test(), - - node.MockHost(mn), - - node.Override(new(v1api.FullNode), tnd), - node.Override(new(*lotusminer.Miner), lotusminer.NewTestMiner(mineBlock, act)), - - opts, - ) - if err != nil { - t.Fatalf("failed to construct node: %v", err) - } - - t.Cleanup(func() { _ = stop(context.Background()) }) - - /*// Bootstrap with full node - remoteAddrs, err := tnd.NetAddrsListen(ctx) - require.NoError(t, err) - - err = minerapi.NetConnect(ctx, remoteAddrs) - require.NoError(t, err)*/ - mineOne := func(ctx context.Context, req lotusminer.MineReq) error { - select { - case mineBlock <- req: - return nil - case <-ctx.Done(): - return ctx.Err() - } - } - - return 
test.TestStorageNode{StorageMiner: minerapi, MineOne: mineOne, Stop: stop} -} - -func storageBuilder(parentNode test.TestNode, mn mocknet.Mocknet, opts node.Option) test.StorageBuilder { - return func(ctx context.Context, t *testing.T, spt abi.RegisteredSealProof, owner address.Address) test.TestStorageNode { - pk, _, err := crypto.GenerateEd25519Key(rand.Reader) - require.NoError(t, err) - - minerPid, err := peer.IDFromPrivateKey(pk) - require.NoError(t, err) - - params, serr := actors.SerializeParams(&power2.CreateMinerParams{ - Owner: owner, - Worker: owner, - SealProofType: spt, - Peer: abi.PeerID(minerPid), - }) - require.NoError(t, serr) - - createStorageMinerMsg := &types.Message{ - To: power.Address, - From: owner, - Value: big.Zero(), - - Method: power.Methods.CreateMiner, - Params: params, - - GasLimit: 0, - GasPremium: big.NewInt(5252), - } - - signed, err := parentNode.MpoolPushMessage(ctx, createStorageMinerMsg, nil) - require.NoError(t, err) - - mw, err := parentNode.StateWaitMsg(ctx, signed.Cid(), build.MessageConfidence, api.LookbackNoLimit, true) - require.NoError(t, err) - require.Equal(t, exitcode.Ok, mw.Receipt.ExitCode) - - var retval power2.CreateMinerReturn - err = retval.UnmarshalCBOR(bytes.NewReader(mw.Receipt.Return)) - require.NoError(t, err) - - return CreateTestStorageNode(ctx, t, owner, retval.IDAddress, pk, parentNode, mn, opts) - } -} - -func Builder(t *testing.T, fullOpts []test.FullNodeOpts, storage []test.StorageMiner) ([]test.TestNode, []test.TestStorageNode) { - return mockBuilderOpts(t, fullOpts, storage, false) -} - -func MockSbBuilder(t *testing.T, fullOpts []test.FullNodeOpts, storage []test.StorageMiner) ([]test.TestNode, []test.TestStorageNode) { - return mockSbBuilderOpts(t, fullOpts, storage, false) -} - -func RPCBuilder(t *testing.T, fullOpts []test.FullNodeOpts, storage []test.StorageMiner) ([]test.TestNode, []test.TestStorageNode) { - return mockBuilderOpts(t, fullOpts, storage, true) -} - -func RPCMockSbBuilder(t 
*testing.T, fullOpts []test.FullNodeOpts, storage []test.StorageMiner) ([]test.TestNode, []test.TestStorageNode) { - return mockSbBuilderOpts(t, fullOpts, storage, true) -} - -func mockBuilderOpts(t *testing.T, fullOpts []test.FullNodeOpts, storage []test.StorageMiner, rpc bool) ([]test.TestNode, []test.TestStorageNode) { - ctx, cancel := context.WithCancel(context.Background()) - t.Cleanup(cancel) - - mn := mocknet.New(ctx) - - fulls := make([]test.TestNode, len(fullOpts)) - storers := make([]test.TestStorageNode, len(storage)) - - pk, _, err := crypto.GenerateEd25519Key(rand.Reader) - require.NoError(t, err) - - minerPid, err := peer.IDFromPrivateKey(pk) - require.NoError(t, err) - - var genbuf bytes.Buffer - - if len(storage) > 1 { - panic("need more peer IDs") - } - // PRESEAL SECTION, TRY TO REPLACE WITH BETTER IN THE FUTURE - // TODO: would be great if there was a better way to fake the preseals - - var genms []genesis.Miner - var maddrs []address.Address - var genaccs []genesis.Actor - var keys []*wallet.Key - - var presealDirs []string - for i := 0; i < len(storage); i++ { - maddr, err := address.NewIDAddress(genesis2.MinerStart + uint64(i)) - if err != nil { - t.Fatal(err) - } - tdir, err := ioutil.TempDir("", "preseal-memgen") - if err != nil { - t.Fatal(err) - } - genm, k, err := seed.PreSeal(maddr, abi.RegisteredSealProof_StackedDrg2KiBV1, 0, test.GenesisPreseals, tdir, []byte("make genesis mem random"), nil, true) - if err != nil { - t.Fatal(err) - } - genm.PeerId = minerPid - - wk, err := wallet.NewKey(*k) - if err != nil { - return nil, nil - } - - genaccs = append(genaccs, genesis.Actor{ - Type: genesis.TAccount, - Balance: big.Mul(big.NewInt(400000000), types.NewInt(build.FilecoinPrecision)), - Meta: (&genesis.AccountMeta{Owner: wk.Address}).ActorMeta(), - }) - - keys = append(keys, wk) - presealDirs = append(presealDirs, tdir) - maddrs = append(maddrs, maddr) - genms = append(genms, *genm) - } - templ := &genesis.Template{ - Accounts: genaccs, - 
Miners: genms, - NetworkName: "test", - Timestamp: uint64(time.Now().Unix() - 10000), // some time sufficiently far in the past - VerifregRootKey: gen.DefaultVerifregRootkeyActor, - RemainderAccount: gen.DefaultRemainderAccountActor, - } - - // END PRESEAL SECTION - - for i := 0; i < len(fullOpts); i++ { - var genesis node.Option - if i == 0 { - genesis = node.Override(new(modules.Genesis), testing2.MakeGenesisMem(&genbuf, *templ)) - } else { - genesis = node.Override(new(modules.Genesis), modules.LoadGenesis(genbuf.Bytes())) - } - - stop, err := node.New(ctx, - node.FullAPI(&fulls[i].FullNode, node.Lite(fullOpts[i].Lite)), - node.Online(), - node.Repo(repo.NewMemory(nil)), - node.MockHost(mn), - node.Test(), - - genesis, - - fullOpts[i].Opts(fulls), - ) - if err != nil { - t.Fatal(err) - } - - t.Cleanup(func() { _ = stop(context.Background()) }) - - if rpc { - fulls[i] = fullRpc(t, fulls[i]) - } - - fulls[i].Stb = storageBuilder(fulls[i], mn, node.Options()) - } - - for i, def := range storage { - // TODO: support non-bootstrap miners - if i != 0 { - t.Fatal("only one storage node supported") - } - if def.Full != 0 { - t.Fatal("storage nodes only supported on the first full node") - } - - f := fulls[def.Full] - if _, err := f.FullNode.WalletImport(ctx, &keys[i].KeyInfo); err != nil { - t.Fatal(err) - } - if err := f.FullNode.WalletSetDefault(ctx, keys[i].Address); err != nil { - t.Fatal(err) - } - - genMiner := maddrs[i] - wa := genms[i].Worker - - opts := def.Opts - if opts == nil { - opts = node.Options() - } - storers[i] = CreateTestStorageNode(ctx, t, wa, genMiner, pk, f, mn, opts) - if err := storers[i].StorageAddLocal(ctx, presealDirs[i]); err != nil { - t.Fatalf("%+v", err) - } - /* - sma := storers[i].StorageMiner.(*impl.StorageMinerAPI) - - psd := presealDirs[i] - */ - if rpc { - storers[i] = storerRpc(t, storers[i]) - } - } - - if err := mn.LinkAll(); err != nil { - t.Fatal(err) - } - - if len(storers) > 0 { - // Mine 2 blocks to setup some CE stuff in 
some actors - var wait sync.Mutex - wait.Lock() - - test.MineUntilBlock(ctx, t, fulls[0], storers[0], func(epoch abi.ChainEpoch) { - wait.Unlock() - }) - - wait.Lock() - test.MineUntilBlock(ctx, t, fulls[0], storers[0], func(epoch abi.ChainEpoch) { - wait.Unlock() - }) - wait.Lock() - } - - return fulls, storers -} - -func mockSbBuilderOpts(t *testing.T, fullOpts []test.FullNodeOpts, storage []test.StorageMiner, rpc bool) ([]test.TestNode, []test.TestStorageNode) { - ctx, cancel := context.WithCancel(context.Background()) - t.Cleanup(cancel) - - mn := mocknet.New(ctx) - - fulls := make([]test.TestNode, len(fullOpts)) - storers := make([]test.TestStorageNode, len(storage)) - - var genbuf bytes.Buffer - - // PRESEAL SECTION, TRY TO REPLACE WITH BETTER IN THE FUTURE - // TODO: would be great if there was a better way to fake the preseals - - var genms []genesis.Miner - var genaccs []genesis.Actor - var maddrs []address.Address - var keys []*wallet.Key - var pidKeys []crypto.PrivKey - for i := 0; i < len(storage); i++ { - maddr, err := address.NewIDAddress(genesis2.MinerStart + uint64(i)) - if err != nil { - t.Fatal(err) - } - - preseals := storage[i].Preseal - if preseals == test.PresealGenesis { - preseals = test.GenesisPreseals - } - - genm, k, err := mockstorage.PreSeal(abi.RegisteredSealProof_StackedDrg2KiBV1, maddr, preseals) - if err != nil { - t.Fatal(err) - } - - pk, _, err := crypto.GenerateEd25519Key(rand.Reader) - require.NoError(t, err) - - minerPid, err := peer.IDFromPrivateKey(pk) - require.NoError(t, err) - - genm.PeerId = minerPid - - wk, err := wallet.NewKey(*k) - if err != nil { - return nil, nil - } - - genaccs = append(genaccs, genesis.Actor{ - Type: genesis.TAccount, - Balance: big.Mul(big.NewInt(400000000), types.NewInt(build.FilecoinPrecision)), - Meta: (&genesis.AccountMeta{Owner: wk.Address}).ActorMeta(), - }) - - keys = append(keys, wk) - pidKeys = append(pidKeys, pk) - maddrs = append(maddrs, maddr) - genms = append(genms, *genm) - } - templ 
:= &genesis.Template{ - Accounts: genaccs, - Miners: genms, - NetworkName: "test", - Timestamp: uint64(time.Now().Unix()) - (build.BlockDelaySecs * 20000), - VerifregRootKey: gen.DefaultVerifregRootkeyActor, - RemainderAccount: gen.DefaultRemainderAccountActor, - } - - // END PRESEAL SECTION - - for i := 0; i < len(fullOpts); i++ { - var genesis node.Option - if i == 0 { - genesis = node.Override(new(modules.Genesis), testing2.MakeGenesisMem(&genbuf, *templ)) - } else { - genesis = node.Override(new(modules.Genesis), modules.LoadGenesis(genbuf.Bytes())) - } - - stop, err := node.New(ctx, - node.FullAPI(&fulls[i].FullNode, node.Lite(fullOpts[i].Lite)), - node.Online(), - node.Repo(repo.NewMemory(nil)), - node.MockHost(mn), - node.Test(), - - node.Override(new(ffiwrapper.Verifier), mock.MockVerifier), - node.Override(new(ffiwrapper.Prover), mock.MockProver), - - // so that we subscribe to pubsub topics immediately - node.Override(new(dtypes.Bootstrapper), dtypes.Bootstrapper(true)), - - genesis, - - fullOpts[i].Opts(fulls), - ) - if err != nil { - t.Fatalf("%+v", err) - } - - t.Cleanup(func() { _ = stop(context.Background()) }) - - if rpc { - fulls[i] = fullRpc(t, fulls[i]) - } - - fulls[i].Stb = storageBuilder(fulls[i], mn, node.Options( - node.Override(new(sectorstorage.SectorManager), func() (sectorstorage.SectorManager, error) { - return mock.NewMockSectorMgr(nil), nil - }), - node.Override(new(ffiwrapper.Verifier), mock.MockVerifier), - node.Override(new(ffiwrapper.Prover), mock.MockProver), - node.Unset(new(*sectorstorage.Manager)), - )) - } - - for i, def := range storage { - // TODO: support non-bootstrap miners - - minerID := abi.ActorID(genesis2.MinerStart + uint64(i)) - - if def.Full != 0 { - t.Fatal("storage nodes only supported on the first full node") - } - - f := fulls[def.Full] - if _, err := f.FullNode.WalletImport(ctx, &keys[i].KeyInfo); err != nil { - return nil, nil - } - if err := f.FullNode.WalletSetDefault(ctx, keys[i].Address); err != nil { - 
return nil, nil - } - - sectors := make([]abi.SectorID, len(genms[i].Sectors)) - for i, sector := range genms[i].Sectors { - sectors[i] = abi.SectorID{ - Miner: minerID, - Number: sector.SectorID, - } - } - - opts := def.Opts - if opts == nil { - opts = node.Options() - } - storers[i] = CreateTestStorageNode(ctx, t, genms[i].Worker, maddrs[i], pidKeys[i], f, mn, node.Options( - node.Override(new(sectorstorage.SectorManager), func() (sectorstorage.SectorManager, error) { - return mock.NewMockSectorMgr(sectors), nil - }), - node.Override(new(ffiwrapper.Verifier), mock.MockVerifier), - node.Override(new(ffiwrapper.Prover), mock.MockProver), - node.Unset(new(*sectorstorage.Manager)), - opts, - )) - - if rpc { - storers[i] = storerRpc(t, storers[i]) - } - } - - if err := mn.LinkAll(); err != nil { - t.Fatal(err) - } - - if len(storers) > 0 { - // Mine 2 blocks to setup some CE stuff in some actors - var wait sync.Mutex - wait.Lock() - - test.MineUntilBlock(ctx, t, fulls[0], storers[0], func(abi.ChainEpoch) { - wait.Unlock() - }) - wait.Lock() - test.MineUntilBlock(ctx, t, fulls[0], storers[0], func(abi.ChainEpoch) { - wait.Unlock() - }) - wait.Lock() - } - - return fulls, storers -} - -func fullRpc(t *testing.T, nd test.TestNode) test.TestNode { - ma, listenAddr, err := CreateRPCServer(t, map[string]interface{}{ - "/rpc/v1": nd, - "/rpc/v0": &v0api.WrapperV1Full{FullNode: nd}, - }) - require.NoError(t, err) - - var stop func() - var full test.TestNode - full.FullNode, stop, err = client.NewFullNodeRPCV1(context.Background(), listenAddr+"/rpc/v1", nil) - require.NoError(t, err) - t.Cleanup(stop) - - full.ListenAddr = ma - return full -} - -func storerRpc(t *testing.T, nd test.TestStorageNode) test.TestStorageNode { - ma, listenAddr, err := CreateRPCServer(t, map[string]interface{}{ - "/rpc/v0": nd, - }) - require.NoError(t, err) - - var stop func() - var storer test.TestStorageNode - storer.StorageMiner, stop, err = client.NewStorageMinerRPCV0(context.Background(), 
listenAddr+"/rpc/v0", nil) - require.NoError(t, err) - t.Cleanup(stop) - - storer.ListenAddr = ma - storer.MineOne = nd.MineOne - return storer -} - -func CreateRPCServer(t *testing.T, handlers map[string]interface{}) (multiaddr.Multiaddr, string, error) { - m := mux.NewRouter() - for path, handler := range handlers { - rpcServer := jsonrpc.NewServer() - rpcServer.Register("Filecoin", handler) - m.Handle(path, rpcServer) - } - testServ := httptest.NewServer(m) // todo: close - t.Cleanup(testServ.Close) - t.Cleanup(testServ.CloseClientConnections) - - addr := testServ.Listener.Addr() - listenAddr := "ws://" + addr.String() - ma, err := parseWSMultiAddr(addr) - if err != nil { - return nil, "", err - } - return ma, listenAddr, err -} - -func parseWSMultiAddr(addr net.Addr) (multiaddr.Multiaddr, error) { - host, port, err := net.SplitHostPort(addr.String()) - if err != nil { - return nil, err - } - ma, err := multiaddr.NewMultiaddr("/ip4/" + host + "/" + addr.Network() + "/" + port + "/ws") - if err != nil { - return nil, err - } - return ma, nil -} - -func WSMultiAddrToString(addr multiaddr.Multiaddr) (string, error) { - parts := strings.Split(addr.String(), "/") - if len(parts) != 6 || parts[0] != "" { - return "", xerrors.Errorf("Malformed ws multiaddr %s", addr) - } - - host := parts[2] - port := parts[4] - proto := parts[5] - - return proto + "://" + host + ":" + port + "/rpc/v0", nil -} diff --git a/node/testopts.go b/node/testopts.go index f348fc555..ca1e81127 100644 --- a/node/testopts.go +++ b/node/testopts.go @@ -10,8 +10,8 @@ import ( func MockHost(mn mocknet.Mocknet) Option { return Options( - ApplyIf(func(s *Settings) bool { return !s.Online }, - Error(errors.New("MockHost must be specified after Online")), + ApplyIf(func(s *Settings) bool { return !s.Base }, + Error(errors.New("MockHost must be specified after Base")), ), Override(new(lp2p.RawHost), lp2p.MockHost), diff --git a/paychmgr/settler/settler.go b/paychmgr/settler/settler.go index 
676b15c27..ce31ab223 100644 --- a/paychmgr/settler/settler.go +++ b/paychmgr/settler/settler.go @@ -96,6 +96,7 @@ func (pcs *paymentChannelSettler) messageHandler(msg *types.Message, rec *types. msgLookup, err := pcs.api.StateWaitMsg(pcs.ctx, submitMessageCID, build.MessageConfidence, api.LookbackNoLimit, true) if err != nil { log.Errorf("submitting voucher: %s", err.Error()) + return } if msgLookup.Receipt.ExitCode != 0 { log.Errorf("failed submitting voucher: %+v", voucher) diff --git a/scripts/bash-completion/lotus b/scripts/bash-completion/lotus index 20c312b6c..b572ab320 100644 --- a/scripts/bash-completion/lotus +++ b/scripts/bash-completion/lotus @@ -1,10 +1,18 @@ #!/usr/bin/env bash + _cli_bash_autocomplete() { - local cur opts base; - COMPREPLY=(); - cur="${COMP_WORDS[COMP_CWORD]}"; - opts=$( ${COMP_WORDS[@]:0:$COMP_CWORD} --generate-completion ); - COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) ); - return 0; -}; -complete -F _cli_bash_autocomplete lotus \ No newline at end of file + if [[ "${COMP_WORDS[0]}" != "source" ]]; then + local cur opts base + COMPREPLY=() + cur="${COMP_WORDS[COMP_CWORD]}" + if [[ "$cur" == "-"* ]]; then + opts=$( ${COMP_WORDS[@]:0:$COMP_CWORD} ${cur} --generate-bash-completion ) + else + opts=$( ${COMP_WORDS[@]:0:$COMP_CWORD} --generate-bash-completion ) + fi + COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) ) + return 0 + fi +} + +complete -o bashdefault -o default -o nospace -F _cli_bash_autocomplete lotus lotus-miner lotus-worker diff --git a/scripts/bash-completion/lotus-miner b/scripts/bash-completion/lotus-miner deleted file mode 100644 index df5cc01cc..000000000 --- a/scripts/bash-completion/lotus-miner +++ /dev/null @@ -1,10 +0,0 @@ -#!/usr/bin/env bash -_cli_bash_autocomplete() { - local cur opts base; - COMPREPLY=(); - cur="${COMP_WORDS[COMP_CWORD]}"; - opts=$( ${COMP_WORDS[@]:0:$COMP_CWORD} --generate-completion ); - COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) ); - return 0; -}; -complete -F _cli_bash_autocomplete 
lotus-miner \ No newline at end of file diff --git a/scripts/generate-lotus-cli.py b/scripts/generate-lotus-cli.py new file mode 100644 index 000000000..8018962e9 --- /dev/null +++ b/scripts/generate-lotus-cli.py @@ -0,0 +1,52 @@ +#!/usr/bin/env python +# Generate lotus command lines documents as text and markdown in folder "lotus/documentation/en". +# Python 2.7 + +import os + + +def generate_lotus_cli(prog): + output_folder = 'documentation/en' + md_file = open('%s/cli-%s.md' % (output_folder, prog), 'w') # set the name of md output + + def get_cmd_recursively(cur_cmd): + depth = cur_cmd.count(' ') + md_file.writelines(('\n' * min(depth, 1)) + ('#' * depth) + '# ' + cur_cmd[2:] + '\n') + + cmd_flag = False + + print('> ' + cur_cmd) + cmd_help_output = os.popen(cur_cmd + ' -h') + cmd_help_output_lines = cmd_help_output.readlines() + + md_file.writelines('```\n') + md_file.writelines(cmd_help_output_lines) + md_file.writelines('```\n') + + for line in cmd_help_output_lines: + try: + line = line.strip() + if line == 'COMMANDS:': + cmd_flag = True + if cmd_flag is True and line == '': + cmd_flag = False + if cmd_flag is True and line[-1] != ':' and 'help, h' not in line: + gap_pos = 0 + sub_cmd = line + if ' ' in line: + gap_pos = sub_cmd.index(' ') + if gap_pos: + sub_cmd = cur_cmd + ' ' + sub_cmd[:gap_pos] + get_cmd_recursively(sub_cmd) + except Exception as e: + print('Fail to deal with "%s" with error:\n%s' % (line, e)) + + get_cmd_recursively('./' + prog) + md_file.close() + + +if __name__ == "__main__": + os.putenv("LOTUS_VERSION_IGNORE_COMMIT", "1") + generate_lotus_cli('lotus') + generate_lotus_cli('lotus-miner') + generate_lotus_cli('lotus-worker') diff --git a/scripts/make-completions.sh b/scripts/make-completions.sh deleted file mode 100755 index 1bfd59bf3..000000000 --- a/scripts/make-completions.sh +++ /dev/null @@ -1,9 +0,0 @@ -#!/usr/bin/env bash - -# scripts/make-completions.sh [progname] - -echo '#!/usr/bin/env bash' > "scripts/bash-completion/$1" 
-echo '#!/usr/bin/env zsh' > "scripts/zsh-completion/$1" - -$1 --init-completion=bash >> "scripts/bash-completion/$1" -$1 --init-completion=zsh >> "scripts/zsh-completion/$1" diff --git a/scripts/zsh-completion/lotus b/scripts/zsh-completion/lotus index bbd886ae4..8d21370b2 100644 --- a/scripts/zsh-completion/lotus +++ b/scripts/zsh-completion/lotus @@ -1,12 +1,23 @@ -#!/usr/bin/env zsh -autoload -U compinit && compinit; -autoload -U bashcompinit && bashcompinit; -_cli_bash_autocomplete() { - local cur opts base; - COMPREPLY=(); - cur="${COMP_WORDS[COMP_CWORD]}"; - opts=$( ${COMP_WORDS[@]:0:$COMP_CWORD} --generate-completion ); - COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) ); - return 0; -}; -complete -F _cli_bash_autocomplete lotus \ No newline at end of file +#compdef lotus lotus-miner lotus-worker + +_cli_zsh_autocomplete() { + + local -a opts + local cur + cur=${words[-1]} + if [[ "$cur" == "-"* ]]; then + opts=("${(@f)$(_CLI_ZSH_AUTOCOMPLETE_HACK=1 ${words[@]:0:#words[@]-1} ${cur} --generate-bash-completion)}") + else + opts=("${(@f)$(_CLI_ZSH_AUTOCOMPLETE_HACK=1 ${words[@]:0:#words[@]-1} --generate-bash-completion)}") + fi + + if [[ "${opts[1]}" != "" ]]; then + _describe 'values' opts + else + _files + fi + + return +} + +compdef _cli_zsh_autocomplete lotus lotus-miner lotus-worker diff --git a/scripts/zsh-completion/lotus-miner b/scripts/zsh-completion/lotus-miner deleted file mode 100644 index 3e23749e6..000000000 --- a/scripts/zsh-completion/lotus-miner +++ /dev/null @@ -1,12 +0,0 @@ -#!/usr/bin/env zsh -autoload -U compinit && compinit; -autoload -U bashcompinit && bashcompinit; -_cli_bash_autocomplete() { - local cur opts base; - COMPREPLY=(); - cur="${COMP_WORDS[COMP_CWORD]}"; - opts=$( ${COMP_WORDS[@]:0:$COMP_CWORD} --generate-completion ); - COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) ); - return 0; -}; -complete -F _cli_bash_autocomplete lotus-miner \ No newline at end of file diff --git a/snap/local/icon.svg b/snap/local/icon.svg new file mode 
100644 index 000000000..da992296a --- /dev/null +++ b/snap/local/icon.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/snap/snapcraft.yaml b/snap/snapcraft.yaml new file mode 100644 index 000000000..472621c2a --- /dev/null +++ b/snap/snapcraft.yaml @@ -0,0 +1,76 @@ +name: lotus-filecoin +base: core20 +version: latest +summary: filecoin daemon/client +icon: snap/local/icon.svg +description: | + Filecoin is a peer-to-peer network that stores files on the internet + with built-in economic incentives to ensure files are stored reliably over time + + For documentation and additional information, please see the following resources + + https://filecoin.io + + https://fil.org + + https://docs.filecoin.io + + https://github.com/filecoin-project/lotus + +grade: devel +confinement: strict + +parts: + lotus: + plugin: make + source: ./ + build-snaps: + - go + - rustup + build-packages: + - git + - jq + - libhwloc-dev + - ocl-icd-opencl-dev + - pkg-config + stage-packages: + - libhwloc15 + - ocl-icd-libopencl1 + override-build: | + LDFLAGS="" make lotus lotus-miner lotus-worker + cp lotus lotus-miner lotus-worker $SNAPCRAFT_PART_INSTALL + +apps: + lotus: + command: lotus + plugs: + - network + - network-bind + - home + environment: + FIL_PROOFS_PARAMETER_CACHE: $SNAP_USER_COMMON/filecoin-proof-parameters + LOTUS_PATH: $SNAP_USER_COMMON/lotus + LOTUS_MINER_PATH: $SNAP_USER_COMMON/lotus-miner + LOTUS_WORKER_PATH: $SNAP_USER_COMMON/lotus-worker + lotus-miner: + command: lotus-miner + plugs: + - network + - network-bind + - opengl + environment: + FIL_PROOFS_PARAMETER_CACHE: $SNAP_USER_COMMON/filecoin-proof-parameters + LOTUS_PATH: $SNAP_USER_COMMON/lotus + LOTUS_MINER_PATH: $SNAP_USER_COMMON/lotus-miner + LOTUS_WORKER_PATH: $SNAP_USER_COMMON/lotus-worker + lotus-worker: + command: lotus-worker + plugs: + - network + - network-bind + - opengl + environment: + FIL_PROOFS_PARAMETER_CACHE: $SNAP_USER_COMMON/filecoin-proof-parameters + LOTUS_PATH: $SNAP_USER_COMMON/lotus + 
LOTUS_MINER_PATH: $SNAP_USER_COMMON/lotus-miner + LOTUS_WORKER_PATH: $SNAP_USER_COMMON/lotus-worker diff --git a/storage/adapter_storage_miner.go b/storage/adapter_storage_miner.go index e8ddb2125..531fe2d03 100644 --- a/storage/adapter_storage_miner.go +++ b/storage/adapter_storage_miner.go @@ -32,10 +32,10 @@ import ( var _ sealing.SealingAPI = new(SealingAPIAdapter) type SealingAPIAdapter struct { - delegate storageMinerApi + delegate fullNodeFilteredAPI } -func NewSealingAPIAdapter(api storageMinerApi) SealingAPIAdapter { +func NewSealingAPIAdapter(api fullNodeFilteredAPI) SealingAPIAdapter { return SealingAPIAdapter{delegate: api} } @@ -76,6 +76,15 @@ func (s SealingAPIAdapter) StateMinerInfo(ctx context.Context, maddr address.Add return s.delegate.StateMinerInfo(ctx, maddr, tsk) } +func (s SealingAPIAdapter) StateMinerAvailableBalance(ctx context.Context, maddr address.Address, tok sealing.TipSetToken) (big.Int, error) { + tsk, err := types.TipSetKeyFromBytes(tok) + if err != nil { + return big.Zero(), xerrors.Errorf("failed to unmarshal TipSetToken to TipSetKey: %w", err) + } + + return s.delegate.StateMinerAvailableBalance(ctx, maddr, tsk) +} + func (s SealingAPIAdapter) StateMinerWorkerAddress(ctx context.Context, maddr address.Address, tok sealing.TipSetToken) (address.Address, error) { // TODO: update storage-fsm to just StateMinerInfo mi, err := s.StateMinerInfo(ctx, maddr, tok) diff --git a/storage/addresses.go b/storage/addresses.go index a8e5e7101..f8f06ed98 100644 --- a/storage/addresses.go +++ b/storage/addresses.go @@ -5,6 +5,7 @@ import ( "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/chain/actors/builtin/miner" @@ -24,6 +25,12 @@ type AddressSelector struct { } func (as *AddressSelector) AddressFor(ctx context.Context, a addrSelectApi, mi miner.MinerInfo, use api.AddrUse, 
goodFunds, minFunds abi.TokenAmount) (address.Address, abi.TokenAmount, error) { + if as == nil { + // should only happen in some tests + log.Warnw("smart address selection disabled, using worker address") + return mi.Worker, big.Zero(), nil + } + var addrs []address.Address switch use { case api.PreCommitAddr: @@ -32,6 +39,8 @@ func (as *AddressSelector) AddressFor(ctx context.Context, a addrSelectApi, mi m addrs = append(addrs, as.CommitControl...) case api.TerminateSectorsAddr: addrs = append(addrs, as.TerminateControl...) + case api.DealPublishAddr: + addrs = append(addrs, as.DealPublishControl...) default: defaultCtl := map[address.Address]struct{}{} for _, a := range mi.ControlAddresses { @@ -43,6 +52,7 @@ func (as *AddressSelector) AddressFor(ctx context.Context, a addrSelectApi, mi m configCtl := append([]address.Address{}, as.PreCommitControl...) configCtl = append(configCtl, as.CommitControl...) configCtl = append(configCtl, as.TerminateControl...) + configCtl = append(configCtl, as.DealPublishControl...) 
for _, addr := range configCtl { if addr.Protocol() != address.ID { diff --git a/storage/miner.go b/storage/miner.go index e1829a7d6..cdacc2734 100644 --- a/storage/miner.go +++ b/storage/miner.go @@ -5,24 +5,22 @@ import ( "errors" "time" - "github.com/filecoin-project/go-state-types/network" - - "github.com/filecoin-project/go-state-types/dline" - "github.com/filecoin-project/go-bitfield" - "github.com/ipfs/go-cid" "github.com/ipfs/go-datastore" logging "github.com/ipfs/go-log/v2" - "github.com/libp2p/go-libp2p-core/host" "golang.org/x/xerrors" "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/crypto" + "github.com/filecoin-project/go-state-types/dline" + "github.com/filecoin-project/go-state-types/network" + + "github.com/filecoin-project/specs-storage/storage" + sectorstorage "github.com/filecoin-project/lotus/extern/sector-storage" "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper" - "github.com/filecoin-project/specs-storage/storage" "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/api/v1api" @@ -41,10 +39,17 @@ import ( var log = logging.Logger("storageminer") +// Miner is the central miner entrypoint object inside Lotus. It is +// instantiated in the node builder, along with the WindowPoStScheduler. +// +// This object is the owner of the sealing pipeline. Most of the actual logic +// lives in the storage-sealing module (sealing.Sealing), and the Miner object +// exposes it to the rest of the system by proxying calls. +// +// Miner#Run starts the sealing FSM. 
type Miner struct { - api storageMinerApi + api fullNodeFilteredAPI feeCfg config.MinerFeeConfig - h host.Host sealer sectorstorage.SectorManager ds datastore.Batching sc sealing.SectorIDCounter @@ -71,7 +76,9 @@ type SealingStateEvt struct { Error string } -type storageMinerApi interface { +// fullNodeFilteredAPI is the subset of the full node API the Miner needs from +// a Lotus full node. +type fullNodeFilteredAPI interface { // Call a read only method on actors (no interaction with the chain required) StateCall(context.Context, *types.Message, types.TipSetKey) (*api.InvocResult, error) StateMinerSectors(context.Context, address.Address, *bitfield.BitField, types.TipSetKey) ([]*miner.SectorOnChainInfo, error) @@ -79,6 +86,7 @@ type storageMinerApi interface { StateSectorGetInfo(context.Context, address.Address, abi.SectorNumber, types.TipSetKey) (*miner.SectorOnChainInfo, error) StateSectorPartition(ctx context.Context, maddr address.Address, sectorNumber abi.SectorNumber, tok types.TipSetKey) (*miner.SectorLocation, error) StateMinerInfo(context.Context, address.Address, types.TipSetKey) (miner.MinerInfo, error) + StateMinerAvailableBalance(ctx context.Context, maddr address.Address, tok types.TipSetKey) (types.BigInt, error) StateMinerDeadlines(context.Context, address.Address, types.TipSetKey) ([]api.Deadline, error) StateMinerPartitions(context.Context, address.Address, uint64, types.TipSetKey) ([]api.Partition, error) StateMinerProvingDeadline(context.Context, address.Address, types.TipSetKey) (*dline.Info, error) @@ -117,11 +125,21 @@ type storageMinerApi interface { WalletHas(context.Context, address.Address) (bool, error) } -func NewMiner(api storageMinerApi, maddr address.Address, h host.Host, ds datastore.Batching, sealer sectorstorage.SectorManager, sc sealing.SectorIDCounter, verif ffiwrapper.Verifier, prover ffiwrapper.Prover, gsd dtypes.GetSealingConfigFunc, feeCfg config.MinerFeeConfig, journal journal.Journal, as *AddressSelector) (*Miner, error) 
{ +// NewMiner creates a new Miner object. +func NewMiner(api fullNodeFilteredAPI, + maddr address.Address, + ds datastore.Batching, + sealer sectorstorage.SectorManager, + sc sealing.SectorIDCounter, + verif ffiwrapper.Verifier, + prover ffiwrapper.Prover, + gsd dtypes.GetSealingConfigFunc, + feeCfg config.MinerFeeConfig, + journal journal.Journal, + as *AddressSelector) (*Miner, error) { m := &Miner{ api: api, feeCfg: feeCfg, - h: h, sealer: sealer, ds: ds, sc: sc, @@ -138,6 +156,7 @@ func NewMiner(api storageMinerApi, maddr address.Address, h host.Host, ds datast return m, nil } +// Run starts the sealing FSM in the background, running preliminary checks first. func (m *Miner) Run(ctx context.Context) error { if err := m.runPreflightChecks(ctx); err != nil { return xerrors.Errorf("miner preflight checks failed: %w", err) @@ -148,17 +167,37 @@ func (m *Miner) Run(ctx context.Context) error { return xerrors.Errorf("getting miner info: %w", err) } - evts := events.NewEvents(ctx, m.api) - adaptedAPI := NewSealingAPIAdapter(m.api) - // TODO: Maybe we update this policy after actor upgrades? - pcp := sealing.NewBasicPreCommitPolicy(adaptedAPI, policy.GetMaxSectorExpirationExtension()-(md.WPoStProvingPeriod*2), md.PeriodStart%md.WPoStProvingPeriod) + var ( + // consumer of chain head changes. + evts = events.NewEvents(ctx, m.api) + evtsAdapter = NewEventsAdapter(evts) - as := func(ctx context.Context, mi miner.MinerInfo, use api.AddrUse, goodFunds, minFunds abi.TokenAmount) (address.Address, abi.TokenAmount, error) { - return m.addrSel.AddressFor(ctx, m.api, mi, use, goodFunds, minFunds) - } + // Create a shim to glue the API required by the sealing component + // with the API that Lotus is capable of providing. + // The shim translates between "tipset tokens" and tipset keys, and + // provides extra methods. 
+ adaptedAPI = NewSealingAPIAdapter(m.api) - m.sealing = sealing.New(ctx, adaptedAPI, m.feeCfg, NewEventsAdapter(evts), m.maddr, m.ds, m.sealer, m.sc, m.verif, m.prover, &pcp, sealing.GetSealingConfigFunc(m.getSealConfig), m.handleSealingNotifications, as) + // Instantiate a precommit policy. + defaultDuration = policy.GetMaxSectorExpirationExtension() - (md.WPoStProvingPeriod * 2) + provingBoundary = md.PeriodStart % md.WPoStProvingPeriod + // TODO: Maybe we update this policy after actor upgrades? + pcp = sealing.NewBasicPreCommitPolicy(adaptedAPI, defaultDuration, provingBoundary) + + // address selector. + as = func(ctx context.Context, mi miner.MinerInfo, use api.AddrUse, goodFunds, minFunds abi.TokenAmount) (address.Address, abi.TokenAmount, error) { + return m.addrSel.AddressFor(ctx, m.api, mi, use, goodFunds, minFunds) + } + + // sealing configuration. + cfg = sealing.GetSealingConfigFunc(m.getSealConfig) + ) + + // Instantiate the sealing FSM. + m.sealing = sealing.New(ctx, adaptedAPI, m.feeCfg, evtsAdapter, m.maddr, m.ds, m.sealer, m.sc, m.verif, m.prover, &pcp, cfg, m.handleSealingNotifications, as) + + // Run the sealing FSM. go m.sealing.Run(ctx) //nolint:errcheck // logged intside the function return nil @@ -180,6 +219,7 @@ func (m *Miner) Stop(ctx context.Context) error { return m.sealing.Stop(ctx) } +// runPreflightChecks verifies that preconditions to run the miner are satisfied. 
func (m *Miner) runPreflightChecks(ctx context.Context) error { mi, err := m.api.StateMinerInfo(ctx, m.maddr, types.EmptyTSK) if err != nil { diff --git a/storage/sealing.go b/storage/miner_sealing.go similarity index 53% rename from storage/sealing.go rename to storage/miner_sealing.go index 6a1195826..a5e838b89 100644 --- a/storage/sealing.go +++ b/storage/miner_sealing.go @@ -2,16 +2,19 @@ package storage import ( "context" - "io" "github.com/ipfs/go-cid" + "golang.org/x/xerrors" "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" "github.com/filecoin-project/specs-storage/storage" + "github.com/filecoin-project/lotus/api" sealing "github.com/filecoin-project/lotus/extern/storage-sealing" "github.com/filecoin-project/lotus/extern/storage-sealing/sealiface" + "github.com/filecoin-project/lotus/storage/sectorblocks" ) // TODO: refactor this to be direct somehow @@ -20,10 +23,6 @@ func (m *Miner) Address() address.Address { return m.sealing.Address() } -func (m *Miner) AddPieceToAnySector(ctx context.Context, size abi.UnpaddedPieceSize, r io.Reader, d sealing.DealInfo) (abi.SectorNumber, abi.PaddedPieceSize, error) { - return m.sealing.AddPieceToAnySector(ctx, size, r, d) -} - func (m *Miner) StartPackingSector(sectorNum abi.SectorNumber) error { return m.sealing.StartPacking(sectorNum) } @@ -32,10 +31,6 @@ func (m *Miner) ListSectors() ([]sealing.SectorInfo, error) { return m.sealing.ListSectors() } -func (m *Miner) GetSectorInfo(sid abi.SectorNumber) (sealing.SectorInfo, error) { - return m.sealing.GetSectorInfo(sid) -} - func (m *Miner) PledgeSector(ctx context.Context) (storage.SectorRef, error) { return m.sealing.PledgeSector(ctx) } @@ -83,3 +78,73 @@ func (m *Miner) MarkForUpgrade(id abi.SectorNumber) error { func (m *Miner) IsMarkedForUpgrade(id abi.SectorNumber) bool { return m.sealing.IsMarkedForUpgrade(id) } + +func (m *Miner) SectorAddPieceToAny(ctx context.Context, 
size abi.UnpaddedPieceSize, r storage.Data, d api.PieceDealInfo) (api.SectorOffset, error) { + return m.sealing.SectorAddPieceToAny(ctx, size, r, d) +} + +func (m *Miner) SectorsStatus(ctx context.Context, sid abi.SectorNumber, showOnChainInfo bool) (api.SectorInfo, error) { + if showOnChainInfo { + return api.SectorInfo{}, xerrors.Errorf("on-chain info not supported") + } + + info, err := m.sealing.GetSectorInfo(sid) + if err != nil { + return api.SectorInfo{}, err + } + + deals := make([]abi.DealID, len(info.Pieces)) + for i, piece := range info.Pieces { + if piece.DealInfo == nil { + continue + } + deals[i] = piece.DealInfo.DealID + } + + log := make([]api.SectorLog, len(info.Log)) + for i, l := range info.Log { + log[i] = api.SectorLog{ + Kind: l.Kind, + Timestamp: l.Timestamp, + Trace: l.Trace, + Message: l.Message, + } + } + + sInfo := api.SectorInfo{ + SectorID: sid, + State: api.SectorState(info.State), + CommD: info.CommD, + CommR: info.CommR, + Proof: info.Proof, + Deals: deals, + Ticket: api.SealTicket{ + Value: info.TicketValue, + Epoch: info.TicketEpoch, + }, + Seed: api.SealSeed{ + Value: info.SeedValue, + Epoch: info.SeedEpoch, + }, + PreCommitMsg: info.PreCommitMessage, + CommitMsg: info.CommitMessage, + Retries: info.InvalidProofs, + ToUpgrade: m.IsMarkedForUpgrade(sid), + + LastErr: info.LastErr, + Log: log, + // on chain info + SealProof: 0, + Activation: 0, + Expiration: 0, + DealWeight: big.Zero(), + VerifiedDealWeight: big.Zero(), + InitialPledge: big.Zero(), + OnTime: 0, + Early: 0, + } + + return sInfo, nil +} + +var _ sectorblocks.SectorBuilder = &Miner{} diff --git a/storage/sectorblocks/blocks.go b/storage/sectorblocks/blocks.go index bc8456a1f..ad4ffc0db 100644 --- a/storage/sectorblocks/blocks.go +++ b/storage/sectorblocks/blocks.go @@ -16,11 +16,10 @@ import ( cborutil "github.com/filecoin-project/go-cbor-util" "github.com/filecoin-project/go-state-types/abi" - sealing "github.com/filecoin-project/lotus/extern/storage-sealing" + 
"github.com/filecoin-project/specs-storage/storage" "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/node/modules/dtypes" - "github.com/filecoin-project/lotus/storage" ) type SealSerialization uint8 @@ -48,17 +47,22 @@ func DsKeyToDealID(key datastore.Key) (uint64, error) { return dealID, nil } +type SectorBuilder interface { + SectorAddPieceToAny(ctx context.Context, size abi.UnpaddedPieceSize, r storage.Data, d api.PieceDealInfo) (api.SectorOffset, error) + SectorsStatus(ctx context.Context, sid abi.SectorNumber, showOnChainInfo bool) (api.SectorInfo, error) +} + type SectorBlocks struct { - *storage.Miner + SectorBuilder keys datastore.Batching keyLk sync.Mutex } -func NewSectorBlocks(miner *storage.Miner, ds dtypes.MetadataDS) *SectorBlocks { +func NewSectorBlocks(sb SectorBuilder, ds dtypes.MetadataDS) *SectorBlocks { sbc := &SectorBlocks{ - Miner: miner, - keys: namespace.Wrap(ds, dsPrefix), + SectorBuilder: sb, + keys: namespace.Wrap(ds, dsPrefix), } return sbc @@ -96,19 +100,19 @@ func (st *SectorBlocks) writeRef(dealID abi.DealID, sectorID abi.SectorNumber, o return st.keys.Put(DealIDToDsKey(dealID), newRef) // TODO: batch somehow } -func (st *SectorBlocks) AddPiece(ctx context.Context, size abi.UnpaddedPieceSize, r io.Reader, d sealing.DealInfo) (abi.SectorNumber, abi.PaddedPieceSize, error) { - sn, offset, err := st.Miner.AddPieceToAnySector(ctx, size, r, d) +func (st *SectorBlocks) AddPiece(ctx context.Context, size abi.UnpaddedPieceSize, r io.Reader, d api.PieceDealInfo) (abi.SectorNumber, abi.PaddedPieceSize, error) { + so, err := st.SectorBuilder.SectorAddPieceToAny(ctx, size, r, d) if err != nil { return 0, 0, err } // TODO: DealID has very low finality here - err = st.writeRef(d.DealID, sn, offset, size) + err = st.writeRef(d.DealID, so.Sector, so.Offset, size) if err != nil { return 0, 0, xerrors.Errorf("writeRef: %w", err) } - return sn, offset, nil + return so.Sector, so.Offset, nil } func (st *SectorBlocks) List() 
(map[uint64][]api.SealedRef, error) { diff --git a/storage/wdpost_changehandler.go b/storage/wdpost_changehandler.go index 188d7e93a..7b80f2744 100644 --- a/storage/wdpost_changehandler.go +++ b/storage/wdpost_changehandler.go @@ -21,22 +21,25 @@ const ( type CompleteGeneratePoSTCb func(posts []miner.SubmitWindowedPoStParams, err error) type CompleteSubmitPoSTCb func(err error) -type changeHandlerAPI interface { +// wdPoStCommands is the subset of the WindowPoStScheduler + full node APIs used +// by the changeHandler to execute actions and query state. +type wdPoStCommands interface { StateMinerProvingDeadline(context.Context, address.Address, types.TipSetKey) (*dline.Info, error) + startGeneratePoST(ctx context.Context, ts *types.TipSet, deadline *dline.Info, onComplete CompleteGeneratePoSTCb) context.CancelFunc startSubmitPoST(ctx context.Context, ts *types.TipSet, deadline *dline.Info, posts []miner.SubmitWindowedPoStParams, onComplete CompleteSubmitPoSTCb) context.CancelFunc onAbort(ts *types.TipSet, deadline *dline.Info) - failPost(err error, ts *types.TipSet, deadline *dline.Info) + recordPoStFailure(err error, ts *types.TipSet, deadline *dline.Info) } type changeHandler struct { - api changeHandlerAPI + api wdPoStCommands actor address.Address proveHdlr *proveHandler submitHdlr *submitHandler } -func newChangeHandler(api changeHandlerAPI, actor address.Address) *changeHandler { +func newChangeHandler(api wdPoStCommands, actor address.Address) *changeHandler { posts := newPostsCache() p := newProver(api, posts) s := newSubmitter(api, posts) @@ -146,7 +149,7 @@ type postResult struct { // proveHandler generates proofs type proveHandler struct { - api changeHandlerAPI + api wdPoStCommands posts *postsCache postResults chan *postResult @@ -163,7 +166,7 @@ type proveHandler struct { } func newProver( - api changeHandlerAPI, + api wdPoStCommands, posts *postsCache, ) *proveHandler { ctx, cancel := context.WithCancel(context.Background()) @@ -248,7 +251,7 @@ func 
(p *proveHandler) processPostResult(res *postResult) { di := res.currPost.di if res.err != nil { // Proving failed so inform the API - p.api.failPost(res.err, res.ts, di) + p.api.recordPoStFailure(res.err, res.ts, di) log.Warnf("Aborted window post Proving (Deadline: %+v)", di) p.api.onAbort(res.ts, di) @@ -295,7 +298,7 @@ type postInfo struct { // submitHandler submits proofs on-chain type submitHandler struct { - api changeHandlerAPI + api wdPoStCommands posts *postsCache submitResults chan *submitResult @@ -319,7 +322,7 @@ type submitHandler struct { } func newSubmitter( - api changeHandlerAPI, + api wdPoStCommands, posts *postsCache, ) *submitHandler { ctx, cancel := context.WithCancel(context.Background()) @@ -488,7 +491,7 @@ func (s *submitHandler) submitIfReady(ctx context.Context, advance *types.TipSet func (s *submitHandler) processSubmitResult(res *submitResult) { if res.err != nil { // Submit failed so inform the API and go back to the start state - s.api.failPost(res.err, res.pw.ts, res.pw.di) + s.api.recordPoStFailure(res.err, res.pw.ts, res.pw.di) log.Warnf("Aborted window post Submitting (Deadline: %+v)", res.pw.di) s.api.onAbort(res.pw.ts, res.pw.di) diff --git a/storage/wdpost_changehandler_test.go b/storage/wdpost_changehandler_test.go index bae4f40fd..a2283cb7c 100644 --- a/storage/wdpost_changehandler_test.go +++ b/storage/wdpost_changehandler_test.go @@ -191,7 +191,7 @@ func (m *mockAPI) wasAbortCalled() bool { return m.abortCalled } -func (m *mockAPI) failPost(err error, ts *types.TipSet, deadline *dline.Info) { +func (m *mockAPI) recordPoStFailure(err error, ts *types.TipSet, deadline *dline.Info) { } func (m *mockAPI) setChangeHandler(ch *changeHandler) { diff --git a/storage/wdpost_run.go b/storage/wdpost_run.go index eeff94418..0fe7ea851 100644 --- a/storage/wdpost_run.go +++ b/storage/wdpost_run.go @@ -31,7 +31,8 @@ import ( "github.com/filecoin-project/lotus/chain/types" ) -func (s *WindowPoStScheduler) failPost(err error, ts 
*types.TipSet, deadline *dline.Info) { +// recordPoStFailure records a failure in the journal. +func (s *WindowPoStScheduler) recordPoStFailure(err error, ts *types.TipSet, deadline *dline.Info) { s.journal.RecordEvent(s.evtTypes[evtTypeWdPoStScheduler], func() interface{} { c := evtCommon{Error: err} if ts != nil { @@ -99,9 +100,9 @@ func (s *WindowPoStScheduler) runGeneratePoST( ctx, span := trace.StartSpan(ctx, "WindowPoStScheduler.generatePoST") defer span.End() - posts, err := s.runPost(ctx, *deadline, ts) + posts, err := s.runPoStCycle(ctx, *deadline, ts) if err != nil { - log.Errorf("runPost failed: %+v", err) + log.Errorf("runPoStCycle failed: %+v", err) return nil, err } @@ -167,7 +168,7 @@ func (s *WindowPoStScheduler) runSubmitPoST( commRand, err := s.api.ChainGetRandomnessFromTickets(ctx, ts.Key(), crypto.DomainSeparationTag_PoStChainCommit, commEpoch, nil) if err != nil { err = xerrors.Errorf("failed to get chain randomness from tickets for windowPost (ts=%d; deadline=%d): %w", ts.Height(), commEpoch, err) - log.Errorf("submitPost failed: %+v", err) + log.Errorf("submitPoStMessage failed: %+v", err) return err } @@ -180,7 +181,7 @@ func (s *WindowPoStScheduler) runSubmitPoST( post.ChainCommitRand = commRand // Submit PoST - sm, submitErr := s.submitPost(ctx, post) + sm, submitErr := s.submitPoStMessage(ctx, post) if submitErr != nil { log.Errorf("submit window post failed: %+v", submitErr) } else { @@ -233,8 +234,25 @@ func (s *WindowPoStScheduler) checkSectors(ctx context.Context, check bitfield.B return sbf, nil } -func (s *WindowPoStScheduler) checkNextRecoveries(ctx context.Context, dlIdx uint64, partitions []api.Partition, tsk types.TipSetKey) ([]miner.RecoveryDeclaration, *types.SignedMessage, error) { - ctx, span := trace.StartSpan(ctx, "storage.checkNextRecoveries") +// declareRecoveries identifies sectors that were previously marked as faulty +// for our miner, but are now recovered (i.e. 
are now provable again) and +// still not reported as such. +// +// It then reports the recovery on chain via a `DeclareFaultsRecovered` +// message to our miner actor. +// +// This is always invoked ahead of time, before the deadline for the evaluated +// sectors arrives. That way, recoveries are declared in preparation for those +// sectors to be proven. +// +// If a declaration is made, it awaits for build.MessageConfidence confirmations +// on chain before returning. +// +// TODO: the waiting should happen in the background. Right now this +// is blocking/delaying the actual generation and submission of WindowPoSts in +// this deadline! +func (s *WindowPoStScheduler) declareRecoveries(ctx context.Context, dlIdx uint64, partitions []api.Partition, tsk types.TipSetKey) ([]miner.RecoveryDeclaration, *types.SignedMessage, error) { + ctx, span := trace.StartSpan(ctx, "storage.declareRecoveries") defer span.End() faulty := uint64(0) @@ -302,7 +320,7 @@ func (s *WindowPoStScheduler) checkNextRecoveries(ctx context.Context, dlIdx uin Value: types.NewInt(0), } spec := &api.MessageSendSpec{MaxFee: abi.TokenAmount(s.feeCfg.MaxWindowPoStGasFee)} - if err := s.setSender(ctx, msg, spec); err != nil { + if err := s.prepareMessage(ctx, msg, spec); err != nil { return recoveries, nil, err } @@ -325,8 +343,21 @@ func (s *WindowPoStScheduler) checkNextRecoveries(ctx context.Context, dlIdx uin return recoveries, sm, nil } -func (s *WindowPoStScheduler) checkNextFaults(ctx context.Context, dlIdx uint64, partitions []api.Partition, tsk types.TipSetKey) ([]miner.FaultDeclaration, *types.SignedMessage, error) { - ctx, span := trace.StartSpan(ctx, "storage.checkNextFaults") +// declareFaults identifies the sectors on the specified proving deadline that +// are faulty, and reports the faults on chain via the `DeclareFaults` message +// to our miner actor. +// +// This is always invoked ahead of time, before the deadline for the evaluated +// sectors arrives. 
That way, faults are declared before a penalty is accrued. +// +// If a declaration is made, it awaits for build.MessageConfidence confirmations +// on chain before returning. +// +// TODO: the waiting should happen in the background. Right now this +// is blocking/delaying the actual generation and submission of WindowPoSts in +// this deadline! +func (s *WindowPoStScheduler) declareFaults(ctx context.Context, dlIdx uint64, partitions []api.Partition, tsk types.TipSetKey) ([]miner.FaultDeclaration, *types.SignedMessage, error) { + ctx, span := trace.StartSpan(ctx, "storage.declareFaults") defer span.End() bad := uint64(0) @@ -387,7 +418,7 @@ func (s *WindowPoStScheduler) checkNextFaults(ctx context.Context, dlIdx uint64, Value: types.NewInt(0), // TODO: Is there a fee? } spec := &api.MessageSendSpec{MaxFee: abi.TokenAmount(s.feeCfg.MaxWindowPoStGasFee)} - if err := s.setSender(ctx, msg, spec); err != nil { + if err := s.prepareMessage(ctx, msg, spec); err != nil { return faults, nil, err } @@ -410,12 +441,18 @@ func (s *WindowPoStScheduler) checkNextFaults(ctx context.Context, dlIdx uint64, return faults, sm, nil } -func (s *WindowPoStScheduler) runPost(ctx context.Context, di dline.Info, ts *types.TipSet) ([]miner.SubmitWindowedPoStParams, error) { - ctx, span := trace.StartSpan(ctx, "storage.runPost") +// runPoStCycle runs a full cycle of the PoSt process: +// +// 1. performs recovery declarations for the next deadline. +// 2. performs fault declarations for the next deadline. +// 3. computes and submits proofs, batching partitions and making sure they +// don't exceed message capacity. 
+func (s *WindowPoStScheduler) runPoStCycle(ctx context.Context, di dline.Info, ts *types.TipSet) ([]miner.SubmitWindowedPoStParams, error) { + ctx, span := trace.StartSpan(ctx, "storage.runPoStCycle") defer span.End() go func() { - // TODO: extract from runPost, run on fault cutoff boundaries + // TODO: extract from runPoStCycle, run on fault cutoff boundaries // check faults / recoveries for the *next* deadline. It's already too // late to declare them for this deadline @@ -443,7 +480,7 @@ func (s *WindowPoStScheduler) runPost(ctx context.Context, di dline.Info, ts *ty } ) - if recoveries, sigmsg, err = s.checkNextRecoveries(context.TODO(), declDeadline, partitions, ts.Key()); err != nil { + if recoveries, sigmsg, err = s.declareRecoveries(context.TODO(), declDeadline, partitions, ts.Key()); err != nil { // TODO: This is potentially quite bad, but not even trying to post when this fails is objectively worse log.Errorf("checking sector recoveries: %v", err) } @@ -462,7 +499,7 @@ func (s *WindowPoStScheduler) runPost(ctx context.Context, di dline.Info, ts *ty return // FORK: declaring faults after ignition upgrade makes no sense } - if faults, sigmsg, err = s.checkNextFaults(context.TODO(), declDeadline, partitions, ts.Key()); err != nil { + if faults, sigmsg, err = s.declareFaults(context.TODO(), declDeadline, partitions, ts.Key()); err != nil { // TODO: This is also potentially really bad, but we try to post anyways log.Errorf("checking sector faults: %v", err) } @@ -765,7 +802,10 @@ func (s *WindowPoStScheduler) sectorsForProof(ctx context.Context, goodSectors, return proofSectors, nil } -func (s *WindowPoStScheduler) submitPost(ctx context.Context, proof *miner.SubmitWindowedPoStParams) (*types.SignedMessage, error) { +// submitPoStMessage builds a SubmitWindowedPoSt message and submits it to +// the mpool. It doesn't synchronously block on confirmations, but it does +// monitor in the background simply for the purposes of logging. 
+func (s *WindowPoStScheduler) submitPoStMessage(ctx context.Context, proof *miner.SubmitWindowedPoStParams) (*types.SignedMessage, error) { ctx, span := trace.StartSpan(ctx, "storage.commitPost") defer span.End() @@ -783,13 +823,11 @@ func (s *WindowPoStScheduler) submitPost(ctx context.Context, proof *miner.Submi Value: types.NewInt(0), } spec := &api.MessageSendSpec{MaxFee: abi.TokenAmount(s.feeCfg.MaxWindowPoStGasFee)} - if err := s.setSender(ctx, msg, spec); err != nil { + if err := s.prepareMessage(ctx, msg, spec); err != nil { return nil, err } - // TODO: consider maybe caring about the output sm, err := s.api.MpoolPushMessage(ctx, msg, spec) - if err != nil { return nil, xerrors.Errorf("pushing message to mpool: %w", err) } @@ -813,14 +851,20 @@ func (s *WindowPoStScheduler) submitPost(ctx context.Context, proof *miner.Submi return sm, nil } -func (s *WindowPoStScheduler) setSender(ctx context.Context, msg *types.Message, spec *api.MessageSendSpec) error { +// prepareMessage prepares a message before sending it, setting: +// +// * the sender (from the AddressSelector, falling back to the worker address if none set) +// * the right gas parameters +func (s *WindowPoStScheduler) prepareMessage(ctx context.Context, msg *types.Message, spec *api.MessageSendSpec) error { mi, err := s.api.StateMinerInfo(ctx, s.actor, types.EmptyTSK) if err != nil { return xerrors.Errorf("error getting miner info: %w", err) } - // use the worker as a fallback + // set the worker as a fallback msg.From = mi.Worker + // (optimal) initial estimation with some overestimation that guarantees + // block inclusion within the next 20 tipsets. 
gm, err := s.api.GasEstimateMessageGas(ctx, msg, spec, types.EmptyTSK) if err != nil { log.Errorw("estimating gas", "error", err) @@ -828,10 +872,12 @@ func (s *WindowPoStScheduler) setSender(ctx context.Context, msg *types.Message, } *msg = *gm - // estimate + // calculate a more frugal estimation; premium is estimated to guarantee + // inclusion within 5 tipsets, and fee cap is estimated for inclusion + // within 4 tipsets. minGasFeeMsg := *msg - minGasFeeMsg.GasPremium, err = s.api.GasEstimateGasPremium(ctx, 5, msg.From, msg.GasLimit, types.TipSetKey{}) + minGasFeeMsg.GasPremium, err = s.api.GasEstimateGasPremium(ctx, 5, msg.From, msg.GasLimit, types.EmptyTSK) if err != nil { log.Errorf("failed to estimate minimum gas premium: %+v", err) minGasFeeMsg.GasPremium = msg.GasPremium @@ -843,6 +889,8 @@ func (s *WindowPoStScheduler) setSender(ctx context.Context, msg *types.Message, minGasFeeMsg.GasFeeCap = msg.GasFeeCap } + // goodFunds = funds needed for optimal inclusion probability. + // minFunds = funds needed for more speculative inclusion probability. 
goodFunds := big.Add(msg.RequiredFunds(), msg.Value) minFunds := big.Min(big.Add(minGasFeeMsg.RequiredFunds(), minGasFeeMsg.Value), goodFunds) diff --git a/storage/wdpost_run_test.go b/storage/wdpost_run_test.go index 3a009d5c7..61f2a324b 100644 --- a/storage/wdpost_run_test.go +++ b/storage/wdpost_run_test.go @@ -40,7 +40,7 @@ import ( type mockStorageMinerAPI struct { partitions []api.Partition pushedMessages chan *types.Message - storageMinerApi + fullNodeFilteredAPI } func newMockStorageMinerAPI() *mockStorageMinerAPI { @@ -401,4 +401,4 @@ func (m *mockStorageMinerAPI) WalletHas(ctx context.Context, address address.Add return true, nil } -var _ storageMinerApi = &mockStorageMinerAPI{} +var _ fullNodeFilteredAPI = &mockStorageMinerAPI{} diff --git a/storage/wdpost_sched.go b/storage/wdpost_sched.go index 8c24a5516..88357c5b3 100644 --- a/storage/wdpost_sched.go +++ b/storage/wdpost_sched.go @@ -23,8 +23,14 @@ import ( "go.opencensus.io/trace" ) +// WindowPoStScheduler is the coordinator for WindowPoSt submissions, fault +// declaration, and recovery declarations. It watches the chain for reverts and +// applies, and schedules/run those processes as partition deadlines arrive. +// +// WindowPoStScheduler watches the chain though the changeHandler, which in turn +// turn calls the scheduler when the time arrives to do work. type WindowPoStScheduler struct { - api storageMinerApi + api fullNodeFilteredAPI feeCfg config.MinerFeeConfig addrSel *AddressSelector prover storage.Prover @@ -43,7 +49,15 @@ type WindowPoStScheduler struct { // failLk sync.Mutex } -func NewWindowedPoStScheduler(api storageMinerApi, fc config.MinerFeeConfig, as *AddressSelector, sb storage.Prover, verif ffiwrapper.Verifier, ft sectorstorage.FaultTracker, j journal.Journal, actor address.Address) (*WindowPoStScheduler, error) { +// NewWindowedPoStScheduler creates a new WindowPoStScheduler scheduler. 
+func NewWindowedPoStScheduler(api fullNodeFilteredAPI, + cfg config.MinerFeeConfig, + as *AddressSelector, + sp storage.Prover, + verif ffiwrapper.Verifier, + ft sectorstorage.FaultTracker, + j journal.Journal, + actor address.Address) (*WindowPoStScheduler, error) { mi, err := api.StateMinerInfo(context.TODO(), actor, types.EmptyTSK) if err != nil { return nil, xerrors.Errorf("getting sector size: %w", err) @@ -51,9 +65,9 @@ func NewWindowedPoStScheduler(api storageMinerApi, fc config.MinerFeeConfig, as return &WindowPoStScheduler{ api: api, - feeCfg: fc, + feeCfg: cfg, addrSel: as, - prover: sb, + prover: sp, verifier: verif, faultTracker: ft, proofType: mi.WindowPoStProofType, @@ -70,21 +84,24 @@ func NewWindowedPoStScheduler(api storageMinerApi, fc config.MinerFeeConfig, as }, nil } -type changeHandlerAPIImpl struct { - storageMinerApi - *WindowPoStScheduler -} - func (s *WindowPoStScheduler) Run(ctx context.Context) { - // Initialize change handler - chImpl := &changeHandlerAPIImpl{storageMinerApi: s.api, WindowPoStScheduler: s} - s.ch = newChangeHandler(chImpl, s.actor) + // Initialize change handler. + + // callbacks is a union of the fullNodeFilteredAPI and ourselves. 
+ callbacks := struct { + fullNodeFilteredAPI + *WindowPoStScheduler + }{s.api, s} + + s.ch = newChangeHandler(callbacks, s.actor) defer s.ch.shutdown() s.ch.start() - var notifs <-chan []*api.HeadChange - var err error - var gotCur bool + var ( + notifs <-chan []*api.HeadChange + err error + gotCur bool + ) // not fine to panic after this point for { diff --git a/testplans/Makefile b/testplans/Makefile index 410553b90..38f46baa8 100644 --- a/testplans/Makefile +++ b/testplans/Makefile @@ -6,18 +6,18 @@ download-proofs: go run github.com/filecoin-project/go-paramfetch/paramfetch 2048 ./docker-images/proof-parameters.json build-images: - docker build -t "iptestground/oni-buildbase:v13-lotus" -f "docker-images/Dockerfile.oni-buildbase" "docker-images" - docker build -t "iptestground/oni-runtime:v7" -f "docker-images/Dockerfile.oni-runtime" "docker-images" - docker build -t "iptestground/oni-runtime:v8-debug" -f "docker-images/Dockerfile.oni-runtime-debug" "docker-images" + docker build -t "iptestground/oni-buildbase:v15-lotus" -f "docker-images/Dockerfile.oni-buildbase" "docker-images" + docker build -t "iptestground/oni-runtime:v10" -f "docker-images/Dockerfile.oni-runtime" "docker-images" + docker build -t "iptestground/oni-runtime:v10-debug" -f "docker-images/Dockerfile.oni-runtime-debug" "docker-images" push-images: - docker push iptestground/oni-buildbase:v13-lotus - docker push iptestground/oni-runtime:v7 - docker push iptestground/oni-runtime:v8-debug + docker push iptestground/oni-buildbase:v15-lotus + docker push iptestground/oni-runtime:v10 + docker push iptestground/oni-runtime:v10-debug pull-images: - docker pull iptestground/oni-buildbase:v13-lotus - docker pull iptestground/oni-runtime:v7 - docker pull iptestground/oni-runtime:v8-debug + docker pull iptestground/oni-buildbase:v15-lotus + docker pull iptestground/oni-runtime:v10 + docker pull iptestground/oni-runtime:v10-debug .PHONY: download-proofs build-images push-images pull-images diff --git 
a/testplans/docker-images/Dockerfile.oni-buildbase b/testplans/docker-images/Dockerfile.oni-buildbase index 012a27fc7..265066537 100644 --- a/testplans/docker-images/Dockerfile.oni-buildbase +++ b/testplans/docker-images/Dockerfile.oni-buildbase @@ -1,10 +1,10 @@ -ARG GO_VERSION=1.15.6 +ARG GO_VERSION=1.16.3 FROM golang:${GO_VERSION}-buster RUN apt-get update && apt-get install -y ca-certificates llvm clang mesa-opencl-icd ocl-icd-opencl-dev jq gcc git pkg-config bzr libhwloc-dev -ARG FILECOIN_FFI_COMMIT=62f89f108a6a8fe9ad6ed52fb7ffbf8594d7ae5c +ARG FILECOIN_FFI_COMMIT=8b97bd8230b77bd32f4f27e4766a6d8a03b4e801 ARG FFI_DIR=/extern/filecoin-ffi RUN mkdir -p ${FFI_DIR} \ diff --git a/testplans/docker-images/Dockerfile.oni-runtime b/testplans/docker-images/Dockerfile.oni-runtime index 2ccb7337c..27144069a 100644 --- a/testplans/docker-images/Dockerfile.oni-runtime +++ b/testplans/docker-images/Dockerfile.oni-runtime @@ -1,4 +1,4 @@ -ARG GO_VERSION=1.15.6 +ARG GO_VERSION=1.16.3 FROM golang:${GO_VERSION}-buster as downloader @@ -8,7 +8,7 @@ FROM golang:${GO_VERSION}-buster as downloader ## 3. Trigger the download. ## Output will be in /var/tmp/filecoin-proof-parameters. -RUN go get github.com/filecoin-project/go-paramfetch/paramfetch +RUN go get github.com/filecoin-project/go-paramfetch/paramfetch@master COPY /proof-parameters.json / RUN paramfetch 8388608 /proof-parameters.json diff --git a/testplans/docker-images/Dockerfile.oni-runtime-debug b/testplans/docker-images/Dockerfile.oni-runtime-debug index a349a70da..856fcc1fc 100644 --- a/testplans/docker-images/Dockerfile.oni-runtime-debug +++ b/testplans/docker-images/Dockerfile.oni-runtime-debug @@ -1,4 +1,4 @@ -ARG GO_VERSION=1.15.6 +ARG GO_VERSION=1.16.3 FROM golang:${GO_VERSION}-buster as downloader @@ -8,11 +8,11 @@ FROM golang:${GO_VERSION}-buster as downloader ## 3. Trigger the download. ## Output will be in /var/tmp/filecoin-proof-parameters. 
-RUN go get github.com/filecoin-project/go-paramfetch/paramfetch +RUN go get github.com/filecoin-project/go-paramfetch/paramfetch@master COPY /proof-parameters.json / RUN paramfetch 8388608 /proof-parameters.json -ARG LOTUS_COMMIT=b4ad2e5e93dc710d985eb9cf3ee04142efb47bf0 +ARG LOTUS_COMMIT=b8deee048eaf850113e8626a73f64b17ba69a9f6 ## for debug purposes RUN apt update && apt install -y mesa-opencl-icd ocl-icd-opencl-dev gcc git bzr jq pkg-config libhwloc-dev curl && git clone https://github.com/filecoin-project/lotus.git && cd lotus/ && git checkout ${LOTUS_COMMIT} && make clean && make all && make install diff --git a/testplans/lotus-soup/_compositions/baseline-docker-1-1-with-restarts.toml b/testplans/lotus-soup/_compositions/baseline-docker-1-1-with-restarts.toml new file mode 100644 index 000000000..28865a03b --- /dev/null +++ b/testplans/lotus-soup/_compositions/baseline-docker-1-1-with-restarts.toml @@ -0,0 +1,59 @@ +[metadata] + name = "lotus-soup" + author = "" + +[global] + plan = "lotus-soup" + case = "deals-e2e" + total_instances = 3 + builder = "docker:go" + runner = "local:docker" + +[global.build] + selectors = ["testground"] + +[global.run_config] + exposed_ports = { pprof = "6060", node_rpc = "1234", miner_rpc = "2345" } + +[global.build_config] + enable_go_build_cache = true + +[global.run.test_params] + clients = "1" + miners = "1" + genesis_timestamp_offset = "0" + balance = "20000000" # These balances will work for maximum 100 nodes, as TotalFilecoin is 2B + sectors = "3" + random_beacon_type = "mock" + mining_mode = "natural" + bandwidth = "4MB" + + +[[groups]] + id = "bootstrapper" + [groups.instances] + count = 1 + percentage = 0.0 + [groups.run] + [groups.run.test_params] + role = "bootstrapper" + +[[groups]] + id = "miners" + [groups.instances] + count = 1 + percentage = 0.0 + [groups.run] + [groups.run.test_params] + role = "miner" + +[[groups]] + id = "clients" + [groups.instances] + count = 1 + percentage = 0.0 + [groups.run] + 
[groups.run.test_params] + role = "client" + # Bounce the connection during push and pull requests + bounce_conn_data_transfers = "true" diff --git a/testplans/lotus-soup/_compositions/baseline-docker-1-1.toml b/testplans/lotus-soup/_compositions/baseline-docker-1-1.toml index 9012be69c..25a31f9ec 100644 --- a/testplans/lotus-soup/_compositions/baseline-docker-1-1.toml +++ b/testplans/lotus-soup/_compositions/baseline-docker-1-1.toml @@ -23,7 +23,7 @@ miners = "1" genesis_timestamp_offset = "0" balance = "20000000" # These balances will work for maximum 100 nodes, as TotalFilecoin is 2B - sectors = "10" + sectors = "3" random_beacon_type = "mock" mining_mode = "natural" diff --git a/testplans/lotus-soup/_compositions/baseline-k8s-1-1-versions.toml b/testplans/lotus-soup/_compositions/baseline-k8s-1-1-versions.toml new file mode 100644 index 000000000..051d8e0c6 --- /dev/null +++ b/testplans/lotus-soup/_compositions/baseline-k8s-1-1-versions.toml @@ -0,0 +1,74 @@ +[metadata] + name = "lotus-soup" + author = "" + +[global] + plan = "lotus-soup" + case = "deals-e2e" + total_instances = 3 + builder = "docker:go" + runner = "cluster:k8s" + +[global.build] + selectors = ["testground"] + +[global.run_config] + exposed_ports = { pprof = "6060", node_rpc = "1234", miner_rpc = "2345" } + +[global.build_config] + push_registry=true + go_proxy_mode="remote" + go_proxy_url="http://localhost:8081" + registry_type="aws" + +[global.run.test_params] + clients = "1" + miners = "1" + genesis_timestamp_offset = "0" + balance = "20000000" # These balances will work for maximum 100 nodes, as TotalFilecoin is 2B + sectors = "10" + random_beacon_type = "mock" + mining_mode = "natural" + +[[groups]] + id = "bootstrapper" + [groups.resources] + memory = "512Mi" + cpu = "1000m" + [groups.instances] + count = 1 + percentage = 0.0 + [groups.run] + [groups.run.test_params] + role = "bootstrapper" + +[[groups]] + id = "miners" + [groups.resources] + memory = "4096Mi" + cpu = "1000m" + 
[groups.instances] + count = 1 + percentage = 0.0 + [groups.run] + [groups.run.test_params] + role = "miner" + [groups.build] + dependencies = [ + { module = "github.com/filecoin-project/lotus", version = "{{.Env.LOTUS_VERSION_MINER}}"}, + ] +[[groups]] + id = "clients" + [groups.resources] + memory = "1024Mi" + cpu = "1000m" + [groups.instances] + count = 1 + percentage = 0.0 + [groups.run] + [groups.run.test_params] + role = "client" + [groups.build] + dependencies = [ + { module = "github.com/filecoin-project/lotus", version = "{{.Env.LOTUS_VERSION_CLIENT}}"}, + ] diff --git a/testplans/lotus-soup/_compositions/baseline-k8s-3-1.toml b/testplans/lotus-soup/_compositions/baseline-k8s-3-1.toml index 18ce024bb..dc6519656 100644 --- a/testplans/lotus-soup/_compositions/baseline-k8s-3-1.toml +++ b/testplans/lotus-soup/_compositions/baseline-k8s-3-1.toml @@ -45,7 +45,7 @@ [[groups]] id = "miners" [groups.resources] - memory = "4096Mi" + memory = "8192Mi" cpu = "1000m" [groups.instances] count = 1 diff --git a/testplans/lotus-soup/_compositions/paych-stress-k8s.toml b/testplans/lotus-soup/_compositions/paych-stress-k8s.toml index cf98960b7..b5d7f9bd4 100644 --- a/testplans/lotus-soup/_compositions/paych-stress-k8s.toml +++ b/testplans/lotus-soup/_compositions/paych-stress-k8s.toml @@ -5,7 +5,7 @@ [global] plan = "lotus-soup" case = "paych-stress" - total_instances = 5 # 2 clients + 2 miners + 1 bootstrapper + total_instances = 4 # 2 clients + 1 miners + 1 bootstrapper builder = "docker:go" runner = "cluster:k8s" @@ -23,7 +23,7 @@ [global.run.test_params] clients = "2" - miners = "2" + miners = "1" genesis_timestamp_offset = "0" balance = "100" ## be careful, this is in FIL. 
sectors = "10" @@ -44,7 +44,7 @@ [[groups]] id = "miners" - instances = { count = 2 } + instances = { count = 1 } [groups.run.test_params] role = "miner" [groups.resources] diff --git a/testplans/lotus-soup/deals_e2e.go b/testplans/lotus-soup/deals_e2e.go index 234754ae9..6737bdae2 100644 --- a/testplans/lotus-soup/deals_e2e.go +++ b/testplans/lotus-soup/deals_e2e.go @@ -4,19 +4,19 @@ import ( "context" "fmt" "io/ioutil" + mbig "math/big" "math/rand" "os" "time" - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-state-types/big" - "github.com/filecoin-project/lotus/api" + "github.com/libp2p/go-libp2p-core/peer" "github.com/testground/sdk-go/sync" - mbig "math/big" - + "github.com/filecoin-project/go-address" + datatransfer "github.com/filecoin-project/go-data-transfer" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/build" - "github.com/filecoin-project/lotus/testplans/lotus-soup/testkit" ) @@ -39,6 +39,8 @@ import ( // Then we create a genesis block that allocates some funds to each node and collects // the presealed sectors. func dealsE2E(t *testkit.TestEnvironment) error { + t.RecordMessage("running node with role '%s'", t.Role) + // Dispatch/forward non-client roles to defaults. 
if t.Role != "client" { return testkit.HandleDefaultRole(t) @@ -75,9 +77,11 @@ func dealsE2E(t *testkit.TestEnvironment) error { // give some time to the miner, otherwise, we get errors like: // deal errored deal failed: (State=26) error calling node: publishing deal: GasEstimateMessageGas // error: estimating gas used: message execution failed: exit 19, reason: failed to lock balance: failed to lock client funds: not enough balance to lock for addr t0102: escrow balance 0 < locked 0 + required 640297000 (RetCode=19) - time.Sleep(50 * time.Second) + time.Sleep(40 * time.Second) - // generate 1600 bytes of random data + time.Sleep(time.Duration(t.GlobalSeq) * 5 * time.Second) + + // generate 5000000 bytes of random data data := make([]byte, 5000000) rand.New(rand.NewSource(time.Now().UnixNano())).Read(data) @@ -98,6 +102,15 @@ func dealsE2E(t *testkit.TestEnvironment) error { } t.RecordMessage("file cid: %s", fcid) + // Check if we should bounce the connection during data transfers + if t.BooleanParam("bounce_conn_data_transfers") { + t.RecordMessage("Will bounce connection during push and pull data-transfers") + err = bounceConnInTransfers(ctx, t, client, minerAddr.MinerNetAddrs.ID) + if err != nil { + return err + } + } + // start deal t1 := time.Now() deal := testkit.StartDeal(ctx, minerAddr.MinerActorAddr, client, fcid.Root, fastRetrieval) @@ -131,6 +144,55 @@ func dealsE2E(t *testkit.TestEnvironment) error { return nil } +func bounceConnInTransfers(ctx context.Context, t *testkit.TestEnvironment, client api.FullNode, minerPeerID peer.ID) error { + storageConnBroken := false + retrievalConnBroken := false + upds, err := client.ClientDataTransferUpdates(ctx) + if err != nil { + return err + } + + go func() { + for upd := range upds { + dir := "push" + if !upd.IsSender { + dir = "pull" + } + + t.RecordMessage("%s data transfer status: %s, transferred: %d", dir, datatransfer.Statuses[upd.Status], upd.Transferred) + + // Bounce the connection after the first block 
is sent for the storage deal + if upd.IsSender && upd.Transferred > 0 && !storageConnBroken { + storageConnBroken = true + bounceConnection(ctx, t, client, minerPeerID) + } + + // Bounce the connection after the first block is received for the retrieval deal + if !upd.IsSender && upd.Transferred > 0 && !retrievalConnBroken { + retrievalConnBroken = true + bounceConnection(ctx, t, client, minerPeerID) + } + } + }() + + return nil +} + +func bounceConnection(ctx context.Context, t *testkit.TestEnvironment, client api.FullNode, minerPeerID peer.ID) { + t.RecordMessage("disconnecting peer %s", minerPeerID) + client.NetBlockAdd(ctx, api.NetBlockList{ + Peers: []peer.ID{minerPeerID}, + }) + + go func() { + time.Sleep(3 * time.Second) + t.RecordMessage("reconnecting to peer %s", minerPeerID) + client.NetBlockRemove(ctx, api.NetBlockList{ + Peers: []peer.ID{minerPeerID}, + }) + }() +} + // filToAttoFil converts a fractional filecoin value into AttoFIL, rounding if necessary func filToAttoFil(f float64) big.Int { a := mbig.NewFloat(f) diff --git a/testplans/lotus-soup/go.mod b/testplans/lotus-soup/go.mod index 316f488f2..55da298db 100644 --- a/testplans/lotus-soup/go.mod +++ b/testplans/lotus-soup/go.mod @@ -1,6 +1,6 @@ module github.com/filecoin-project/lotus/testplans/lotus-soup -go 1.15 +go 1.16 require ( contrib.go.opencensus.io/exporter/prometheus v0.1.0 @@ -8,33 +8,35 @@ require ( github.com/davecgh/go-spew v1.1.1 github.com/drand/drand v1.2.1 github.com/filecoin-project/go-address v0.0.5 - github.com/filecoin-project/go-fil-markets v1.2.5 + github.com/filecoin-project/go-data-transfer v1.6.0 + github.com/filecoin-project/go-fil-markets v1.5.0 github.com/filecoin-project/go-jsonrpc v0.1.4-0.20210217175800-45ea43ac2bec github.com/filecoin-project/go-state-types v0.1.1-0.20210506134452-99b279731c48 github.com/filecoin-project/go-storedcounter v0.0.0-20200421200003-1c99c62e8a5b - github.com/filecoin-project/lotus v1.9.0 - github.com/filecoin-project/specs-actors v0.9.13 
+ github.com/filecoin-project/lotus v1.10.1-0.20210707122128-1fe08f5973f4 + github.com/filecoin-project/specs-actors v0.9.14 github.com/google/uuid v1.1.2 github.com/gorilla/mux v1.7.4 github.com/hashicorp/go-multierror v1.1.0 github.com/influxdata/influxdb v1.8.3 // indirect github.com/ipfs/go-cid v0.0.7 github.com/ipfs/go-datastore v0.4.5 + github.com/ipfs/go-graphsync v0.6.2-0.20210428121800-88edb5462e17 // indirect github.com/ipfs/go-ipfs-files v0.0.8 github.com/ipfs/go-ipld-format v0.2.0 - github.com/ipfs/go-log/v2 v2.1.2-0.20200626104915-0016c0b4b3e4 + github.com/ipfs/go-log/v2 v2.1.3 github.com/ipfs/go-merkledag v0.3.2 github.com/ipfs/go-unixfs v0.2.4 github.com/ipld/go-car v0.1.1-0.20201119040415-11b6074b6d4d github.com/kpacha/opencensus-influxdb v0.0.0-20181102202715-663e2683a27c - github.com/libp2p/go-libp2p v0.12.0 - github.com/libp2p/go-libp2p-core v0.7.0 + github.com/libp2p/go-libp2p v0.14.2 + github.com/libp2p/go-libp2p-core v0.8.5 github.com/libp2p/go-libp2p-pubsub-tracer v0.0.0-20200626141350-e730b32bf1e6 github.com/multiformats/go-multiaddr v0.3.1 github.com/multiformats/go-multiaddr-net v0.2.0 github.com/testground/sdk-go v0.2.6 - go.opencensus.io v0.22.5 - golang.org/x/sync v0.0.0-20201207232520-09787c993a3a + go.opencensus.io v0.23.0 + golang.org/x/sync v0.0.0-20210220032951-036812b2e83c ) // This will work in all build modes: docker:go, exec:go, and local go build. 
diff --git a/testplans/lotus-soup/go.sum b/testplans/lotus-soup/go.sum index 40680b689..9969c5182 100644 --- a/testplans/lotus-soup/go.sum +++ b/testplans/lotus-soup/go.sum @@ -46,6 +46,8 @@ github.com/Gurpartap/async v0.0.0-20180927173644-4f7f499dd9ee h1:8doiS7ib3zi6/K1 github.com/Gurpartap/async v0.0.0-20180927173644-4f7f499dd9ee/go.mod h1:W0GbEAA4uFNYOGG2cJpmFJ04E6SD1NLELPYZB57/7AY= github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= github.com/Kubuxu/go-os-helper v0.0.1/go.mod h1:N8B+I7vPCT80IcP58r50u4+gEEcsZETFUpAzWW2ep1Y= +github.com/Kubuxu/imtui v0.0.0-20210401140320-41663d68d0fa h1:1PPxEyGdIGVkX/kqMvLJ95a1dGS1Sz7tpNEgehEYYt0= +github.com/Kubuxu/imtui v0.0.0-20210401140320-41663d68d0fa/go.mod h1:WUmMvh9wMtqj1Xhf1hf3kp9RvL+y6odtdYxpyZjb90U= github.com/Masterminds/glide v0.13.2/go.mod h1:STyF5vcenH/rUqTEv+/hBXlSTo7KYwg2oc2f4tzPWic= github.com/Masterminds/semver v1.4.2/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= github.com/Masterminds/vcs v1.13.0/go.mod h1:N09YCmOQr6RLxC6UNHzuVwAdodYbbnycGHSmwVJjcKA= @@ -101,6 +103,8 @@ github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24 github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bep/debounce v1.2.0 h1:wXds8Kq8qRfwAOpAxHrJDbCXgC5aHSzgQb/0gKsHQqo= +github.com/bep/debounce v1.2.0/go.mod h1:H8yggRPQKLUhUoqrJC1bO2xNya7vanpDl7xR3ISbCJ0= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= github.com/bmizerany/pat v0.0.0-20170815010413-6226ea591a40/go.mod h1:8rLXio+WjiTceGBHIoTvn60HIbs7Hm7bcHjyrSqYB9c= github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps= @@ -110,14 +114,18 @@ github.com/btcsuite/btcd 
v0.0.0-20190213025234-306aecffea32/go.mod h1:DrZx5ec/dm github.com/btcsuite/btcd v0.0.0-20190523000118-16327141da8c/go.mod h1:3J08xEfcugPacsc34/LKRU2yO7YmuT8yt28J8k2+rrI= github.com/btcsuite/btcd v0.0.0-20190605094302-a0d1e3e36d50/go.mod h1:3J08xEfcugPacsc34/LKRU2yO7YmuT8yt28J8k2+rrI= github.com/btcsuite/btcd v0.0.0-20190824003749-130ea5bddde3/go.mod h1:3J08xEfcugPacsc34/LKRU2yO7YmuT8yt28J8k2+rrI= -github.com/btcsuite/btcd v0.20.1-beta h1:Ik4hyJqN8Jfyv3S4AGBOmyouMsYE3EdYODkMbQjwPGw= github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ= +github.com/btcsuite/btcd v0.21.0-beta h1:At9hIZdJW0s9E/fAz28nrz6AmcNlSVucCH796ZteX1M= +github.com/btcsuite/btcd v0.21.0-beta/go.mod h1:ZSWyehm27aAuS9bvkATT+Xte3hjHZ+MRgMY/8NJ7K94= github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA= github.com/btcsuite/btcutil v0.0.0-20190207003914-4c204d697803/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg= github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg= +github.com/btcsuite/btcutil v1.0.2/go.mod h1:j9HUFwoQRsZL3V4n+qG+CUnEGHOarIxfC3Le2Yhbcts= github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd/go.mod h1:HHNXQzUsZCxOoE+CPiyCTO6x34Zs86zZUiwtpXoGdtg= github.com/btcsuite/goleveldb v0.0.0-20160330041536-7834afc9e8cd/go.mod h1:F+uVaaLLH7j4eDXPRvw78tMflu7Ie2bzYOH4Y8rRKBY= +github.com/btcsuite/goleveldb v1.0.0/go.mod h1:QiK9vBlgftBg6rWQIj6wFzbPfRjiykIEhBH4obrXJ/I= github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc= +github.com/btcsuite/snappy-go v1.0.0/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc= github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY= github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs= github.com/buger/goterm 
v0.0.0-20200322175922-2f3e71b85129 h1:gfAMKE626QEuKG3si0pdTRcr/YEbBoxY+3GOH3gWvl4= @@ -191,8 +199,10 @@ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davidlazar/go-crypto v0.0.0-20170701192655-dcfb0a7ac018/go.mod h1:rQYf4tfk5sSwFsnDg3qYaBxSjsD9S8+59vW0dKUgme4= -github.com/davidlazar/go-crypto v0.0.0-20190912175916-7055855a373f h1:BOaYiTvg8p9vBUXpklC22XSK/mifLF7lG9jtmYYi3Tc= github.com/davidlazar/go-crypto v0.0.0-20190912175916-7055855a373f/go.mod h1:rQYf4tfk5sSwFsnDg3qYaBxSjsD9S8+59vW0dKUgme4= +github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c h1:pFUpOrbxDR6AkioZ1ySsx5yxlDQZ8stG2b88gTPxgJU= +github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c/go.mod h1:6UhI8N9EjYm1c2odKpFpAYeR8dsBeM7PtzQhRgxRr9U= +github.com/decred/dcrd/lru v1.0.0/go.mod h1:mxKOwFd7lFjN2GZYsiz/ecgqR6kkYAl+0pz0tEMk218= github.com/detailyang/go-fallocate v0.0.0-20180908115635-432fa640bd2e h1:lj77EKYUpYXTd8CD/+QMIf8b6OIOTsfEBSXiAzuEHTU= github.com/detailyang/go-fallocate v0.0.0-20180908115635-432fa640bd2e/go.mod h1:3ZQK6DMPSz/QZ73jlWxBtUhNA8xZx7LzUFSq/OfP8vk= github.com/dgraph-io/badger v1.5.5-0.20190226225317-8115aed38f8f/go.mod h1:VZxzAIRPHRVNRKRo6AXrX9BJegn6il06VMTZVJYCIjQ= @@ -259,8 +269,9 @@ github.com/filecoin-project/go-address v0.0.5/go.mod h1:jr8JxKsYx+lQlQZmF5i2U0Z+ github.com/filecoin-project/go-amt-ipld/v2 v2.1.0/go.mod h1:nfFPoGyX0CU9SkXX8EoCcSuHN1XcbN0c6KBh7yvP5fs= github.com/filecoin-project/go-amt-ipld/v2 v2.1.1-0.20201006184820-924ee87a1349 h1:pIuR0dnMD0i+as8wNnjjHyQrnhP5O5bmba/lmgQeRgU= github.com/filecoin-project/go-amt-ipld/v2 v2.1.1-0.20201006184820-924ee87a1349/go.mod h1:vgmwKBkx+ca5OIeEvstiQgzAZnb7R6QaqE1oEDSqa6g= -github.com/filecoin-project/go-amt-ipld/v3 v3.0.0 h1:Ou/q82QeHGOhpkedvaxxzpBYuqTxLCcj5OChkDNx4qc= 
github.com/filecoin-project/go-amt-ipld/v3 v3.0.0/go.mod h1:Qa95YNAbtoVCTSVtX38aAC1ptBnJfPma1R/zZsKmx4o= +github.com/filecoin-project/go-amt-ipld/v3 v3.1.0 h1:ZNJ9tEG5bE72vBWYiuh5bkxJVM3ViHNOmQ7qew9n6RE= +github.com/filecoin-project/go-amt-ipld/v3 v3.1.0/go.mod h1:UjM2QhDFrrjD5s1CdnkJkat4ga+LqZBZgTMniypABRo= github.com/filecoin-project/go-bitfield v0.2.0/go.mod h1:CNl9WG8hgR5mttCnUErjcQjGvuiZjRqK9rHVBsQF4oM= github.com/filecoin-project/go-bitfield v0.2.3/go.mod h1:CNl9WG8hgR5mttCnUErjcQjGvuiZjRqK9rHVBsQF4oM= github.com/filecoin-project/go-bitfield v0.2.4 h1:uZ7MeE+XfM5lqrHJZ93OnhQKc/rveW8p9au0C68JPgk= @@ -268,35 +279,36 @@ github.com/filecoin-project/go-bitfield v0.2.4/go.mod h1:CNl9WG8hgR5mttCnUErjcQj github.com/filecoin-project/go-cbor-util v0.0.0-20191219014500-08c40a1e63a2 h1:av5fw6wmm58FYMgJeoB/lK9XXrgdugYiTqkdxjTy9k8= github.com/filecoin-project/go-cbor-util v0.0.0-20191219014500-08c40a1e63a2/go.mod h1:pqTiPHobNkOVM5thSRsHYjyQfq7O5QSCMhvuu9JoDlg= github.com/filecoin-project/go-commp-utils v0.0.0-20201119054358-b88f7a96a434/go.mod h1:6s95K91mCyHY51RPWECZieD3SGWTqIFLf1mPOes9l5U= -github.com/filecoin-project/go-commp-utils v0.1.0 h1:PaDxoXYh1TXnnz5kA/xSObpAQwcJSUs4Szb72nuaNdk= -github.com/filecoin-project/go-commp-utils v0.1.0/go.mod h1:6s95K91mCyHY51RPWECZieD3SGWTqIFLf1mPOes9l5U= +github.com/filecoin-project/go-commp-utils v0.1.1-0.20210427191551-70bf140d31c7 h1:U9Z+76pHCKBmtdxFV7JFZJj7OVm12I6dEKwtMVbq5p0= +github.com/filecoin-project/go-commp-utils v0.1.1-0.20210427191551-70bf140d31c7/go.mod h1:6s95K91mCyHY51RPWECZieD3SGWTqIFLf1mPOes9l5U= github.com/filecoin-project/go-crypto v0.0.0-20191218222705-effae4ea9f03 h1:2pMXdBnCiXjfCYx/hLqFxccPoqsSveQFxVLvNxy9bus= github.com/filecoin-project/go-crypto v0.0.0-20191218222705-effae4ea9f03/go.mod h1:+viYnvGtUTgJRdy6oaeF4MTFKAfatX071MPDPBL11EQ= github.com/filecoin-project/go-data-transfer v1.0.1/go.mod h1:UxvfUAY9v3ub0a21BSK9u3pB2aq30Y0KMsG+w9/ysyo= -github.com/filecoin-project/go-data-transfer v1.4.3 
h1:ECEw69NOfmEZ7XN1NSBvj3KTbbH2mIczQs+Z2w4bD7c= -github.com/filecoin-project/go-data-transfer v1.4.3/go.mod h1:n8kbDQXWrY1c4UgfMa9KERxNCWbOTDwdNhf2MpN9dpo= +github.com/filecoin-project/go-data-transfer v1.6.0 h1:DHIzEc23ydRCCBwtFet3MfgO8gMpZEnw60Y+s71oX6o= +github.com/filecoin-project/go-data-transfer v1.6.0/go.mod h1:E3WW4mCEYwU2y65swPEajSZoFWFmfXt7uwGduoACZQc= github.com/filecoin-project/go-ds-versioning v0.1.0 h1:y/X6UksYTsK8TLCI7rttCKEvl8btmWxyFMEeeWGUxIQ= github.com/filecoin-project/go-ds-versioning v0.1.0/go.mod h1:mp16rb4i2QPmxBnmanUx8i/XANp+PFCCJWiAb+VW4/s= github.com/filecoin-project/go-fil-commcid v0.0.0-20200716160307-8f644712406f/go.mod h1:Eaox7Hvus1JgPrL5+M3+h7aSPHc0cVqpSxA+TxIEpZQ= github.com/filecoin-project/go-fil-commcid v0.0.0-20201016201715-d41df56b4f6a h1:hyJ+pUm/4U4RdEZBlg6k8Ma4rDiuvqyGpoICXAxwsTg= github.com/filecoin-project/go-fil-commcid v0.0.0-20201016201715-d41df56b4f6a/go.mod h1:Eaox7Hvus1JgPrL5+M3+h7aSPHc0cVqpSxA+TxIEpZQ= github.com/filecoin-project/go-fil-markets v1.0.5-0.20201113164554-c5eba40d5335/go.mod h1:AJySOJC00JRWEZzRG2KsfUnqEf5ITXxeX09BE9N4f9c= -github.com/filecoin-project/go-fil-markets v1.2.5 h1:bQgtXbwxKyPxSEQoUI5EaTHJ0qfzyd5NosspuADCm6Y= -github.com/filecoin-project/go-fil-markets v1.2.5/go.mod h1:7JIqNBmFvOyBzk/EiPYnweVdQnWhshixb5B9b1653Ag= +github.com/filecoin-project/go-fil-markets v1.5.0 h1:3KEs01L8XFCEgujZ6ggFjr1XWjpjTQcmSSeo3I99I0k= +github.com/filecoin-project/go-fil-markets v1.5.0/go.mod h1:7be6zzFwaN8kxVeYZf/UUj/JilHC0ogPvWqE1TW8Ptk= github.com/filecoin-project/go-hamt-ipld v0.1.5 h1:uoXrKbCQZ49OHpsTCkrThPNelC4W3LPEk0OrS/ytIBM= github.com/filecoin-project/go-hamt-ipld v0.1.5/go.mod h1:6Is+ONR5Cd5R6XZoCse1CWaXZc0Hdb/JeX+EQCQzX24= github.com/filecoin-project/go-hamt-ipld/v2 v2.0.0 h1:b3UDemBYN2HNfk3KOXNuxgTTxlWi3xVvbQP0IT38fvM= github.com/filecoin-project/go-hamt-ipld/v2 v2.0.0/go.mod h1:7aWZdaQ1b16BVoQUYR+eEvrDCGJoPLxFpDynFjYfBjI= -github.com/filecoin-project/go-hamt-ipld/v3 v3.0.1 
h1:zbzs46G7bOctkZ+JUX3xirrj0RaEsi+27dtlsgrTNBg= github.com/filecoin-project/go-hamt-ipld/v3 v3.0.1/go.mod h1:gXpNmr3oQx8l3o7qkGyDjJjYSRX7hp/FGOStdqrWyDI= +github.com/filecoin-project/go-hamt-ipld/v3 v3.1.0 h1:rVVNq0x6RGQIzCo1iiJlGFm9AGIZzeifggxtKMU7zmI= +github.com/filecoin-project/go-hamt-ipld/v3 v3.1.0/go.mod h1:bxmzgT8tmeVQA1/gvBwFmYdT8SOFUwB3ovSUfG1Ux0g= github.com/filecoin-project/go-jsonrpc v0.1.4-0.20210217175800-45ea43ac2bec h1:rGI5I7fdU4viManxmDdbk5deZO7afe6L1Wc04dAmlOM= github.com/filecoin-project/go-jsonrpc v0.1.4-0.20210217175800-45ea43ac2bec/go.mod h1:XBBpuKIMaXIIzeqzO1iucq4GvbF8CxmXRFoezRh+Cx4= github.com/filecoin-project/go-multistore v0.0.3 h1:vaRBY4YiA2UZFPK57RNuewypB8u0DzzQwqsL0XarpnI= github.com/filecoin-project/go-multistore v0.0.3/go.mod h1:kaNqCC4IhU4B1uyr7YWFHd23TL4KM32aChS0jNkyUvQ= github.com/filecoin-project/go-padreader v0.0.0-20200903213702-ed5fae088b20 h1:+/4aUeUoKr6AKfPE3mBhXA5spIV6UcKdTYDPNU2Tdmg= github.com/filecoin-project/go-padreader v0.0.0-20200903213702-ed5fae088b20/go.mod h1:mPn+LRRd5gEKNAtc+r3ScpW2JRU/pj4NBKdADYWHiak= -github.com/filecoin-project/go-paramfetch v0.0.2-0.20200701152213-3e0f0afdc261 h1:A256QonvzRaknIIAuWhe/M2dpV2otzs3NBhi5TWa/UA= -github.com/filecoin-project/go-paramfetch v0.0.2-0.20200701152213-3e0f0afdc261/go.mod h1:fZzmf4tftbwf9S37XRifoJlz7nCjRdIrMGLR07dKLCc= +github.com/filecoin-project/go-paramfetch v0.0.2-0.20210614165157-25a6c7769498 h1:G10ezOvpH1CLXQ19EA9VWNwyL0mg536ujSayjV0yg0k= +github.com/filecoin-project/go-paramfetch v0.0.2-0.20210614165157-25a6c7769498/go.mod h1:1FH85P8U+DUEmWk1Jkw3Bw7FrwTVUNHk/95PSPG+dts= github.com/filecoin-project/go-state-types v0.0.0-20200903145444-247639ffa6ad/go.mod h1:IQ0MBPnonv35CJHtWSN3YY1Hz2gkPru1Q9qoaYLxx9I= github.com/filecoin-project/go-state-types v0.0.0-20200904021452-1883f36ca2f4/go.mod h1:IQ0MBPnonv35CJHtWSN3YY1Hz2gkPru1Q9qoaYLxx9I= github.com/filecoin-project/go-state-types v0.0.0-20200928172055-2df22083d8ab/go.mod h1:ezYnPf0bNkTsDibL/psSz5dy4B5awOJ/E7P2Saeep8g= @@ 
-311,28 +323,34 @@ github.com/filecoin-project/go-statestore v0.1.1 h1:ufMFq00VqnT2CAuDpcGnwLnCX1I/ github.com/filecoin-project/go-statestore v0.1.1/go.mod h1:LFc9hD+fRxPqiHiaqUEZOinUJB4WARkRfNl10O7kTnI= github.com/filecoin-project/go-storedcounter v0.0.0-20200421200003-1c99c62e8a5b h1:fkRZSPrYpk42PV3/lIXiL0LHetxde7vyYYvSsttQtfg= github.com/filecoin-project/go-storedcounter v0.0.0-20200421200003-1c99c62e8a5b/go.mod h1:Q0GQOBtKf1oE10eSXSlhN45kDBdGvEcVOqMiffqX+N8= -github.com/filecoin-project/lotus v1.9.0 h1:TDKDLbmgYTL8M0mlfd9HmJVEYRlSSOQnakg4+9rfyWM= -github.com/filecoin-project/lotus v1.9.0/go.mod h1:4YC/8rizrrp2wKOYvHQEjCxZbziXi68BhrzvI+FCye0= +github.com/filecoin-project/lotus v1.10.1-0.20210707122128-1fe08f5973f4 h1:u5/uky+PdeaGuEGsExtVP8UUB8No/e873xjqcb7h3CM= +github.com/filecoin-project/lotus v1.10.1-0.20210707122128-1fe08f5973f4/go.mod h1:8ooe5Rzw80rJL0br81A8NNiwZ4BUVzPRwAnDxUG4E7g= github.com/filecoin-project/specs-actors v0.9.4/go.mod h1:BStZQzx5x7TmCkLv0Bpa07U6cPKol6fd3w9KjMPZ6Z4= github.com/filecoin-project/specs-actors v0.9.12/go.mod h1:TS1AW/7LbG+615j4NsjMK1qlpAwaFsG9w0V2tg2gSao= -github.com/filecoin-project/specs-actors v0.9.13 h1:rUEOQouefi9fuVY/2HOroROJlZbOzWYXXeIh41KF2M4= github.com/filecoin-project/specs-actors v0.9.13/go.mod h1:TS1AW/7LbG+615j4NsjMK1qlpAwaFsG9w0V2tg2gSao= +github.com/filecoin-project/specs-actors v0.9.14 h1:68PVstg2UB3ZsMLF+DKFTAs/YKsqhKWynkr0IqmVRQY= +github.com/filecoin-project/specs-actors v0.9.14/go.mod h1:TS1AW/7LbG+615j4NsjMK1qlpAwaFsG9w0V2tg2gSao= github.com/filecoin-project/specs-actors/v2 v2.0.1/go.mod h1:v2NZVYinNIKA9acEMBm5wWXxqv5+frFEbekBFemYghY= github.com/filecoin-project/specs-actors/v2 v2.3.2/go.mod h1:UuJQLoTx/HPvvWeqlIFmC/ywlOLHNe8SNQ3OunFbu2Y= -github.com/filecoin-project/specs-actors/v2 v2.3.5-0.20210114162132-5b58b773f4fb h1:orr/sMzrDZUPAveRE+paBdu1kScIUO5zm+HYeh+VlhA= github.com/filecoin-project/specs-actors/v2 v2.3.5-0.20210114162132-5b58b773f4fb/go.mod h1:LljnY2Mn2homxZsmokJZCpRuhOPxfXhvcek5gWkmqAc= 
-github.com/filecoin-project/specs-actors/v3 v3.1.0 h1:s4qiPw8pgypqBGAy853u/zdZJ7K9cTZdM1rTiSonHrg= +github.com/filecoin-project/specs-actors/v2 v2.3.5 h1:PbT4tPlSXZ8sRgajhb4D8AOEmiaaZ+jg6tc6BBv8VQc= +github.com/filecoin-project/specs-actors/v2 v2.3.5/go.mod h1:LljnY2Mn2homxZsmokJZCpRuhOPxfXhvcek5gWkmqAc= github.com/filecoin-project/specs-actors/v3 v3.1.0/go.mod h1:mpynccOLlIRy0QnR008BwYBwT9fen+sPR13MA1VmMww= -github.com/filecoin-project/specs-actors/v4 v4.0.0 h1:vMALksY5G3J5rj3q9rbcyB+f4Tk1xrLqSgdB3jOok4s= +github.com/filecoin-project/specs-actors/v3 v3.1.1 h1:BE8fsns1GnEOxt1DTE5LxBK2FThXtWmCChgcJoHTg0E= +github.com/filecoin-project/specs-actors/v3 v3.1.1/go.mod h1:mpynccOLlIRy0QnR008BwYBwT9fen+sPR13MA1VmMww= github.com/filecoin-project/specs-actors/v4 v4.0.0/go.mod h1:TkHXf/l7Wyw4ZejyXIPS2rK8bBO0rdwhTZyQQgaglng= -github.com/filecoin-project/specs-actors/v5 v5.0.0-20210512015452-4fe3889fff57 h1:N6IBsnGXfAMXd677G6EiOKewFwQ7Ulcuupi4U6wYmXE= +github.com/filecoin-project/specs-actors/v4 v4.0.1 h1:AiWrtvJZ63MHGe6rn7tPu4nSUY8bA1KDNszqJaD5+Fg= +github.com/filecoin-project/specs-actors/v4 v4.0.1/go.mod h1:TkHXf/l7Wyw4ZejyXIPS2rK8bBO0rdwhTZyQQgaglng= github.com/filecoin-project/specs-actors/v5 v5.0.0-20210512015452-4fe3889fff57/go.mod h1:283yBMMUSDB2abcjP/hhrwTkhb9h3sfM6KGrep/ZlBI= +github.com/filecoin-project/specs-actors/v5 v5.0.1 h1:PrYm5AKdMlJ/55eRW5laWcnaX66gyyDYBWvH38kNAMo= +github.com/filecoin-project/specs-actors/v5 v5.0.1/go.mod h1:74euMDIXorusOBs/QL/LNkYsXZdDpLJwojWw6T03pdE= github.com/filecoin-project/specs-storage v0.1.1-0.20201105051918-5188d9774506 h1:Ur/l2+6qN+lQiqjozWWc5p9UDaAMDZKTlDS98oRnlIw= github.com/filecoin-project/specs-storage v0.1.1-0.20201105051918-5188d9774506/go.mod h1:nJRRM7Aa9XVvygr3W9k6xGF46RWzr2zxF/iGoAIfA/g= github.com/filecoin-project/test-vectors/schema v0.0.5/go.mod h1:iQ9QXLpYWL3m7warwvK1JC/pTri8mnfEmKygNDqqY6E= github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= 
-github.com/flynn/noise v0.0.0-20180327030543-2492fe189ae6 h1:u/UEqS66A5ckRmS4yNpjmVH56sVtS/RfclBAYocb4as= github.com/flynn/noise v0.0.0-20180327030543-2492fe189ae6/go.mod h1:1i71OnUq3iUe1ma7Lr6yG6/rjvM3emb6yoL7xLFzcVQ= +github.com/flynn/noise v1.0.0 h1:DlTHqmzmvcEiKj+4RYo/imoswx/4r6iBlCMfVtrMXpQ= +github.com/flynn/noise v1.0.0/go.mod h1:xbMo+0i6+IGbYdJhF31t2eR1BIU0CYc12+BNAKwUTag= github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= github.com/francoispqt/gojay v1.2.13 h1:d2m3sFjloqoIUQU3TsHBgj6qg/BVGlTBeHDUmyJnXKk= github.com/francoispqt/gojay v1.2.13/go.mod h1:ehT5mTG4ua4581f1++1WLG0vPdaA9HaiDsoyrBGkyDY= @@ -343,6 +361,10 @@ github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWo github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/gbrlsnchs/jwt/v3 v3.0.0-beta.1 h1:EzDjxMg43q1tA2c0MV3tNbaontnHLplHyFF6M5KiVP0= github.com/gbrlsnchs/jwt/v3 v3.0.0-beta.1/go.mod h1:0eHX/BVySxPc6SE2mZRoppGq7qcEagxdmQnA3dzork8= +github.com/gdamore/encoding v1.0.0 h1:+7OoQ1Bc6eTm5niUzBa0Ctsh6JbMW6Ra+YNuAtDBdko= +github.com/gdamore/encoding v1.0.0/go.mod h1:alR0ol34c49FCSBLjhosxzcPHQbf2trDkoo5dl+VrEg= +github.com/gdamore/tcell/v2 v2.2.0 h1:vSyEgKwraXPSOkvCk7IwOSyX+Pv3V2cV9CikJMXg4U4= +github.com/gdamore/tcell/v2 v2.2.0/go.mod h1:cTTuF84Dlj/RqmaCIV5p4w8uG1zWdk0SF6oBpwHp4fU= github.com/getsentry/raven-go v0.2.0/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ= github.com/ghemawat/stream v0.0.0-20171120220530-696b145b53b9/go.mod h1:106OIgooyS7OzLDOpUGgm9fA3bQENb/cFSyyBmMoJDs= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= @@ -395,8 +417,9 @@ github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7a github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= 
github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= -github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls= github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/gogo/status v1.0.3/go.mod h1:SavQ51ycCLnc7dGyJxp8YAmudx8xqiVrRf+6IXRsugc= github.com/gogo/status v1.1.0 h1:+eIkrewn5q6b30y+g/BJINVVdi2xH7je5MPJ3ZPK3JA= github.com/gogo/status v1.1.0/go.mod h1:BFv9nrluPLmrS0EmGVvLaPNmRosr9KapBYd5/hpY1WM= @@ -415,8 +438,9 @@ github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfb github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.4 h1:l75CXGRSwbaYNpl/Z2X1XIIAMSCquvXgpVZDhwEIJsc= github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= +github.com/golang/mock v1.5.0 h1:jlYHihg//f7RRwuPfptm04yp4s7O6Kw8EZiVYIGcH0g= +github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.0/go.mod h1:Qd/q+1AKNOZr9uGQzbzCmRO6sUih6GTPZv6a1/R87v0= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -428,8 +452,9 @@ github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrU github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= github.com/golang/protobuf v1.4.1/go.mod 
h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0= github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3 h1:JjCZWpVbqXDqFVmTfYWEVTMIYrL/NPdPSCHPJ0T/raM= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.2-0.20190904063534-ff6b7dc882cf h1:gFVkHXmVAhEbxZVDln5V9GKrLaluNoFHDbrZwAWZgws= @@ -442,14 +467,16 @@ github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMyw github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.2 h1:X2ev0eStA3AbceY54o37/0PQ/UWqKEiiO2dKL5OPaFM= github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.3 h1:x95R7cp+rSeeqAMI2knLtQ0DKlaBhv2NrtrOvafPHRo= +github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gopacket v1.1.17/go.mod h1:UdDNZ1OO62aGYVnPhxT1U6aI7ukYtA/kB8vaU0diBUM= -github.com/google/gopacket v1.1.18 h1:lum7VRA9kdlvBi7/v2p7/zcbkduHaCH/SVVyurs7OpY= github.com/google/gopacket v1.1.18/go.mod h1:UdDNZ1OO62aGYVnPhxT1U6aI7ukYtA/kB8vaU0diBUM= +github.com/google/gopacket v1.1.19 
h1:ves8RnFZPGiFnTS0uPQStjwru6uO6h+nlr9j6fL7kF8= +github.com/google/gopacket v1.1.19/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo/Vo+TKTo= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= @@ -623,8 +650,9 @@ github.com/ipfs/go-fs-lock v0.0.6/go.mod h1:OTR+Rj9sHiRubJh3dRhD15Juhd/+w6VPOY28 github.com/ipfs/go-graphsync v0.1.0/go.mod h1:jMXfqIEDFukLPZHqDPp8tJMbHO9Rmeb9CEGevngQbmE= github.com/ipfs/go-graphsync v0.4.2/go.mod h1:/VmbZTUdUMTbNkgzAiCEucIIAU3BkLE2cZrDCVUhyi0= github.com/ipfs/go-graphsync v0.4.3/go.mod h1:mPOwDYv128gf8gxPFgXnz4fNrSYPsWyqisJ7ych+XDY= -github.com/ipfs/go-graphsync v0.6.0 h1:x6UvDUGA7wjaKNqx5Vbo7FGT8aJ5ryYA0dMQ5jN3dF0= -github.com/ipfs/go-graphsync v0.6.0/go.mod h1:e2ZxnClqBBYAtd901g9vXMJzS47labjAtOzsWtOzKNk= +github.com/ipfs/go-graphsync v0.6.1/go.mod h1:e2ZxnClqBBYAtd901g9vXMJzS47labjAtOzsWtOzKNk= +github.com/ipfs/go-graphsync v0.6.2-0.20210428121800-88edb5462e17 h1:rOoF88dVuDGbIx7idSdimN7JvXriyOIT96WD3eX9sHA= +github.com/ipfs/go-graphsync v0.6.2-0.20210428121800-88edb5462e17/go.mod h1:5WyaeigpNdpiYQuW2vwpuecOoEfB4h747ZGEOKmAGTg= github.com/ipfs/go-hamt-ipld v0.1.1/go.mod h1:1EZCr2v0jlCnhpa+aZ0JZYp8Tt2w16+JJOAVz17YcDk= github.com/ipfs/go-ipfs-blockstore v0.0.1/go.mod h1:d3WClOmRQKFnJ0Jz/jj/zmksX0ma1gROTlovZKBmN08= github.com/ipfs/go-ipfs-blockstore v0.1.0/go.mod h1:5aD0AvHPi7mZc6Ci1WCAhiBQu2IsfTduLl+422H6Rqw= @@ -698,11 +726,14 @@ github.com/ipfs/go-log/v2 v2.0.3/go.mod h1:O7P1lJt27vWHhOwQmcFEvlmo49ry2VY2+JfBW github.com/ipfs/go-log/v2 v2.0.5/go.mod h1:eZs4Xt4ZUJQFM3DlanGhy7TkwwawCZcSByscwkWG+dw= github.com/ipfs/go-log/v2 v2.0.8/go.mod h1:eZs4Xt4ZUJQFM3DlanGhy7TkwwawCZcSByscwkWG+dw= github.com/ipfs/go-log/v2 v2.1.1/go.mod h1:2v2nsGfZsvvAJz13SyFzf9ObaqwHiHxsPLEHntrv9KM= 
-github.com/ipfs/go-log/v2 v2.1.2-0.20200626104915-0016c0b4b3e4 h1:3bijxqzQ1O9yg7gd7Aqk80oaEvsJ+uXw0zSvi2qR3Jw= github.com/ipfs/go-log/v2 v2.1.2-0.20200626104915-0016c0b4b3e4/go.mod h1:2v2nsGfZsvvAJz13SyFzf9ObaqwHiHxsPLEHntrv9KM= +github.com/ipfs/go-log/v2 v2.1.2/go.mod h1:2v2nsGfZsvvAJz13SyFzf9ObaqwHiHxsPLEHntrv9KM= +github.com/ipfs/go-log/v2 v2.1.3 h1:1iS3IU7aXRlbgUpN8yTTpJ53NXYjAe37vcI5+5nYrzk= +github.com/ipfs/go-log/v2 v2.1.3/go.mod h1:/8d0SH3Su5Ooc31QlL1WysJhvyOTDCjcCZ9Axpmri6g= github.com/ipfs/go-merkledag v0.0.3/go.mod h1:Oc5kIXLHokkE1hWGMBHw+oxehkAaTOqtEb7Zbh6BhLA= github.com/ipfs/go-merkledag v0.0.6/go.mod h1:QYPdnlvkOg7GnQRofu9XZimC5ZW5Wi3bKys/4GQQfto= github.com/ipfs/go-merkledag v0.2.3/go.mod h1:SQiXrtSts3KGNmgOzMICy5c0POOpUNQLvB3ClKnBAlk= +github.com/ipfs/go-merkledag v0.2.4/go.mod h1:SQiXrtSts3KGNmgOzMICy5c0POOpUNQLvB3ClKnBAlk= github.com/ipfs/go-merkledag v0.3.1/go.mod h1:fvkZNNZixVW6cKSZ/JfLlON5OlgTXNdRLz0p6QG/I2M= github.com/ipfs/go-merkledag v0.3.2 h1:MRqj40QkrWkvPswXs4EfSslhZ4RVPRbxwX11js0t1xY= github.com/ipfs/go-merkledag v0.3.2/go.mod h1:fvkZNNZixVW6cKSZ/JfLlON5OlgTXNdRLz0p6QG/I2M= @@ -720,6 +751,7 @@ github.com/ipfs/go-peertaskqueue v0.2.0/go.mod h1:5/eNrBEbtSKWCG+kQK8K8fGNixoYUn github.com/ipfs/go-todocounter v0.0.1/go.mod h1:l5aErvQc8qKE2r7NDMjmq5UNAvuZy0rC8BHOplkWvZ4= github.com/ipfs/go-unixfs v0.0.4/go.mod h1:eIo/p9ADu/MFOuyxzwU+Th8D6xoxU//r590vUpWyfz8= github.com/ipfs/go-unixfs v0.2.1/go.mod h1:IwAAgul1UQIcNZzKPYZWOCijryFBeCV79cNubPzol+k= +github.com/ipfs/go-unixfs v0.2.2-0.20190827150610-868af2e9e5cb/go.mod h1:IwAAgul1UQIcNZzKPYZWOCijryFBeCV79cNubPzol+k= github.com/ipfs/go-unixfs v0.2.4 h1:6NwppOXefWIyysZ4LR/qUBPvXd5//8J3jiMdvpbw6Lo= github.com/ipfs/go-unixfs v0.2.4/go.mod h1:SUdisfUjNoSDzzhGVxvCL9QO/nKdwXdr+gbMUdqcbYw= github.com/ipfs/go-verifcid v0.0.1 h1:m2HI7zIuR5TFyQ1b79Da5N9dnnCP1vcu2QqawmWlK2E= @@ -730,13 +762,16 @@ github.com/ipfs/iptb v1.4.0 h1:YFYTrCkLMRwk/35IMyC6+yjoQSHTEcNcefBStLJzgvo= github.com/ipfs/iptb v1.4.0/go.mod 
h1:1rzHpCYtNp87/+hTxG5TfCVn/yMY3dKnLn8tBiMfdmg= github.com/ipfs/iptb-plugins v0.2.1 h1:au4HWn9/pRPbkxA08pDx2oRAs4cnbgQWgV0teYXuuGA= github.com/ipfs/iptb-plugins v0.2.1/go.mod h1:QXMbtIWZ+jRsW8a4h13qAKU7jcM7qaittO8wOsTP0Rs= +github.com/ipld/go-car v0.1.0/go.mod h1:RCWzaUh2i4mOEkB3W45Vc+9jnS/M6Qay5ooytiBHl3g= github.com/ipld/go-car v0.1.1-0.20200923150018-8cdef32e2da4/go.mod h1:xrMEcuSq+D1vEwl+YAXsg/JfA98XGpXDwnkIL4Aimqw= github.com/ipld/go-car v0.1.1-0.20201119040415-11b6074b6d4d h1:iphSzTuPqyDgH7WUVZsdqUnQNzYgIblsVr1zhVNA33U= github.com/ipld/go-car v0.1.1-0.20201119040415-11b6074b6d4d/go.mod h1:2Gys8L8MJ6zkh1gktTSXreY63t4UbyvNp5JaudTyxHQ= +github.com/ipld/go-ipld-prime v0.0.2-0.20191108012745-28a82f04c785/go.mod h1:bDDSvVz7vaK12FNvMeRYnpRFkSUPNQOiCYQezMD/P3w= github.com/ipld/go-ipld-prime v0.0.2-0.20200428162820-8b59dc292b8e/go.mod h1:uVIwe/u0H4VdKv3kaN1ck7uCb6yD9cFLS9/ELyXbsw8= github.com/ipld/go-ipld-prime v0.5.1-0.20200828233916-988837377a7f/go.mod h1:0xEgdD6MKbZ1vF0GC+YcR/C4SQCAlRuOjIJ2i0HxqzM= github.com/ipld/go-ipld-prime v0.5.1-0.20201021195245-109253e8a018 h1:RbRHv8epkmvBYA5cGfz68GUSbOgx5j/7ObLIl4Rsif0= github.com/ipld/go-ipld-prime v0.5.1-0.20201021195245-109253e8a018/go.mod h1:0xEgdD6MKbZ1vF0GC+YcR/C4SQCAlRuOjIJ2i0HxqzM= +github.com/ipld/go-ipld-prime-proto v0.0.0-20191113031812-e32bd156a1e5/go.mod h1:gcvzoEDBjwycpXt3LBE061wT9f46szXGHAmj9uoP6fU= github.com/ipld/go-ipld-prime-proto v0.0.0-20200428191222-c1ffdadc01e1/go.mod h1:OAV6xBmuTLsPZ+epzKkPB1e25FHk/vCtyatkdHcArLs= github.com/ipld/go-ipld-prime-proto v0.0.0-20200922192210-9a2bfd4440a6/go.mod h1:3pHYooM9Ea65jewRwrb2u5uHZCNkNTe9ABsVB+SrkH0= github.com/ipld/go-ipld-prime-proto v0.1.0 h1:j7gjqrfwbT4+gXpHwEx5iMssma3mnctC7YaCimsFP70= @@ -803,6 +838,7 @@ github.com/kilic/bls12-381 v0.0.0-20200820230200-6b2c19996391 h1:51kHw7l/dUDdOdW github.com/kilic/bls12-381 v0.0.0-20200820230200-6b2c19996391/go.mod h1:XXfR6YFCRSrkEXbNlIyDsgXVNJWVUV30m/ebkVy9n6s= github.com/kisielk/errcheck v1.1.0/go.mod 
h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4= github.com/klauspost/compress v1.4.0/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= @@ -819,6 +855,7 @@ github.com/kpacha/opencensus-influxdb v0.0.0-20181102202715-663e2683a27c/go.mod github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= @@ -838,8 +875,9 @@ github.com/libp2p/go-conn-security v0.0.1/go.mod h1:bGmu51N0KU9IEjX7kl2PQjgZa40J github.com/libp2p/go-conn-security-multistream v0.0.1/go.mod h1:nc9vud7inQ+d6SO0I/6dSWrdMnHnzZNHeyUQqrAJulE= github.com/libp2p/go-conn-security-multistream v0.0.2/go.mod h1:nc9vud7inQ+d6SO0I/6dSWrdMnHnzZNHeyUQqrAJulE= github.com/libp2p/go-conn-security-multistream v0.1.0/go.mod h1:aw6eD7LOsHEX7+2hJkDxw1MteijaVcI+/eP2/x3J1xc= -github.com/libp2p/go-conn-security-multistream v0.2.0 h1:uNiDjS58vrvJTg9jO6bySd1rMKejieG7v45ekqHbZ1M= github.com/libp2p/go-conn-security-multistream v0.2.0/go.mod h1:hZN4MjlNetKD3Rq5Jb/P5ohUnFLNzEAR4DLSzpn2QLU= +github.com/libp2p/go-conn-security-multistream v0.2.1 h1:ft6/POSK7F+vl/2qzegnHDaXFU0iWB4yVTYrioC6Zy0= 
+github.com/libp2p/go-conn-security-multistream v0.2.1/go.mod h1:cR1d8gA0Hr59Fj6NhaTpFhJZrjSYuNmhpT2r25zYR70= github.com/libp2p/go-eventbus v0.0.2/go.mod h1:Hr/yGlwxA/stuLnpMiu82lpNKpvRy3EaJxPu40XYOwk= github.com/libp2p/go-eventbus v0.1.0/go.mod h1:vROgu5cs5T7cv7POWlWxBaVLxfSegC5UGQf8A2eEmx4= github.com/libp2p/go-eventbus v0.2.1 h1:VanAdErQnpTioN2TowqNcOijf6YwhuODe4pPKSDpxGc= @@ -862,8 +900,9 @@ github.com/libp2p/go-libp2p v0.8.1/go.mod h1:QRNH9pwdbEBpx5DTJYg+qxcVaDMAz3Ee/qD github.com/libp2p/go-libp2p v0.8.3/go.mod h1:EsH1A+8yoWK+L4iKcbPYu6MPluZ+CHWI9El8cTaefiM= github.com/libp2p/go-libp2p v0.9.2/go.mod h1:cunHNLDVus66Ct9iXXcjKRLdmHdFdHVe1TAnbubJQqQ= github.com/libp2p/go-libp2p v0.10.0/go.mod h1:yBJNpb+mGJdgrwbKAKrhPU0u3ogyNFTfjJ6bdM+Q/G8= -github.com/libp2p/go-libp2p v0.12.0 h1:+xai9RQnQ9l5elFOKvp5wRyjyWisSwEx+6nU2+onpUA= github.com/libp2p/go-libp2p v0.12.0/go.mod h1:FpHZrfC1q7nA8jitvdjKBDF31hguaC676g/nT9PgQM0= +github.com/libp2p/go-libp2p v0.14.2 h1:qs0ABtjjNjS+RIXT1uM7sMJEvIc0pq2nKR0VQxFXhHI= +github.com/libp2p/go-libp2p v0.14.2/go.mod h1:0PQMADQEjCM2l8cSMYDpTgsb8gr6Zq7i4LUgq1mlW2E= github.com/libp2p/go-libp2p-asn-util v0.0.0-20200825225859-85005c6cf052 h1:BM7aaOF7RpmNn9+9g6uTjGJ0cTzWr5j9i9IKeun2M8U= github.com/libp2p/go-libp2p-asn-util v0.0.0-20200825225859-85005c6cf052/go.mod h1:nRMRTab+kZuk0LnKZpxhOVH/ndsdr2Nr//Zltc/vwgo= github.com/libp2p/go-libp2p-autonat v0.0.2/go.mod h1:fs71q5Xk+pdnKU014o2iq1RhMs9/PMaG5zXRFNnIIT4= @@ -874,8 +913,9 @@ github.com/libp2p/go-libp2p-autonat v0.2.0/go.mod h1:DX+9teU4pEEoZUqR1PiMlqliONQ github.com/libp2p/go-libp2p-autonat v0.2.1/go.mod h1:MWtAhV5Ko1l6QBsHQNSuM6b1sRkXrpk0/LqCr+vCVxI= github.com/libp2p/go-libp2p-autonat v0.2.2/go.mod h1:HsM62HkqZmHR2k1xgX34WuWDzk/nBwNHoeyyT4IWV6A= github.com/libp2p/go-libp2p-autonat v0.2.3/go.mod h1:2U6bNWCNsAG9LEbwccBDQbjzQ8Krdjge1jLTE9rdoMM= -github.com/libp2p/go-libp2p-autonat v0.4.0 h1:3y8XQbpr+ssX8QfZUHekjHCYK64sj6/4hnf/awD4+Ug= github.com/libp2p/go-libp2p-autonat v0.4.0/go.mod 
h1:YxaJlpr81FhdOv3W3BTconZPfhaYivRdf53g+S2wobk= +github.com/libp2p/go-libp2p-autonat v0.4.2 h1:YMp7StMi2dof+baaxkbxaizXjY1RPvU71CXfxExzcUU= +github.com/libp2p/go-libp2p-autonat v0.4.2/go.mod h1:YxaJlpr81FhdOv3W3BTconZPfhaYivRdf53g+S2wobk= github.com/libp2p/go-libp2p-autonat-svc v0.1.0/go.mod h1:fqi8Obl/z3R4PFVLm8xFtZ6PBL9MlV/xumymRFkKq5A= github.com/libp2p/go-libp2p-blankhost v0.0.1/go.mod h1:Ibpbw/7cPPYwFb7PACIWdvxxv0t0XCCI10t7czjAjTc= github.com/libp2p/go-libp2p-blankhost v0.1.1/go.mod h1:pf2fvdLJPsC1FsVrNP3DUUvMzUts2dsLLBEpo1vW1ro= @@ -922,8 +962,12 @@ github.com/libp2p/go-libp2p-core v0.5.6/go.mod h1:txwbVEhHEXikXn9gfC7/UDDw7rkxuX github.com/libp2p/go-libp2p-core v0.5.7/go.mod h1:txwbVEhHEXikXn9gfC7/UDDw7rkxuX0bJvM49Ykaswo= github.com/libp2p/go-libp2p-core v0.6.0/go.mod h1:txwbVEhHEXikXn9gfC7/UDDw7rkxuX0bJvM49Ykaswo= github.com/libp2p/go-libp2p-core v0.6.1/go.mod h1:FfewUH/YpvWbEB+ZY9AQRQ4TAD8sJBt/G1rVvhz5XT8= -github.com/libp2p/go-libp2p-core v0.7.0 h1:4a0TMjrWNTZlNvcqxZmrMRDi/NQWrhwO2pkTuLSQ/IQ= github.com/libp2p/go-libp2p-core v0.7.0/go.mod h1:FfewUH/YpvWbEB+ZY9AQRQ4TAD8sJBt/G1rVvhz5XT8= +github.com/libp2p/go-libp2p-core v0.8.0/go.mod h1:FfewUH/YpvWbEB+ZY9AQRQ4TAD8sJBt/G1rVvhz5XT8= +github.com/libp2p/go-libp2p-core v0.8.1/go.mod h1:FfewUH/YpvWbEB+ZY9AQRQ4TAD8sJBt/G1rVvhz5XT8= +github.com/libp2p/go-libp2p-core v0.8.2/go.mod h1:FfewUH/YpvWbEB+ZY9AQRQ4TAD8sJBt/G1rVvhz5XT8= +github.com/libp2p/go-libp2p-core v0.8.5 h1:aEgbIcPGsKy6zYcC+5AJivYFedhYa4sW7mIpWpUaLKw= +github.com/libp2p/go-libp2p-core v0.8.5/go.mod h1:FfewUH/YpvWbEB+ZY9AQRQ4TAD8sJBt/G1rVvhz5XT8= github.com/libp2p/go-libp2p-crypto v0.0.1/go.mod h1:yJkNyDmO341d5wwXxDUGO0LykUVT72ImHNUqh5D/dBE= github.com/libp2p/go-libp2p-crypto v0.0.2/go.mod h1:eETI5OUfBnvARGOHrJz2eWNyTUxEGZnBxMcbUjfIj4I= github.com/libp2p/go-libp2p-crypto v0.1.0 h1:k9MFy+o2zGDNGsaoZl0MA3iZ75qXxr9OOoAZF+sD5OQ= @@ -958,8 +1002,10 @@ github.com/libp2p/go-libp2p-mplex v0.2.0/go.mod h1:Ejl9IyjvXJ0T9iqUTE1jpYATQ9NM3 
github.com/libp2p/go-libp2p-mplex v0.2.1/go.mod h1:SC99Rxs8Vuzrf/6WhmH41kNn13TiYdAWNYHrwImKLnE= github.com/libp2p/go-libp2p-mplex v0.2.2/go.mod h1:74S9eum0tVQdAfFiKxAyKzNdSuLqw5oadDq7+L/FELo= github.com/libp2p/go-libp2p-mplex v0.2.3/go.mod h1:CK3p2+9qH9x+7ER/gWWDYJ3QW5ZxWDkm+dVvjfuG3ek= -github.com/libp2p/go-libp2p-mplex v0.3.0 h1:CZyqqKP0BSGQyPLvpRQougbfXaaaJZdGgzhCpJNuNSk= github.com/libp2p/go-libp2p-mplex v0.3.0/go.mod h1:l9QWxRbbb5/hQMECEb908GbS9Sm2UAR2KFZKUJEynEs= +github.com/libp2p/go-libp2p-mplex v0.4.0/go.mod h1:yCyWJE2sc6TBTnFpjvLuEJgTSw/u+MamvzILKdX7asw= +github.com/libp2p/go-libp2p-mplex v0.4.1 h1:/pyhkP1nLwjG3OM+VuaNJkQT/Pqq73WzB3aDN3Fx1sc= +github.com/libp2p/go-libp2p-mplex v0.4.1/go.mod h1:cmy+3GfqfM1PceHTLL7zQzAAYaryDu6iPSC+CIb094g= github.com/libp2p/go-libp2p-nat v0.0.2/go.mod h1:QrjXQSD5Dj4IJOdEcjHRkWTSomyxRo6HnUkf/TfQpLQ= github.com/libp2p/go-libp2p-nat v0.0.4/go.mod h1:N9Js/zVtAXqaeT99cXgTV9e75KpnWCvVOiGzlcHmBbY= github.com/libp2p/go-libp2p-nat v0.0.5/go.mod h1:1qubaE5bTZMJE+E/uu2URroMbzdubFz1ChgiN79yKPE= @@ -971,8 +1017,8 @@ github.com/libp2p/go-libp2p-netutil v0.0.1/go.mod h1:GdusFvujWZI9Vt0X5BKqwWWmZFx github.com/libp2p/go-libp2p-netutil v0.1.0 h1:zscYDNVEcGxyUpMd0JReUZTrpMfia8PmLKcKF72EAMQ= github.com/libp2p/go-libp2p-netutil v0.1.0/go.mod h1:3Qv/aDqtMLTUyQeundkKsA+YCThNdbQD54k3TqjpbFU= github.com/libp2p/go-libp2p-noise v0.1.1/go.mod h1:QDFLdKX7nluB7DEnlVPbz7xlLHdwHFA9HiohJRr3vwM= -github.com/libp2p/go-libp2p-noise v0.1.2 h1:IH9GRihQJTx56obm+GnpdPX4KeVIlvpXrP6xnJ0wxWk= -github.com/libp2p/go-libp2p-noise v0.1.2/go.mod h1:9B10b7ueo7TIxZHHcjcDCo5Hd6kfKT2m77by82SFRfE= +github.com/libp2p/go-libp2p-noise v0.2.0 h1:wmk5nhB9a2w2RxMOyvsoKjizgJOEaJdfAakr0jN8gds= +github.com/libp2p/go-libp2p-noise v0.2.0/go.mod h1:IEbYhBBzGyvdLBoxxULL/SGbJARhUeqlO8lVSREYu2Q= github.com/libp2p/go-libp2p-peer v0.0.1/go.mod h1:nXQvOBbwVqoP+T5Y5nCjeH4sP9IX/J0AMzcDUVruVoo= github.com/libp2p/go-libp2p-peer v0.1.1/go.mod h1:jkF12jGB4Gk/IOo+yomm+7oLWxF278F7UnrYUQ1Q8es= 
github.com/libp2p/go-libp2p-peer v0.2.0 h1:EQ8kMjaCUwt/Y5uLgjT8iY2qg0mGUT0N1zUjer50DsY= @@ -987,8 +1033,9 @@ github.com/libp2p/go-libp2p-peerstore v0.2.1/go.mod h1:NQxhNjWxf1d4w6PihR8btWIRj github.com/libp2p/go-libp2p-peerstore v0.2.2/go.mod h1:NQxhNjWxf1d4w6PihR8btWIRjwRLBr4TYKfNgrUkOPA= github.com/libp2p/go-libp2p-peerstore v0.2.3/go.mod h1:K8ljLdFn590GMttg/luh4caB/3g0vKuY01psze0upRw= github.com/libp2p/go-libp2p-peerstore v0.2.4/go.mod h1:ss/TWTgHZTMpsU/oKVVPQCGuDHItOpf2W8RxAi50P2s= -github.com/libp2p/go-libp2p-peerstore v0.2.6 h1:2ACefBX23iMdJU9Ke+dcXt3w86MIryes9v7In4+Qq3U= github.com/libp2p/go-libp2p-peerstore v0.2.6/go.mod h1:ss/TWTgHZTMpsU/oKVVPQCGuDHItOpf2W8RxAi50P2s= +github.com/libp2p/go-libp2p-peerstore v0.2.7 h1:83JoLxyR9OYTnNfB5vvFqvMUv/xDNa6NoPHnENhBsGw= +github.com/libp2p/go-libp2p-peerstore v0.2.7/go.mod h1:ss/TWTgHZTMpsU/oKVVPQCGuDHItOpf2W8RxAi50P2s= github.com/libp2p/go-libp2p-pnet v0.2.0 h1:J6htxttBipJujEjz1y0a5+eYoiPcFHhSYHH6na5f0/k= github.com/libp2p/go-libp2p-pnet v0.2.0/go.mod h1:Qqvq6JH/oMZGwqs3N1Fqhv8NVhrdYcO0BW4wssv21LA= github.com/libp2p/go-libp2p-protocol v0.0.1/go.mod h1:Af9n4PiruirSDjHycM1QuiMi/1VZNHYcK8cLgFJLZ4s= @@ -1002,8 +1049,8 @@ github.com/libp2p/go-libp2p-pubsub-tracer v0.0.0-20200626141350-e730b32bf1e6 h1: github.com/libp2p/go-libp2p-pubsub-tracer v0.0.0-20200626141350-e730b32bf1e6/go.mod h1:8ZodgKS4qRLayfw9FDKDd9DX4C16/GMofDxSldG8QPI= github.com/libp2p/go-libp2p-quic-transport v0.1.1/go.mod h1:wqG/jzhF3Pu2NrhJEvE+IE0NTHNXslOPn9JQzyCAxzU= github.com/libp2p/go-libp2p-quic-transport v0.5.0/go.mod h1:IEcuC5MLxvZ5KuHKjRu+dr3LjCT1Be3rcD/4d8JrX8M= -github.com/libp2p/go-libp2p-quic-transport v0.9.0 h1:WPuq5nV/chmIZIzvrkC2ulSdAQ0P0BDvgvAhZFOZ59E= -github.com/libp2p/go-libp2p-quic-transport v0.9.0/go.mod h1:xyY+IgxL0qsW7Kiutab0+NlxM0/p9yRtrGTYsuMWf70= +github.com/libp2p/go-libp2p-quic-transport v0.10.0 h1:koDCbWD9CCHwcHZL3/WEvP2A+e/o5/W5L3QS/2SPMA0= +github.com/libp2p/go-libp2p-quic-transport v0.10.0/go.mod 
h1:RfJbZ8IqXIhxBRm5hqUEJqjiiY8xmEuq3HUDS993MkA= github.com/libp2p/go-libp2p-record v0.0.1/go.mod h1:grzqg263Rug/sRex85QrDOLntdFAymLDLm7lxMgU79Q= github.com/libp2p/go-libp2p-record v0.1.0/go.mod h1:ujNc8iuE5dlKWVy6wuL6dd58t0n7xI4hAIl8pE6wu5Q= github.com/libp2p/go-libp2p-record v0.1.1/go.mod h1:VRgKajOyMVgP/F0L5g3kH7SVskp17vFi2xheb5uMJtg= @@ -1030,8 +1077,9 @@ github.com/libp2p/go-libp2p-swarm v0.2.4/go.mod h1:/xIpHFPPh3wmSthtxdGbkHZ0OET1h github.com/libp2p/go-libp2p-swarm v0.2.7/go.mod h1:ZSJ0Q+oq/B1JgfPHJAT2HTall+xYRNYp1xs4S2FBWKA= github.com/libp2p/go-libp2p-swarm v0.2.8/go.mod h1:JQKMGSth4SMqonruY0a8yjlPVIkb0mdNSwckW7OYziM= github.com/libp2p/go-libp2p-swarm v0.3.0/go.mod h1:hdv95GWCTmzkgeJpP+GK/9D9puJegb7H57B5hWQR5Kk= -github.com/libp2p/go-libp2p-swarm v0.3.1 h1:UTobu+oQHGdXTOGpZ4RefuVqYoJXcT0EBtSR74m2LkI= github.com/libp2p/go-libp2p-swarm v0.3.1/go.mod h1:hdv95GWCTmzkgeJpP+GK/9D9puJegb7H57B5hWQR5Kk= +github.com/libp2p/go-libp2p-swarm v0.5.0 h1:HIK0z3Eqoo8ugmN8YqWAhD2RORgR+3iNXYG4U2PFd1E= +github.com/libp2p/go-libp2p-swarm v0.5.0/go.mod h1:sU9i6BoHE0Ve5SKz3y9WfKrh8dUat6JknzUehFx8xW4= github.com/libp2p/go-libp2p-testing v0.0.1/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E= github.com/libp2p/go-libp2p-testing v0.0.2/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E= github.com/libp2p/go-libp2p-testing v0.0.3/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E= @@ -1039,8 +1087,9 @@ github.com/libp2p/go-libp2p-testing v0.0.4/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MB github.com/libp2p/go-libp2p-testing v0.1.0/go.mod h1:xaZWMJrPUM5GlDBxCeGUi7kI4eqnjVyavGroI2nxEM0= github.com/libp2p/go-libp2p-testing v0.1.1/go.mod h1:xaZWMJrPUM5GlDBxCeGUi7kI4eqnjVyavGroI2nxEM0= github.com/libp2p/go-libp2p-testing v0.1.2-0.20200422005655-8775583591d8/go.mod h1:Qy8sAncLKpwXtS2dSnDOP8ktexIAHKu+J+pnZOFZLTc= -github.com/libp2p/go-libp2p-testing v0.3.0 h1:ZiBYstPamsi7y6NJZebRudUzsYmVkt998hltyLqf8+g= github.com/libp2p/go-libp2p-testing v0.3.0/go.mod 
h1:efZkql4UZ7OVsEfaxNHZPzIehtsBXMrXnCfJIgDti5g= +github.com/libp2p/go-libp2p-testing v0.4.0 h1:PrwHRi0IGqOwVQWR3xzgigSlhlLfxgfXgkHxr77EghQ= +github.com/libp2p/go-libp2p-testing v0.4.0/go.mod h1:Q+PFXYoiYFN5CAEG2w3gLPEzotlKsNSbKQ/lImlOWF0= github.com/libp2p/go-libp2p-tls v0.1.3 h1:twKMhMu44jQO+HgQK9X8NHO5HkeJu2QbhLzLJpa8oNM= github.com/libp2p/go-libp2p-tls v0.1.3/go.mod h1:wZfuewxOndz5RTnCAxFliGjvYSDA40sKitV4c50uI1M= github.com/libp2p/go-libp2p-transport v0.0.1/go.mod h1:UzbUs9X+PHOSw7S3ZmeOxfnwaQY5vGDzZmKPod3N3tk= @@ -1050,8 +1099,9 @@ github.com/libp2p/go-libp2p-transport-upgrader v0.0.1/go.mod h1:NJpUAgQab/8K6K0m github.com/libp2p/go-libp2p-transport-upgrader v0.0.4/go.mod h1:RGq+tupk+oj7PzL2kn/m1w6YXxcIAYJYeI90h6BGgUc= github.com/libp2p/go-libp2p-transport-upgrader v0.1.1/go.mod h1:IEtA6or8JUbsV07qPW4r01GnTenLW4oi3lOPbUMGJJA= github.com/libp2p/go-libp2p-transport-upgrader v0.2.0/go.mod h1:mQcrHj4asu6ArfSoMuyojOdjx73Q47cYD7s5+gZOlns= -github.com/libp2p/go-libp2p-transport-upgrader v0.3.0 h1:q3ULhsknEQ34eVDhv4YwKS8iet69ffs9+Fir6a7weN4= github.com/libp2p/go-libp2p-transport-upgrader v0.3.0/go.mod h1:i+SKzbRnvXdVbU3D1dwydnTmKRPXiAR/fyvi1dXuL4o= +github.com/libp2p/go-libp2p-transport-upgrader v0.4.2 h1:4JsnbfJzgZeRS9AWN7B9dPqn/LY/HoQTlO9gtdJTIYM= +github.com/libp2p/go-libp2p-transport-upgrader v0.4.2/go.mod h1:NR8ne1VwfreD5VIWIU62Agt/J18ekORFU/j1i2y8zvk= github.com/libp2p/go-libp2p-yamux v0.1.2/go.mod h1:xUoV/RmYkg6BW/qGxA9XJyg+HzXFYkeXbnhjmnYzKp8= github.com/libp2p/go-libp2p-yamux v0.1.3/go.mod h1:VGSQVrqkh6y4nm0189qqxMtvyBft44MOYYPpYKXiVt4= github.com/libp2p/go-libp2p-yamux v0.2.0/go.mod h1:Db2gU+XfLpm6E4rG5uGCFX6uXA8MEXOxFcRoXUODaK8= @@ -1061,8 +1111,9 @@ github.com/libp2p/go-libp2p-yamux v0.2.5/go.mod h1:Zpgj6arbyQrmZ3wxSZxfBmbdnWtbZ github.com/libp2p/go-libp2p-yamux v0.2.7/go.mod h1:X28ENrBMU/nm4I3Nx4sZ4dgjZ6VhLEn0XhIoZ5viCwU= github.com/libp2p/go-libp2p-yamux v0.2.8/go.mod h1:/t6tDqeuZf0INZMTgd0WxIRbtK2EzI2h7HbFm9eAKI4= github.com/libp2p/go-libp2p-yamux 
v0.4.0/go.mod h1:+DWDjtFMzoAwYLVkNZftoucn7PelNoy5nm3tZ3/Zw30= -github.com/libp2p/go-libp2p-yamux v0.4.1 h1:TJxRVPY9SjH7TNrNC80l1OJMBiWhs1qpKmeB+1Ug3xU= -github.com/libp2p/go-libp2p-yamux v0.4.1/go.mod h1:FA/NjRYRVNjqOzpGuGqcruH7jAU2mYIjtKBicVOL3dc= +github.com/libp2p/go-libp2p-yamux v0.5.0/go.mod h1:AyR8k5EzyM2QN9Bbdg6X1SkVVuqLwTGf0L4DFq9g6po= +github.com/libp2p/go-libp2p-yamux v0.5.4 h1:/UOPtT/6DHPtr3TtKXBHa6g0Le0szYuI33Xc/Xpd7fQ= +github.com/libp2p/go-libp2p-yamux v0.5.4/go.mod h1:tfrXbyaTqqSU654GTvK3ocnSZL3BuHoeTSqhcel1wsE= github.com/libp2p/go-maddr-filter v0.0.1/go.mod h1:6eT12kSQMA9x2pvFQa+xesMKUBlj9VImZbj3B9FBH/Q= github.com/libp2p/go-maddr-filter v0.0.4/go.mod h1:6eT12kSQMA9x2pvFQa+xesMKUBlj9VImZbj3B9FBH/Q= github.com/libp2p/go-maddr-filter v0.0.5/go.mod h1:Jk+36PMfIqCJhAnaASRH83bdAvfDRp/w6ENFaC9bG+M= @@ -1074,8 +1125,9 @@ github.com/libp2p/go-mplex v0.0.4/go.mod h1:pK5yMLmOoBR1pNCqDlA2GQrdAVTMkqFalaTW github.com/libp2p/go-mplex v0.1.0/go.mod h1:SXgmdki2kwCUlCCbfGLEgHjC4pFqhTp0ZoV6aiKgxDU= github.com/libp2p/go-mplex v0.1.1/go.mod h1:Xgz2RDCi3co0LeZfgjm4OgUF15+sVR8SRcu3SFXI1lk= github.com/libp2p/go-mplex v0.1.2/go.mod h1:Xgz2RDCi3co0LeZfgjm4OgUF15+sVR8SRcu3SFXI1lk= -github.com/libp2p/go-mplex v0.2.0 h1:Ov/D+8oBlbRkjBs1R1Iua8hJ8cUfbdiW8EOdZuxcgaI= github.com/libp2p/go-mplex v0.2.0/go.mod h1:0Oy/A9PQlwBytDRp4wSkFnzHYDKcpLot35JQ6msjvYQ= +github.com/libp2p/go-mplex v0.3.0 h1:U1T+vmCYJaEoDJPV1aq31N56hS+lJgb397GsylNSgrU= +github.com/libp2p/go-mplex v0.3.0/go.mod h1:0Oy/A9PQlwBytDRp4wSkFnzHYDKcpLot35JQ6msjvYQ= github.com/libp2p/go-msgio v0.0.1/go.mod h1:63lBBgOTDKQL6EWazRMCwXsEeEeK9O2Cd+0+6OOuipQ= github.com/libp2p/go-msgio v0.0.2/go.mod h1:63lBBgOTDKQL6EWazRMCwXsEeEeK9O2Cd+0+6OOuipQ= github.com/libp2p/go-msgio v0.0.3/go.mod h1:63lBBgOTDKQL6EWazRMCwXsEeEeK9O2Cd+0+6OOuipQ= @@ -1087,8 +1139,9 @@ github.com/libp2p/go-nat v0.0.4/go.mod h1:Nmw50VAvKuk38jUBcmNh6p9lUJLoODbJRvYAa/ github.com/libp2p/go-nat v0.0.5 h1:qxnwkco8RLKqVh1NmjQ+tJ8p8khNLFxuElYG/TwqW4Q= 
github.com/libp2p/go-nat v0.0.5/go.mod h1:B7NxsVNPZmRLvMOwiEO1scOSyjA56zxYAGv1yQgRkEU= github.com/libp2p/go-netroute v0.1.2/go.mod h1:jZLDV+1PE8y5XxBySEBgbuVAXbhtuHSdmLPL2n9MKbk= -github.com/libp2p/go-netroute v0.1.3 h1:1ngWRx61us/EpaKkdqkMjKk/ufr/JlIFYQAxV2XX8Ig= github.com/libp2p/go-netroute v0.1.3/go.mod h1:jZLDV+1PE8y5XxBySEBgbuVAXbhtuHSdmLPL2n9MKbk= +github.com/libp2p/go-netroute v0.1.6 h1:ruPJStbYyXVYGQ81uzEDzuvbYRLKRrLvTYd33yomC38= +github.com/libp2p/go-netroute v0.1.6/go.mod h1:AqhkMh0VuWmfgtxKPp3Oc1LdU5QSWS7wl0QLhSZqXxQ= github.com/libp2p/go-openssl v0.0.2/go.mod h1:v8Zw2ijCSWBQi8Pq5GAixw6DbFfa9u6VIYDXnvOXkc0= github.com/libp2p/go-openssl v0.0.3/go.mod h1:unDrJpgy3oFr+rqXsarWifmJuNnJR4chtO1HmaZjggc= github.com/libp2p/go-openssl v0.0.4/go.mod h1:unDrJpgy3oFr+rqXsarWifmJuNnJR4chtO1HmaZjggc= @@ -1104,8 +1157,9 @@ github.com/libp2p/go-reuseport-transport v0.0.3/go.mod h1:Spv+MPft1exxARzP2Sruj2 github.com/libp2p/go-reuseport-transport v0.0.4 h1:OZGz0RB620QDGpv300n1zaOcKGGAoGVf8h9txtt/1uM= github.com/libp2p/go-reuseport-transport v0.0.4/go.mod h1:trPa7r/7TJK/d+0hdBLOCGvpQQVOU74OXbNCIMkufGw= github.com/libp2p/go-sockaddr v0.0.2/go.mod h1:syPvOmNs24S3dFVGJA1/mrqdeijPxLV2Le3BRLKd68k= -github.com/libp2p/go-sockaddr v0.1.0 h1:Y4s3/jNoryVRKEBrkJ576F17CPOaMIzUeCsg7dlTDj0= github.com/libp2p/go-sockaddr v0.1.0/go.mod h1:syPvOmNs24S3dFVGJA1/mrqdeijPxLV2Le3BRLKd68k= +github.com/libp2p/go-sockaddr v0.1.1 h1:yD80l2ZOdGksnOyHrhxDdTDFrf7Oy+v3FMVArIRgZxQ= +github.com/libp2p/go-sockaddr v0.1.1/go.mod h1:syPvOmNs24S3dFVGJA1/mrqdeijPxLV2Le3BRLKd68k= github.com/libp2p/go-stream-muxer v0.0.1/go.mod h1:bAo8x7YkSpadMTbtTaxGVHWUQsR/l5MEaHbKaliuT14= github.com/libp2p/go-stream-muxer v0.1.0/go.mod h1:8JAVsjeRBCWwPoZeH0W1imLOcriqXJyFvB0mR4A04sQ= github.com/libp2p/go-stream-muxer-multistream v0.1.1/go.mod h1:zmGdfkQ1AzOECIAcccoL8L//laqawOsO03zX8Sa+eGw= @@ -1127,8 +1181,9 @@ github.com/libp2p/go-ws-transport v0.1.0/go.mod h1:rjw1MG1LU9YDC6gzmwObkPd/Sqwhw github.com/libp2p/go-ws-transport 
v0.1.2/go.mod h1:dsh2Ld8F+XNmzpkaAijmg5Is+e9l6/1tK/6VFOdN69Y= github.com/libp2p/go-ws-transport v0.2.0/go.mod h1:9BHJz/4Q5A9ludYWKoGCFC5gUElzlHoKzu0yY9p/klM= github.com/libp2p/go-ws-transport v0.3.0/go.mod h1:bpgTJmRZAvVHrgHybCVyqoBmyLQ1fiZuEaBYusP5zsk= -github.com/libp2p/go-ws-transport v0.3.1 h1:ZX5rWB8nhRRJVaPO6tmkGI/Xx8XNboYX20PW5hXIscw= github.com/libp2p/go-ws-transport v0.3.1/go.mod h1:bpgTJmRZAvVHrgHybCVyqoBmyLQ1fiZuEaBYusP5zsk= +github.com/libp2p/go-ws-transport v0.4.0 h1:9tvtQ9xbws6cA5LvqdE6Ne3vcmGB4f1z9SByggk4s0k= +github.com/libp2p/go-ws-transport v0.4.0/go.mod h1:EcIEKqf/7GDjth6ksuS/6p7R49V4CBY6/E7R/iyhYUA= github.com/libp2p/go-yamux v1.2.1/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow= github.com/libp2p/go-yamux v1.2.2/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow= github.com/libp2p/go-yamux v1.2.3/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow= @@ -1140,12 +1195,16 @@ github.com/libp2p/go-yamux v1.3.7/go.mod h1:fr7aVgmdNGJK+N1g+b6DW6VxzbRCjCOejR/h github.com/libp2p/go-yamux v1.4.0/go.mod h1:fr7aVgmdNGJK+N1g+b6DW6VxzbRCjCOejR/hkmpooHE= github.com/libp2p/go-yamux v1.4.1 h1:P1Fe9vF4th5JOxxgQvfbOHkrGqIZniTLf+ddhZp8YTI= github.com/libp2p/go-yamux v1.4.1/go.mod h1:fr7aVgmdNGJK+N1g+b6DW6VxzbRCjCOejR/hkmpooHE= +github.com/libp2p/go-yamux/v2 v2.2.0 h1:RwtpYZ2/wVviZ5+3pjC8qdQ4TKnrak0/E01N1UWoAFU= +github.com/libp2p/go-yamux/v2 v2.2.0/go.mod h1:3So6P6TV6r75R9jiBpiIKgU/66lOarCZjqROGxzPpPQ= github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= github.com/lucas-clemente/quic-go v0.11.2/go.mod h1:PpMmPfPKO9nKJ/psF49ESTAGQSdfXxlg1otPbEB2nOw= github.com/lucas-clemente/quic-go v0.16.0/go.mod h1:I0+fcNTdb9eS1ZcjQZbDVPGchJ86chcIxPALn9lEJqE= -github.com/lucas-clemente/quic-go v0.18.1 h1:DMR7guC0NtVS8zNZR3IO7NARZvZygkSC56GGtC6cyys= 
-github.com/lucas-clemente/quic-go v0.18.1/go.mod h1:yXttHsSNxQi8AWijC/vLP+OJczXqzHSOcJrM5ITUlCg= +github.com/lucas-clemente/quic-go v0.19.3 h1:eCDQqvGBB+kCTkA0XrAFtNe81FMa0/fn4QSoeAbmiF4= +github.com/lucas-clemente/quic-go v0.19.3/go.mod h1:ADXpNbTQjq1hIzCpB+y/k5iz4n4z4IwqoLb94Kh5Hu8= +github.com/lucasb-eyer/go-colorful v1.0.3 h1:QIbQXiugsb+q10B+MI+7DI1oQLdmnep86tWFlaaUAac= +github.com/lucasb-eyer/go-colorful v1.0.3/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i7ruzyGqttikkLy0= github.com/lufia/iostat v1.1.0/go.mod h1:rEPNA0xXgjHQjuI5Cy05sLlS2oRcSlWHRLrvh/AQ+Pg= github.com/lunixbochs/vtclean v1.0.0/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI= github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= @@ -1157,13 +1216,13 @@ github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN github.com/mailru/easyjson v0.7.1/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/marten-seemann/qpack v0.1.0/go.mod h1:LFt1NU/Ptjip0C2CPkhimBz5CGE3WGDAUWqna+CNTrI= -github.com/marten-seemann/qpack v0.2.0/go.mod h1:F7Gl5L1jIgN1D11ucXefiuJS9UMVP2opoCp2jDKb7wc= +github.com/marten-seemann/qpack v0.2.1/go.mod h1:F7Gl5L1jIgN1D11ucXefiuJS9UMVP2opoCp2jDKb7wc= github.com/marten-seemann/qtls v0.2.3/go.mod h1:xzjG7avBwGGbdZ8dTGxlBnLArsVKLvwmjgmPuiQEcYk= github.com/marten-seemann/qtls v0.9.1/go.mod h1:T1MmAdDPyISzxlK6kjRr0pcZFBVd1OZbBb/j3cvzHhk= github.com/marten-seemann/qtls v0.10.0 h1:ECsuYUKalRL240rRD4Ri33ISb7kAQ3qGDlrrl55b2pc= github.com/marten-seemann/qtls v0.10.0/go.mod h1:UvMd1oaYDACI99/oZUYLzMCkBXQVT0aGm99sJhbT8hs= -github.com/marten-seemann/qtls-go1-15 v0.1.0 h1:i/YPXVxz8q9umso/5y474CNcHmTpA+5DH+mFPjx6PZg= -github.com/marten-seemann/qtls-go1-15 v0.1.0/go.mod h1:GyFwywLKkRt+6mfU99csTEY1joMZz5vmB1WNZH3P81I= +github.com/marten-seemann/qtls-go1-15 v0.1.1 h1:LIH6K34bPVttyXnUWixk0bzH6/N07VxbSabxn5A5gZQ= 
+github.com/marten-seemann/qtls-go1-15 v0.1.1/go.mod h1:GyFwywLKkRt+6mfU99csTEY1joMZz5vmB1WNZH3P81I= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ= github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= @@ -1179,8 +1238,9 @@ github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHX github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= -github.com/mattn/go-runewidth v0.0.7 h1:Ei8KR0497xHyKJPAv59M1dkC+rOZCMBJ+t3fZ+twI54= github.com/mattn/go-runewidth v0.0.7/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= +github.com/mattn/go-runewidth v0.0.10 h1:CoZ3S2P7pvtP45xOtBw+/mDL2z0RKI576gSkzRRpdGg= +github.com/mattn/go-runewidth v0.0.10/go.mod h1:RAqKPSqVFrSLVXbA8x7dzmKdmGzieGRCM46jaSJTDAk= github.com/mattn/go-sqlite3 v1.11.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= github.com/mattn/go-tty v0.0.0-20180907095812-13ff1204f104/go.mod h1:XPvLUNfbS4fJH25nqRHfWLMa1ONC8Amw+mIA639KxkE= github.com/mattn/go-xmlrpc v0.0.3/go.mod h1:mqc2dz7tP5x5BKlCahN/n+hs7OSZKJkS9JsHNBRlrxA= @@ -1199,6 +1259,8 @@ github.com/miekg/dns v1.1.4/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nr github.com/miekg/dns v1.1.12/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/miekg/dns v1.1.28/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM= github.com/miekg/dns v1.1.31/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM= +github.com/miekg/dns v1.1.41 h1:WMszZWJG0XmzbK9FEmzH2TVcqYzFesusSIB41b8KHxY= +github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1 
h1:lYpkrQH5ajf0OXOcUbGjvZxxijuBwbbmlSxLiuofa+g= github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1/go.mod h1:pD8RvIylQ358TN4wwqatJ8rNavkEINozVn9DtGI3dfQ= github.com/minio/sha256-simd v0.0.0-20190131020904-2d45a736cd16/go.mod h1:2FMWW+8GMoPweT6+pI63m9YE3Lmw4J71hV56Chs1E/U= @@ -1246,8 +1308,9 @@ github.com/multiformats/go-multiaddr-dns v0.0.1/go.mod h1:9kWcqw/Pj6FwxAwW38n/94 github.com/multiformats/go-multiaddr-dns v0.0.2/go.mod h1:9kWcqw/Pj6FwxAwW38n/9403szc57zJPs45fmnznu3Q= github.com/multiformats/go-multiaddr-dns v0.0.3/go.mod h1:9kWcqw/Pj6FwxAwW38n/9403szc57zJPs45fmnznu3Q= github.com/multiformats/go-multiaddr-dns v0.1.0/go.mod h1:01k2RAqtoXIuPa3DCavAE9/6jc6nM0H3EgZyfUhN2oY= -github.com/multiformats/go-multiaddr-dns v0.2.0 h1:YWJoIDwLePniH7OU5hBnDZV6SWuvJqJ0YtN6pLeH9zA= github.com/multiformats/go-multiaddr-dns v0.2.0/go.mod h1:TJ5pr5bBO7Y1B18djPuRsVkduhQH2YqYSbxWJzYGdK0= +github.com/multiformats/go-multiaddr-dns v0.3.1 h1:QgQgR+LQVt3NPTjbrLLpsaT2ufAA2y0Mkk+QRVJbW3A= +github.com/multiformats/go-multiaddr-dns v0.3.1/go.mod h1:G/245BRQ6FJGmryJCrOuTdB37AMA5AMOVuO6NY3JwTk= github.com/multiformats/go-multiaddr-fmt v0.0.1/go.mod h1:aBYjqL4T/7j4Qx+R73XSv/8JsgnRFlf0w2KGLCmXl3Q= github.com/multiformats/go-multiaddr-fmt v0.1.0 h1:WLEFClPycPkp4fnIzoFoV9FVd49/eQsuaL3/CWe167E= github.com/multiformats/go-multiaddr-fmt v0.1.0/go.mod h1:hGtDIW4PU4BqJ50gW2quDuPVjyWNZxToGUh/HwTZYJo= @@ -1277,8 +1340,10 @@ github.com/multiformats/go-multistream v0.0.1/go.mod h1:fJTiDfXJVmItycydCnNx4+wS github.com/multiformats/go-multistream v0.0.4/go.mod h1:fJTiDfXJVmItycydCnNx4+wSzZ5NwG2FEVAI30fiovg= github.com/multiformats/go-multistream v0.1.0/go.mod h1:fJTiDfXJVmItycydCnNx4+wSzZ5NwG2FEVAI30fiovg= github.com/multiformats/go-multistream v0.1.1/go.mod h1:KmHZ40hzVxiaiwlj3MEbYgK9JFk2/9UktWZAF54Du38= -github.com/multiformats/go-multistream v0.2.0 h1:6AuNmQVKUkRnddw2YiDjt5Elit40SFxMJkVnhmETXtU= github.com/multiformats/go-multistream v0.2.0/go.mod 
h1:5GZPQZbkWOLOn3J2y4Y99vVW7vOfsAflxARk3x14o6k= +github.com/multiformats/go-multistream v0.2.1/go.mod h1:5GZPQZbkWOLOn3J2y4Y99vVW7vOfsAflxARk3x14o6k= +github.com/multiformats/go-multistream v0.2.2 h1:TCYu1BHTDr1F/Qm75qwYISQdzGcRdC21nFgQW7l7GBo= +github.com/multiformats/go-multistream v0.2.2/go.mod h1:UIcnm7Zuo8HKG+HkWgfQsGL+/MIEhyTqbODbIUwSXKs= github.com/multiformats/go-varint v0.0.1/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= github.com/multiformats/go-varint v0.0.2/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= github.com/multiformats/go-varint v0.0.5/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= @@ -1295,8 +1360,6 @@ github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OS github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo= github.com/neelance/sourcemap v0.0.0-20151028013722-8c68805598ab/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM= github.com/ngdinhtoan/glide-cleanup v0.2.0/go.mod h1:UQzsmiDOb8YV3nOsCxK/c9zPpCZVNoHScRE3EO9pVMM= -github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= -github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/nikkolasg/hexjson v0.0.0-20181101101858-78e39397e00c h1:5bFTChQxSKNwy8ALwOebjekYExl9HTT9urdawqC95tA= github.com/nikkolasg/hexjson v0.0.0-20181101101858-78e39397e00c/go.mod h1:7qN3Y0BvzRUf4LofcoJplQL10lsFDb4PYlePTVwrP28= github.com/nkovacs/streamquote v0.0.0-20170412213628-49af9bddb229 h1:E2B8qYyeSgv5MXpmzZXRNp8IAQ4vjxIjhpAf5hv/tAg= @@ -1315,6 +1378,7 @@ github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0 github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= github.com/onsi/ginkgo v1.14.0 h1:2mOpI4JVVPBN+WQRa0WKH2eXR+Ey+uK4n7Zj0aYpIQA= github.com/onsi/ginkgo v1.14.0/go.mod 
h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= +github.com/onsi/gomega v1.4.1/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= @@ -1419,6 +1483,8 @@ github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0/go.mod h1:bCqn github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0 h1:OdAsTTz6OkFY5QxjkYwrChwuRruF69c169dPK26NUlk= github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= github.com/retailnext/hllpp v1.0.1-0.20180308014038-101a6d2f8b52/go.mod h1:RDpi1RftBQPUCDRw6SmxeaREsAaRKnOclghuzp/WRzc= +github.com/rivo/uniseg v0.1.0 h1:+2KBaVoUmb9XzDsrx/Ct0W/EYOSFf/nWTauy++DprtY= +github.com/rivo/uniseg v0.1.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= @@ -1503,6 +1569,7 @@ github.com/src-d/envconfig v1.0.0/go.mod h1:Q9YQZ7BKITldTBnoxsE5gOeB5y66RyPXeue/ github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI= +github.com/streadway/quantile v0.0.0-20150917103942-b0c588724d25/go.mod h1:lbP8tGiBjZ5YWIc2fzuRpTaz0b/53vT6PEs3QuAWzuU= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= 
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.2.0 h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48= @@ -1612,6 +1679,7 @@ github.com/xorcare/golden v0.6.0/go.mod h1:7T39/ZMvaSEZlBPoYfVFmsBLmUl3uz9IuzWj/ github.com/xorcare/golden v0.6.1-0.20191112154924-b87f686d7542 h1:oWgZJmC1DorFZDpfMfWg7xk29yEOZiXmo/wZl+utTI8= github.com/xorcare/golden v0.6.1-0.20191112154924-b87f686d7542/go.mod h1:7T39/ZMvaSEZlBPoYfVFmsBLmUl3uz9IuzWj/U6FtvQ= github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/zondax/hid v0.9.0 h1:eiT3P6vNxAEVxXMw66eZUAAnU2zD33JBkfG/EnfAKl8= @@ -1640,8 +1708,8 @@ go.opencensus.io v0.22.1/go.mod h1:Ap50jQcDJrx6rB6VgeeFPtuPIf3wMRvRfrfYDO6+BmA= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.5 h1:dntmOdLpSpHlVqbW5Eay97DelsZHe+55D+xC6i0dDS0= -go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= +go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= +go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= @@ -1695,16 +1763,19 @@ golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto 
v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200115085410-6d4e4cb37c7d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200117160349-530e935923ad/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200128174031-69ecbb4d6d5d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200221231518-2aa609cf4a9d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200423211502-4bdfaf469ed5/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200602180216-279210d13fed/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200604202706-70a84ac30bf9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a h1:vclmkQCjlDX5OydZ9wv8rBCcS0QyQY66Mpf/7BZbInM= golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2 h1:It14KIkyBFYkHkwZ7k45minvA9aorojkyjGk9KJ5B/w= +golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp 
v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20181106170214-d68db9428509/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -1731,8 +1802,9 @@ golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHl golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= -golang.org/x/lint v0.0.0-20200130185559-910be7a94367 h1:0IiAsCRByjO2QjX7ZPkw5oU9x+n1YqRL802rjC0c3Aw= golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b h1:Wh+f8QHJXR411sJR8/vRBTZ7YapZaRvUcLFFJhusH0k= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= @@ -1743,6 +1815,7 @@ golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0 h1:RM4zey1++hCTbCVQfnWeKs9/IEsaBLA8vTkd0WVtmH4= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20180524181706-dfa909b99c79/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180719180050-a680a1efc54d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net 
v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -1778,6 +1851,7 @@ golang.org/x/net v0.0.0-20191007182048-72f939374954/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200519113804-d87ec0cfa476/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= @@ -1785,8 +1859,11 @@ golang.org/x/net v0.0.0-20200602114024-627f9648deb9/go.mod h1:qpuaurCH72eLCgpAm/ golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201022231255-08b38378de70 h1:Z6x4N9mAi4oF0TbHweCsH618MO6OI6UFgV0FP5n0wBY= golang.org/x/net v0.0.0-20201022231255-08b38378de70/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210423184538-5f58ad60dda6 h1:0PC75Fz/kyMGhL0e1QnypqK2kQMqKt9csD1GnMJR+Zk= 
+golang.org/x/net v0.0.0-20210423184538-5f58ad60dda6/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -1804,8 +1881,8 @@ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201207232520-09787c993a3a h1:DcqTD9SDLc+1P/r1EmRBwnVsrOwW+kk2vWf9n+1sGhs= -golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180202135801-37707fdb30a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180810173357-98c5dad5d1a0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1875,14 +1952,21 @@ golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200812155832-6a926be9bd1d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200926100807-9d91bd62050c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201119102817-f84b799fce68 h1:nxC68pudNYkKU6jWhgrqdreuFiOQWj1Fs7T3VrH4Pjw= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210426080607-c94f62235c83 h1:kHSDPqCtsHZOg0nVylfTo20DDhE9gG4Y0jn7hKQ0QAM= +golang.org/x/sys v0.0.0-20210426080607-c94f62235c83/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20201210144234-2321bbc49cbf h1:MZ2shdL+ZM/XzY3ZGOnh4Nlpnxz5GSOhOmtHo3iPU6M= +golang.org/x/term v0.0.0-20201210144234-2321bbc49cbf/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= -golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod 
h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -1928,10 +2012,12 @@ golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapK golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200216192241-b320d3a0f5a2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200711155855-7342f9734a7d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200827010519-17fd2f27a9e3/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20201112185108-eeaa07dd7696 h1:Bfazo+enXJET5SbHeh95NtxabJF6fJ9r/jpfRJgd3j4= golang.org/x/tools v0.0.0-20201112185108-eeaa07dd7696/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a h1:CB3a9Nez8M13wwlr/E2YtwoU+qYHKfC+JrDa45RXXoQ= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -2006,8 +2092,9 @@ google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8 google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.28.1/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= -google.golang.org/grpc v1.31.1 
h1:SfXqXS5hkufcdZ/mHtYCh53P2b+92WQq/DZcKLgsFRs= google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.33.2 h1:EQyQC3sa8M+p6Ulc8yy9SWSS2GVwyRc83gAbG8lrl4o= +google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -2023,8 +2110,8 @@ gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLks gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU= -gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= gopkg.in/cheggaaa/pb.v1 v1.0.28 h1:n1tBJnnK2r7g9OW2btFH91V92STTUevLXYFb8gy9EMk= gopkg.in/cheggaaa/pb.v1 v1.0.28/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= diff --git a/testplans/lotus-soup/init.go b/testplans/lotus-soup/init.go index a8d8e7478..c20f5f2b8 100644 --- a/testplans/lotus-soup/init.go +++ b/testplans/lotus-soup/init.go @@ -22,6 +22,12 @@ func init() { _ = log.SetLogLevel("stats", 
"WARN") _ = log.SetLogLevel("dht/RtRefreshManager", "ERROR") // noisy _ = log.SetLogLevel("bitswap", "ERROR") // noisy + _ = log.SetLogLevel("badgerbs", "ERROR") // noisy + _ = log.SetLogLevel("sub", "ERROR") // noisy + _ = log.SetLogLevel("pubsub", "ERROR") // noisy + _ = log.SetLogLevel("chain", "ERROR") // noisy + _ = log.SetLogLevel("chainstore", "ERROR") // noisy + _ = log.SetLogLevel("basichost", "ERROR") // noisy _ = os.Setenv("BELLMAN_NO_GPU", "1") @@ -36,7 +42,7 @@ func init() { // deadline when the challenge is available. // // This will auto-scale the proving period. - policy.SetWPoStChallengeWindow(abi.ChainEpoch(5)) + // policy.SetWPoStChallengeWindow(abi.ChainEpoch(5)) // commented-out until we enable PoSt faults tests // Number of epochs between publishing the precommit and when the challenge for interactive PoRep is drawn // used to ensure it is not predictable by miner. diff --git a/testplans/lotus-soup/manifest.toml b/testplans/lotus-soup/manifest.toml index 8cc2f4caf..9f5a57444 100644 --- a/testplans/lotus-soup/manifest.toml +++ b/testplans/lotus-soup/manifest.toml @@ -9,8 +9,8 @@ enabled = true [builders."docker:go"] enabled = true -build_base_image = "iptestground/oni-buildbase:v13-lotus" -runtime_image = "iptestground/oni-runtime:v8-debug" +build_base_image = "iptestground/oni-buildbase:v15-lotus" +runtime_image = "iptestground/oni-runtime:v10-debug" [runners."local:exec"] enabled = true @@ -58,6 +58,9 @@ instances = { min = 1, max = 100, default = 5 } # Fast retrieval fast_retrieval = { type = "bool", default = false } + # Bounce connection during push and pull data transfers + bounce_conn_data_transfers = { type = "bool", default = false } + [[testcases]] name = "drand-halting" diff --git a/testplans/lotus-soup/rfwp/chain_state.go b/testplans/lotus-soup/rfwp/chain_state.go index 90159e924..d91acdff9 100644 --- a/testplans/lotus-soup/rfwp/chain_state.go +++ b/testplans/lotus-soup/rfwp/chain_state.go @@ -7,6 +7,8 @@ import ( "encoding/json" 
"fmt" "io" + "math" + corebig "math/big" "os" "sort" "text/tabwriter" @@ -27,6 +29,7 @@ import ( "github.com/filecoin-project/go-state-types/abi" sealing "github.com/filecoin-project/lotus/extern/storage-sealing" + "github.com/filecoin-project/lotus/chain/actors/builtin" "github.com/filecoin-project/lotus/chain/actors/builtin/miner" tstats "github.com/filecoin-project/lotus/tools/stats" ) @@ -581,18 +584,24 @@ func (i *MinerInfo) MarshalPlainText() ([]byte, error) { fmt.Fprintf(w, "Sector Size: %s\n", i.SectorSize) pow := i.MinerPower - rpercI := types.BigDiv(types.BigMul(pow.MinerPower.RawBytePower, types.NewInt(1000000)), pow.TotalPower.RawBytePower) - qpercI := types.BigDiv(types.BigMul(pow.MinerPower.QualityAdjPower, types.NewInt(1000000)), pow.TotalPower.QualityAdjPower) fmt.Fprintf(w, "Byte Power: %s / %s (%0.4f%%)\n", types.SizeStr(pow.MinerPower.RawBytePower), types.SizeStr(pow.TotalPower.RawBytePower), - float64(rpercI.Int64())/10000) + types.BigDivFloat( + types.BigMul(pow.MinerPower.RawBytePower, big.NewInt(100)), + pow.TotalPower.RawBytePower, + ), + ) fmt.Fprintf(w, "Actual Power: %s / %s (%0.4f%%)\n", types.DeciStr(pow.MinerPower.QualityAdjPower), types.DeciStr(pow.TotalPower.QualityAdjPower), - float64(qpercI.Int64())/10000) + types.BigDivFloat( + types.BigMul(pow.MinerPower.QualityAdjPower, big.NewInt(100)), + pow.TotalPower.QualityAdjPower, + ), + ) fmt.Fprintf(w, "\tCommitted: %s\n", types.SizeStr(i.CommittedBytes)) @@ -608,16 +617,50 @@ func (i *MinerInfo) MarshalPlainText() ([]byte, error) { if !i.MinerPower.HasMinPower { fmt.Fprintf(w, "Below minimum power threshold, no blocks will be won\n") } else { - expWinChance := float64(types.BigMul(qpercI, types.NewInt(build.BlocksPerEpoch)).Int64()) / 1000000 - if expWinChance > 0 { - if expWinChance > 1 { - expWinChance = 1 - } - winRate := time.Duration(float64(time.Second*time.Duration(build.BlockDelaySecs)) / expWinChance) - winPerDay := float64(time.Hour*24) / float64(winRate) - fmt.Fprintln(w, 
"Expected block win rate: ") - fmt.Fprintf(w, "%.4f/day (every %s)\n", winPerDay, winRate.Truncate(time.Second)) + winRatio := new(corebig.Rat).SetFrac( + types.BigMul(pow.MinerPower.QualityAdjPower, types.NewInt(build.BlocksPerEpoch)).Int, + pow.TotalPower.QualityAdjPower.Int, + ) + + if winRatioFloat, _ := winRatio.Float64(); winRatioFloat > 0 { + + // if the corresponding poisson distribution isn't infinitely small then + // throw it into the mix as well, accounting for multi-wins + winRationWithPoissonFloat := -math.Expm1(-winRatioFloat) + winRationWithPoisson := new(corebig.Rat).SetFloat64(winRationWithPoissonFloat) + if winRationWithPoisson != nil { + winRatio = winRationWithPoisson + winRatioFloat = winRationWithPoissonFloat + } + + weekly, _ := new(corebig.Rat).Mul( + winRatio, + new(corebig.Rat).SetInt64(7*builtin.EpochsInDay), + ).Float64() + + avgDuration, _ := new(corebig.Rat).Mul( + new(corebig.Rat).SetInt64(builtin.EpochDurationSeconds), + new(corebig.Rat).Inv(winRatio), + ).Float64() + + fmt.Fprintf(w, "Projected average block win rate: %.02f/week (every %s)\n", + weekly, + (time.Second * time.Duration(avgDuration)).Truncate(time.Second).String(), + ) + + // Geometric distribution of P(Y < k) calculated as described in https://en.wikipedia.org/wiki/Geometric_distribution#Probability_Outcomes_Examples + // https://www.wolframalpha.com/input/?i=t+%3E+0%3B+p+%3E+0%3B+p+%3C+1%3B+c+%3E+0%3B+c+%3C1%3B+1-%281-p%29%5E%28t%29%3Dc%3B+solve+t + // t == how many dice-rolls (epochs) before win + // p == winRate == ( minerPower / netPower ) + // c == target probability of win ( 99.9% in this case ) + fmt.Fprintf(w, "Projected block win with 99.9%% probability every %s\n", + (time.Second * time.Duration( + builtin.EpochDurationSeconds*math.Log(1-0.999)/ + math.Log(1-winRatioFloat), + )).Truncate(time.Second).String(), + ) + fmt.Fprintln(w, "(projections DO NOT account for future network and miner growth)") } } diff --git a/testplans/lotus-soup/testkit/net.go 
b/testplans/lotus-soup/testkit/net.go index 018813830..d2dbc2ae6 100644 --- a/testplans/lotus-soup/testkit/net.go +++ b/testplans/lotus-soup/testkit/net.go @@ -74,6 +74,11 @@ func ApplyNetworkParameters(t *TestEnvironment) { t.D().RecordPoint("duplicate_packet_correlation", float64(ls.DuplicateCorr)) } + if t.IsParamSet("bandwidth") { + ls.Bandwidth = t.SizeParam("bandwidth") + t.D().RecordPoint("bandwidth_bytes", float64(ls.Bandwidth)) + } + t.NetClient.MustConfigureNetwork(ctx, &network.Config{ Network: "default", Enable: true, diff --git a/testplans/lotus-soup/testkit/role_bootstrapper.go b/testplans/lotus-soup/testkit/role_bootstrapper.go index 14f74c5ed..4a6ac56c9 100644 --- a/testplans/lotus-soup/testkit/role_bootstrapper.go +++ b/testplans/lotus-soup/testkit/role_bootstrapper.go @@ -120,10 +120,11 @@ func PrepareBootstrapper(t *TestEnvironment) (*Bootstrapper, error) { bootstrapperIP := t.NetClient.MustGetDataNetworkIP().String() n := &LotusNode{} + r := repo.NewMemory(nil) stop, err := node.New(context.Background(), node.FullAPI(&n.FullApi), - node.Online(), - node.Repo(repo.NewMemory(nil)), + node.Base(), + node.Repo(r), node.Override(new(modules.Genesis), modtest.MakeGenesisMem(&genesisBuffer, genesisTemplate)), withApiEndpoint(fmt.Sprintf("/ip4/0.0.0.0/tcp/%s", t.PortNumber("node_rpc", "0"))), withListenAddress(bootstrapperIP), diff --git a/testplans/lotus-soup/testkit/role_client.go b/testplans/lotus-soup/testkit/role_client.go index 9fcd42902..d18a835d2 100644 --- a/testplans/lotus-soup/testkit/role_client.go +++ b/testplans/lotus-soup/testkit/role_client.go @@ -66,7 +66,7 @@ func PrepareClient(t *TestEnvironment) (*LotusClient, error) { n := &LotusNode{} stop, err := node.New(context.Background(), node.FullAPI(&n.FullApi), - node.Online(), + node.Base(), node.Repo(nodeRepo), withApiEndpoint(fmt.Sprintf("/ip4/0.0.0.0/tcp/%s", t.PortNumber("node_rpc", "0"))), withGenesis(genesisMsg.Genesis), diff --git a/testplans/lotus-soup/testkit/role_miner.go 
b/testplans/lotus-soup/testkit/role_miner.go index a0248cfdd..52bcfc98b 100644 --- a/testplans/lotus-soup/testkit/role_miner.go +++ b/testplans/lotus-soup/testkit/role_miner.go @@ -27,6 +27,7 @@ import ( "github.com/filecoin-project/lotus/markets/storageadapter" "github.com/filecoin-project/lotus/miner" "github.com/filecoin-project/lotus/node" + "github.com/filecoin-project/lotus/node/config" "github.com/filecoin-project/lotus/node/impl" "github.com/filecoin-project/lotus/node/modules" "github.com/filecoin-project/lotus/node/repo" @@ -52,6 +53,7 @@ type LotusMiner struct { NodeRepo repo.Repo FullNetAddrs []peer.AddrInfo GenesisMsg *GenesisMsg + Subsystems config.MinerSubsystemConfig t *TestEnvironment } @@ -141,12 +143,22 @@ func PrepareMiner(t *TestEnvironment) (*LotusMiner, error) { return nil, err } + var subsystems config.MinerSubsystemConfig + { lr, err := minerRepo.Lock(repo.StorageMiner) if err != nil { return nil, err } + c, err := lr.Config() + if err != nil { + return nil, err + } + + cfg := c.(*config.StorageMiner) + subsystems = cfg.Subsystems + ks, err := lr.KeyStore() if err != nil { return nil, err @@ -239,7 +251,7 @@ func PrepareMiner(t *TestEnvironment) (*LotusMiner, error) { stop1, err := node.New(context.Background(), node.FullAPI(&n.FullApi), - node.Online(), + node.Base(), node.Repo(nodeRepo), withGenesis(genesisMsg.Genesis), withApiEndpoint(fmt.Sprintf("/ip4/0.0.0.0/tcp/%s", t.PortNumber("node_rpc", "0"))), @@ -260,8 +272,8 @@ func PrepareMiner(t *TestEnvironment) (*LotusMiner, error) { } minerOpts := []node.Option{ - node.StorageMiner(&n.MinerApi), - node.Online(), + node.StorageMiner(&n.MinerApi, subsystems), + node.Base(), node.Repo(minerRepo), node.Override(new(api.FullNode), n.FullApi), node.Override(new(*storageadapter.DealPublisher), storageadapter.NewDealPublisher(nil, storageadapter.PublishMsgConfig{ @@ -416,7 +428,7 @@ func PrepareMiner(t *TestEnvironment) (*LotusMiner, error) { return err.ErrorOrNil() } - m := &LotusMiner{n, 
minerRepo, nodeRepo, fullNetAddrs, genesisMsg, t} + m := &LotusMiner{n, minerRepo, nodeRepo, fullNetAddrs, genesisMsg, subsystems, t} return m, nil } @@ -443,7 +455,7 @@ func RestoreMiner(t *TestEnvironment, m *LotusMiner) (*LotusMiner, error) { stop1, err := node.New(context.Background(), node.FullAPI(&n.FullApi), - node.Online(), + node.Base(), node.Repo(nodeRepo), //withGenesis(genesisMsg.Genesis), withApiEndpoint(fmt.Sprintf("/ip4/0.0.0.0/tcp/%s", t.PortNumber("node_rpc", "0"))), @@ -457,8 +469,8 @@ func RestoreMiner(t *TestEnvironment, m *LotusMiner) (*LotusMiner, error) { } minerOpts := []node.Option{ - node.StorageMiner(&n.MinerApi), - node.Online(), + node.StorageMiner(&n.MinerApi, m.Subsystems), + node.Base(), node.Repo(minerRepo), node.Override(new(api.FullNode), n.FullApi), withApiEndpoint(fmt.Sprintf("/ip4/0.0.0.0/tcp/%s", t.PortNumber("miner_rpc", "0"))), @@ -501,7 +513,7 @@ func RestoreMiner(t *TestEnvironment, m *LotusMiner) (*LotusMiner, error) { t.RecordMessage("connected to full node of miner %d on %v", i, fullNetAddrs[i]) } - pm := &LotusMiner{n, minerRepo, nodeRepo, fullNetAddrs, genesisMsg, t} + pm := &LotusMiner{n, minerRepo, nodeRepo, fullNetAddrs, genesisMsg, m.Subsystems, t} return pm, err } @@ -600,7 +612,7 @@ func startStorageMinerAPIServer(t *TestEnvironment, repo repo.Repo, minerApi api rpcServer.Register("Filecoin", minerApi) mux.Handle("/rpc/v0", rpcServer) - mux.PathPrefix("/remote").HandlerFunc(minerApi.(*impl.StorageMinerAPI).ServeRemote) + mux.PathPrefix("/remote").HandlerFunc(minerApi.(*impl.StorageMinerAPI).ServeRemote(true)) mux.PathPrefix("/").Handler(http.DefaultServeMux) // pprof exporter, err := prometheus.NewExporter(prometheus.Options{ diff --git a/tools/packer/scripts/butterflynet/lotus-init.sh b/tools/packer/scripts/butterflynet/lotus-init.sh index f7afd4dfa..cfbf93f78 100755 --- a/tools/packer/scripts/butterflynet/lotus-init.sh +++ b/tools/packer/scripts/butterflynet/lotus-init.sh @@ -6,7 +6,7 @@ 
GATE="$LOTUS_PATH"/date_initialized # Don't init if already initialized. -if [ -f "GATE" ]; then +if [ -f "$GATE" ]; then echo lotus already initialized. exit 0 fi diff --git a/tools/packer/scripts/calibrationnet/lotus-init.sh b/tools/packer/scripts/calibrationnet/lotus-init.sh index d68b3357c..77260fa29 100755 --- a/tools/packer/scripts/calibrationnet/lotus-init.sh +++ b/tools/packer/scripts/calibrationnet/lotus-init.sh @@ -6,7 +6,7 @@ GATE="$LOTUS_PATH"/date_initialized # Don't init if already initialized. -if [ -f "GATE" ]; then +if [ -f "$GATE" ]; then echo lotus already initialized. exit 0 fi diff --git a/tools/packer/scripts/mainnet/lotus-init.sh b/tools/packer/scripts/mainnet/lotus-init.sh index a014f617e..b22853365 100755 --- a/tools/packer/scripts/mainnet/lotus-init.sh +++ b/tools/packer/scripts/mainnet/lotus-init.sh @@ -6,7 +6,7 @@ GATE="$LOTUS_PATH"/date_initialized # Don't init if already initialized. -if [ -f "GATE" ]; then +if [ -f "$GATE" ]; then echo lotus already initialized. exit 0 fi diff --git a/tools/packer/scripts/nerpanet/lotus-init.sh b/tools/packer/scripts/nerpanet/lotus-init.sh index 968ae395c..a0f19ae92 100755 --- a/tools/packer/scripts/nerpanet/lotus-init.sh +++ b/tools/packer/scripts/nerpanet/lotus-init.sh @@ -6,7 +6,7 @@ GATE="$LOTUS_PATH"/date_initialized # Don't init if already initialized. -if [ -f "GATE" ]; then +if [ -f "$GATE" ]; then echo lotus already initialized. exit 0 fi