diff --git a/.circleci/config.yml b/.circleci/config.yml index e91c41129..70e435d3b 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -1,7 +1,7 @@ version: 2.1 orbs: - aws-cli: circleci/aws-cli@1.3.2 - docker: circleci/docker@2.1.4 + aws-cli: circleci/aws-cli@4.1.1 + docker: circleci/docker@2.3.0 executors: golang: @@ -70,8 +70,6 @@ commands: name: Restore parameters cache keys: - 'v26-2k-lotus-params' - paths: - - /var/tmp/filecoin-proof-parameters/ - run: ./lotus fetch-params 2048 - save_cache: name: Save parameters cache @@ -96,6 +94,7 @@ commands: git fetch --all install-ubuntu-deps: steps: + - run: sudo apt install curl ca-certificates gnupg - run: sudo apt-get update - run: sudo apt-get install ocl-icd-opencl-dev libhwloc-dev check-go-version: @@ -143,9 +142,9 @@ jobs: Run tests with gotestsum. working_directory: ~/lotus parameters: &test-params - executor: - type: executor - default: golang + resource_class: + type: string + default: medium+ go-test-flags: type: string default: "-timeout 20m" @@ -164,7 +163,14 @@ jobs: type: string default: unit description: Test suite name to report to CircleCI. - executor: << parameters.executor >> + docker: + - image: cimg/go:1.20 + environment: + LOTUS_HARMONYDB_HOSTS: yugabyte + - image: yugabytedb/yugabyte:2.18.0.0-b65 + command: bin/yugabyted start --daemon=false + name: yugabyte + resource_class: << parameters.resource_class >> steps: - install-ubuntu-deps - attach_workspace: @@ -182,6 +188,8 @@ jobs: command: | mkdir -p /tmp/test-reports/<< parameters.suite >> mkdir -p /tmp/test-artifacts + dockerize -wait tcp://yugabyte:5433 -timeout 3m + env gotestsum \ --format standard-verbose \ --junitfile /tmp/test-reports/<< parameters.suite >>/junit.xml \ @@ -209,7 +217,9 @@ jobs: Branch on github.com/filecoin-project/test-vectors to checkout and test with. If empty (the default) the commit defined by the git submodule is used. 
- executor: << parameters.executor >> + docker: + - image: cimg/go:1.20 + resource_class: << parameters.resource_class >> steps: - install-ubuntu-deps - attach_workspace: @@ -396,15 +406,14 @@ jobs: Run golangci-lint. working_directory: ~/lotus parameters: - executor: - type: executor - default: golang args: type: string default: '' description: | Arguments to pass to golangci-lint - executor: << parameters.executor >> + docker: + - image: cimg/go:1.20 + resource_class: medium+ steps: - install-ubuntu-deps - attach_workspace: @@ -575,7 +584,7 @@ workflows: - build suite: itest-deals_concurrent target: "./itests/deals_concurrent_test.go" - executor: golang-2xl + resource_class: 2xlarge - test: name: test-itest-deals_invalid_utf8_label requires: @@ -768,6 +777,18 @@ workflows: - build suite: itest-get_messages_in_ts target: "./itests/get_messages_in_ts_test.go" + - test: + name: test-itest-harmonydb + requires: + - build + suite: itest-harmonydb + target: "./itests/harmonydb_test.go" + - test: + name: test-itest-harmonytask + requires: + - build + suite: itest-harmonytask + target: "./itests/harmonytask_test.go" - test: name: test-itest-lite_migration requires: @@ -976,14 +997,14 @@ workflows: - build suite: itest-wdpost_worker_config target: "./itests/wdpost_worker_config_test.go" - executor: golang-2xl + resource_class: 2xlarge - test: name: test-itest-worker requires: - build suite: itest-worker target: "./itests/worker_test.go" - executor: golang-2xl + resource_class: 2xlarge - test: name: test-itest-worker_upgrade requires: @@ -996,32 +1017,28 @@ workflows: - build suite: utest-unit-cli target: "./cli/... ./cmd/... ./api/..." + resource_class: 2xlarge get-params: true - executor: golang-2xl - test: name: test-unit-node requires: - build suite: utest-unit-node target: "./node/..." - - - test: name: test-unit-rest requires: - build suite: utest-unit-rest target: "./blockstore/... ./build/... ./chain/... ./conformance/... ./gateway/... ./journal/... ./lib/... 
./markets/... ./paychmgr/... ./tools/..." - - executor: golang-2xl + resource_class: 2xlarge - test: name: test-unit-storage requires: - build suite: utest-unit-storage target: "./storage/... ./extern/..." - - + get-params: true - test: go-test-flags: "-run=TestMulticoreSDR" requires: diff --git a/.circleci/gen.go b/.circleci/gen.go index 93f409df2..19329247a 100644 --- a/.circleci/gen.go +++ b/.circleci/gen.go @@ -10,11 +10,25 @@ import ( "text/template" ) +var GoVersion = "" // from init below. Ex: 1.19.7 + //go:generate go run ./gen.go .. //go:embed template.yml var templateFile embed.FS +func init() { + b, err := os.ReadFile("../go.mod") + if err != nil { + panic("cannot find go.mod in parent folder") + } + for _, line := range strings.Split(string(b), "\n") { + if strings.HasPrefix(line, "go ") { + GoVersion = line[3:] + } + } +} + type ( dirs = []string suite = string @@ -111,6 +125,7 @@ func main() { Networks []string ItestFiles []string UnitSuites map[string]string + GoVersion string } in := data{ Networks: []string{"mainnet", "butterflynet", "calibnet", "debug"}, @@ -125,6 +140,7 @@ func main() { } return ret }(), + GoVersion: GoVersion, } out, err := os.Create("./config.yml") diff --git a/.circleci/template.yml b/.circleci/template.yml index 71616f05f..9011f1a86 100644 --- a/.circleci/template.yml +++ b/.circleci/template.yml @@ -1,7 +1,7 @@ version: 2.1 orbs: - aws-cli: circleci/aws-cli@1.3.2 - docker: circleci/docker@2.1.4 + aws-cli: circleci/aws-cli@4.1.1 + docker: circleci/docker@2.3.0 executors: golang: @@ -70,8 +70,6 @@ commands: name: Restore parameters cache keys: - 'v26-2k-lotus-params' - paths: - - /var/tmp/filecoin-proof-parameters/ - run: ./lotus fetch-params 2048 - save_cache: name: Save parameters cache @@ -96,6 +94,7 @@ commands: git fetch --all install-ubuntu-deps: steps: + - run: sudo apt install curl ca-certificates gnupg - run: sudo apt-get update - run: sudo apt-get install ocl-icd-opencl-dev libhwloc-dev check-go-version: @@ -143,9 
+142,9 @@ jobs: Run tests with gotestsum. working_directory: ~/lotus parameters: &test-params - executor: - type: executor - default: golang + resource_class: + type: string + default: medium+ go-test-flags: type: string default: "-timeout 20m" @@ -164,7 +163,14 @@ jobs: type: string default: unit description: Test suite name to report to CircleCI. - executor: << parameters.executor >> + docker: + - image: cimg/go:[[ .GoVersion]] + environment: + LOTUS_HARMONYDB_HOSTS: yugabyte + - image: yugabytedb/yugabyte:2.18.0.0-b65 + command: bin/yugabyted start --daemon=false + name: yugabyte + resource_class: << parameters.resource_class >> steps: - install-ubuntu-deps - attach_workspace: @@ -182,6 +188,8 @@ jobs: command: | mkdir -p /tmp/test-reports/<< parameters.suite >> mkdir -p /tmp/test-artifacts + dockerize -wait tcp://yugabyte:5433 -timeout 3m + env gotestsum \ --format standard-verbose \ --junitfile /tmp/test-reports/<< parameters.suite >>/junit.xml \ @@ -209,7 +217,9 @@ jobs: Branch on github.com/filecoin-project/test-vectors to checkout and test with. If empty (the default) the commit defined by the git submodule is used. - executor: << parameters.executor >> + docker: + - image: cimg/go:[[ .GoVersion]] + resource_class: << parameters.resource_class >> steps: - install-ubuntu-deps - attach_workspace: @@ -396,15 +406,14 @@ jobs: Run golangci-lint. 
working_directory: ~/lotus parameters: - executor: - type: executor - default: golang args: type: string default: '' description: | Arguments to pass to golangci-lint - executor: << parameters.executor >> + docker: + - image: cimg/go:[[ .GoVersion]] + resource_class: medium+ steps: - install-ubuntu-deps - attach_workspace: @@ -543,7 +552,7 @@ workflows: suite: itest-[[ $name ]] target: "./itests/[[ $file ]]" [[- if or (eq $name "worker") (eq $name "deals_concurrent") (eq $name "wdpost_worker_config")]] - executor: golang-2xl + resource_class: 2xlarge [[- end]] [[- if or (eq $name "wdpost") (eq $name "sector_pledge")]] get-params: true @@ -557,9 +566,16 @@ workflows: - build suite: utest-[[ $suite ]] target: "[[ $pkgs ]]" - [[if eq $suite "unit-cli"]]get-params: true[[end]] - [[if eq $suite "unit-cli"]]executor: golang-2xl[[end]] - [[- if eq $suite "unit-rest"]]executor: golang-2xl[[end]] + [[- if eq $suite "unit-storage"]] + get-params: true + [[- end -]] + [[- if eq $suite "unit-cli"]] + resource_class: 2xlarge + get-params: true + [[- end -]] + [[- if eq $suite "unit-rest"]] + resource_class: 2xlarge + [[- end -]] [[- end]] - test: go-test-flags: "-run=TestMulticoreSDR" diff --git a/.gitignore b/.gitignore index 23a0631c3..c40a76fd0 100644 --- a/.gitignore +++ b/.gitignore @@ -6,6 +6,7 @@ /lotus-chainwatch /lotus-shed /lotus-sim +/lotus-provider /lotus-townhall /lotus-fountain /lotus-stats @@ -41,6 +42,7 @@ build/paramfetch.sh bin/ipget bin/tmp/* .idea +.vscode scratchpad build/builtin-actors/v* diff --git a/.golangci.yml b/.golangci.yml index a4cca9bab..1d455e525 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -14,6 +14,7 @@ linters: - varcheck - deadcode - scopelint + - unused # We don't want to skip builtin/ skip-dirs-use-default: false diff --git a/CHANGELOG.md b/CHANGELOG.md index fc7987e61..5da284702 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,192 @@ # Lotus changelog +# UNRELEASED + +## Improvements + +# v1.25.2 / 2024-01-11 + +This is an 
optional but **highly recommended feature release** of Lotus, as it includes fixes for synchronization issues that users have experienced. The feature release also introduces `Lotus-Provider` in its alpha testing phase, as well as the ability to call external PC2-binaries during the sealing process. + +## ☢️ Upgrade Warnings ☢️ + +There are no upgrade warnings for this feature release. + +## ⭐️ Highlights ⭐️ + +### Lotus-Provider +The feature release ships the alpha release of the new Lotus-Provider binary, together with its initial features - High Availability of WindowPoSt and WinningPoSt. + +So what is so exciting about Lotus-Provider: + +**High Availability** +- You can run as many `Lotus-Provider` instances as you want for both WindowPoSt and WinningPOSt. +- You can connect them to as many clustered Yugabyte instances as you want to. This allows for an NxN configuration where all instances can communicate with all others. +- You have the option to connect different instances to different chain daemons. + +**Simplicity** +- Once the configuration is in the database, setting up a new machine with Lotus-Provider is straightforward. Simply start the binary with the correct flags to find YugabyteDB and specify which configuration layers it should use. + +**Durability** +- `Lotus-Provider` is designed with robustness in mind. Updates to the system are handled seamlessly, ensuring that performance and stability are maintained when taking down machines in your cluster for updates. + +Read more about [`Lotus-Provider` in the documentation here](https://lotus.filecoin.io/storage-providers/lotus-provider/overview/). And check out how you can migrate from [Lotus-Miner to Lotus-Provider here](https://lotus.filecoin.io/storage-providers/lotus-provider/setup/). **(Only recommended in testnets while it's in Alpha)** + +### External PC2-binaries + +In this feature release, storage providers can call external PC2-binaries during the sealing process. 
This allows storage providers to leverage the SupraSeal PC2 binary, which has been shown to improve sealing speed in the PC2-phase. For instance, our current benchmarks show that an NVIDIA RTX A5000 card was able to complete PC2 in approximately 2.5 minutes. + +We have verified that SupraSeal PC2 functions properly with Committed Capacity (CC) sectors, both SyntheticPoReps and non-Synthetic PoReps. However, calling SupraSeal PC2 with deal sectors is not supported in this feature release. + +For more information on how to use SupraSeal PC2 with your `lotus-worker`, as well as how to use the feature, please [refer to the documentation](https://lotus.filecoin.io/tutorials/lotus-miner/supra-seal-pc2/). + +## New features +- feat: sturdypost work branch ([filecoin-project/lotus#11405](https://github.com/filecoin-project/lotus/pull/11405)) + - Adds the `Lotus-Provider` binary, and the HarmonyDB framework. +- feat: worker: Support delegating precommit2 to external binary ([filecoin-project/lotus#11185](https://github.com/filecoin-project/lotus/pull/11185)) + - Allows for delegating PreCommit2 to an external binary. +- feat: build: Add SupraSeal-PC2 binary script ([filecoin-project/lotus#11430](https://github.com/filecoin-project/lotus/pull/11430)) + - Adds a script for building the SupraSeal-PC2 binary easily. +- Feat: daemon: Auto remove existing chain if importing chain file or snapshot ([filecoin-project/lotus#11277](https://github.com/filecoin-project/lotus/pull/11277)) + - Auto removes the existing chain when importing a snapshot. +- feat: Add ETA to lotus sync wait (#11211) ([filecoin-project/lotus#11211](https://github.com/filecoin-project/lotus/pull/11211)) + - Adds an ETA indicator to `lotus sync wait`, so you can get an estimate for how long until sync is completed. 
+- feat: mpool/wdpost: Maximize feecap config ([filecoin-project/lotus#9746](https://github.com/filecoin-project/lotus/pull/9746)) + - Adds a Maximize FeeCap Config +- feat: Add lotus-bench cli option to stress test any binary ([filecoin-project/lotus#11270](https://github.com/filecoin-project/lotus/pull/11270)) + - Enables the `Lotus-Bench` to run any binary and analyze their latency and histogram distribution, track most common errors, perform stress testing under different concurrency levels and see how it works under different QPS. +- feat: chain import: don't walk to genesis - 2-3x faster snapshot import (#11446) ([filecoin-project/lotus#11446](https://github.com/filecoin-project/lotus/pull/11446)) + - Improves Snapshot import speed, by not walking back to genesis on import. +- feat: metric: export Mpool message count ([filecoin-project/lotus#11361](https://github.com/filecoin-project/lotus/pull/11361)) + - Adds the mpool count as a prometheus metric. +- feat: bench: flag to output GenerateWinningPoStWithVanilla params ([filecoin-project/lotus#11460](https://github.com/filecoin-project/lotus/pull/11460)) + +## Improvements +- feat: bootstrap: add glif bootstrap node on calibration ([filecoin-project/lotus#11175](https://github.com/filecoin-project/lotus/pull/11175)) +- fix: bench: Set ticket and seed to a non-all zero value ([filecoin-project/lotus#11429](https://github.com/filecoin-project/lotus/pull/11429)) +- fix: alert: Check UDPbuffer-size ([filecoin-project/lotus#11360](https://github.com/filecoin-project/lotus/pull/11360)) +- feat: cli: sort actor CIDs alphabetically before printing (#11345) ([filecoin-project/lotus#11345](https://github.com/filecoin-project/lotus/pull/11345)) +- fix: worker: Connect when --listen is not set ([filecoin-project/lotus#11294](https://github.com/filecoin-project/lotus/pull/11294)) +- fix: miner info: Show correct sector state counts ([filecoin-project/lotus#11456](https://github.com/filecoin-project/lotus/pull/11456)) +- 
feat: miner: defensive check for equivocation ([filecoin-project/lotus#11321](https://github.com/filecoin-project/lotus/pull/11321)) +- feat: Instructions for setting up Grafana/Prometheus for monitoring local lotus node ([filecoin-project/lotus#11276](https://github.com/filecoin-project/lotus/pull/11276)) +- fix: cli: Wrap error in wallet sign ([filecoin-project/lotus#11273](https://github.com/filecoin-project/lotus/pull/11273)) +- fix: Add time slicing to splitstore purging to reduce lock congestion ([filecoin-project/lotus#11269](https://github.com/filecoin-project/lotus/pull/11269)) +- feat: sealing: load SectorsSummary from sealing SectorStats instead of calling API each time ([filecoin-project/lotus#11353](https://github.com/filecoin-project/lotus/pull/11353)) +- fix: shed: additional metrics in `mpool miner-select-messages` ([filecoin-project/lotus#11253](https://github.com/filecoin-project/lotus/pull/11253)) +- storage: Return soft err when sector alloc fails in acquire ([filecoin-project/lotus#11338](https://github.com/filecoin-project/lotus/pull/11338)) +- feat: miner: log detailed timing breakdown when mining takes longer than the block's timestamp ([filecoin-project/lotus#11228](https://github.com/filecoin-project/lotus/pull/11228)) +- fix: shed: make invariants checker work with splitstore ([filecoin-project/lotus#11391](https://github.com/filecoin-project/lotus/pull/11391)) +- feat: eth: encode eth tx input as solidity ABI (#11402) ([filecoin-project/lotus#11402](https://github.com/filecoin-project/lotus/pull/11402)) +- fix: eth: use the correct state-tree when resolving addresses (#11387) ([filecoin-project/lotus#11387](https://github.com/filecoin-project/lotus/pull/11387)) +- fix: eth: remove trace sanity check (#11385) ([filecoin-project/lotus#11385](https://github.com/filecoin-project/lotus/pull/11385)) +- fix: chain: make failure to load the chain state fatal (#11426) 
([filecoin-project/lotus#11426](https://github.com/filecoin-project/lotus/pull/11426)) +- fix: build: an epoch is near an upgrade iff the upgrade is enabled (#11401) ([filecoin-project/lotus#11401](https://github.com/filecoin-project/lotus/pull/11401)) +- fix: eth: handle unresolvable addresses (#11433) ([filecoin-project/lotus#11433](https://github.com/filecoin-project/lotus/pull/11433)) +- fix: eth: correctly encode and simplify native input/output encoding (#11382) ([filecoin-project/lotus#11382](https://github.com/filecoin-project/lotus/pull/11382)) +- fix: worker: listen for interrupt signals in GetStorageMinerAPI loop (#11309) ([filecoin-project/lotus#11309](https://github.com/filecoin-project/lotus/pull/11309)) +- fix: sync: iterate over returned messages directly (#11373) ([filecoin-project/lotus#11373](https://github.com/filecoin-project/lotus/pull/11373)) +- fix: miner: correct duration logs in mineOne ([filecoin-project/lotus#11241](https://github.com/filecoin-project/lotus/pull/11241)) +- fix: cli: Add print to unseal cmd ([filecoin-project/lotus#11271](https://github.com/filecoin-project/lotus/pull/11271)) +- fix: networking: avoid dialing when trying to handshake peers ([filecoin-project/lotus#11262](https://github.com/filecoin-project/lotus/pull/11262)) +- metric milliseconds computation with golang original method (#11403) ([filecoin-project/lotus#11403](https://github.com/filecoin-project/lotus/pull/11403)) +- feat: shed: fix blockstore prune (#11197) ([filecoin-project/lotus#11197](https://github.com/filecoin-project/lotus/pull/11197)) +- refactor:ffi: replace ClearLayerData with ClearCache (#11352) ([filecoin-project/lotus#11352](https://github.com/filecoin-project/lotus/pull/11352)) +- fix: api: compute gasUsedRatio based on max gas in the tipset (#11354) ([filecoin-project/lotus#11354](https://github.com/filecoin-project/lotus/pull/11354)) +- fix: api: compute the effective gas cost with the correct base-fee (#11357) 
([filecoin-project/lotus#11357](https://github.com/filecoin-project/lotus/pull/11357)) +- fix: api: return errors on failure to lookup an eth txn receipt (#11329) ([filecoin-project/lotus#11329](https://github.com/filecoin-project/lotus/pull/11329)) +- fix: api: exclude reverted events in `eth_getLogs` results (#11318) ([filecoin-project/lotus#11318](https://github.com/filecoin-project/lotus/pull/11318)) +- api: Add block param to eth_estimateGas ([filecoin-project/lotus#11462](https://github.com/filecoin-project/lotus/pull/11462)) +- opt: fix duplicate check exitcode ([filecoin-project/lotus#11171](https://github.com/filecoin-project/lotus/pull/11171)) +- fix: lotus-provider: show addresses in log ([filecoin-project/lotus#11490](https://github.com/filecoin-project/lotus/pull/11490)) +- fix: lotus-provider: Wait for the correct taskID ([filecoin-project/lotus#11493](https://github.com/filecoin-project/lotus/pull/11493)) +- harmony: Fix task reclaim on restart ([filecoin-project/lotus#11498](https://github.com/filecoin-project/lotus/pull/11498)) +- fix: lotus-provider: Fix log output format in wdPostTaskCmd ([filecoin-project/lotus#11504](https://github.com/filecoin-project/lotus/pull/11504)) +- fix: lp docsgen ([filecoin-project/lotus#11488](https://github.com/filecoin-project/lotus/pull/11488)) +- fix: lotus-provider do not suggest default layer ([filecoin-project/lotus#11486](https://github.com/filecoin-project/lotus/pull/11486)) +- feat: syncer: optimize syncFork for one-epoch forks ([filecoin-project/lotus#11533](https://github.com/filecoin-project/lotus/pull/11533)) +- fix: sync: do not include incoming in return of syncFork ([filecoin-project/lotus#11541](https://github.com/filecoin-project/lotus/pull/11541)) +- fix: wdpost: fix vanilla proof indexes ([filecoin-project/lotus#11550](https://github.com/filecoin-project/lotus/pull/11550)) +- feat: exchange: change GetBlocks to always fetch the requested number of tipsets 
([filecoin-project/lotus#11565](https://github.com/filecoin-project/lotus/pull/11565)) + +## Dependencies +- update go-libp2p to v0.31.0 ([filecoin-project/lotus#11225](https://github.com/filecoin-project/lotus/pull/11225)) +- deps: gostatetype (#11437) ([filecoin-project/lotus#11437](https://github.com/filecoin-project/lotus/pull/11437)) +- fix: deps: stop using go-libp2p deprecated peer.ID.Pretty ([filecoin-project/lotus#11263](https://github.com/filecoin-project/lotus/pull/11263)) +- chore:libp2p:update libp2p deps in release-v1.25.2 to v0.31.1 ([filecoin-project/lotus#11524](https://github.com/filecoin-project/lotus/pull/11524)) +- deps: update go-multiaddr to v0.12.0 ([filecoin-project/lotus#11524](https://github.com/filecoin-project/lotus/pull/11558)) +- dep: go-multi-address to v0.12.1 ([filecoin-project/lotus#11564](https://github.com/filecoin-project/lotus/pull/11564)) + +## Others +- chore: update FFI (#11431) ([filecoin-project/lotus#11431](https://github.com/filecoin-project/lotus/pull/11431)) +- chore: build: bump master to v1.25.1-dev ([filecoin-project/lotus#11450](https://github.com/filecoin-project/lotus/pull/11450)) +- chore: releases :merge releases into master ([filecoin-project/lotus#11448](https://github.com/filecoin-project/lotus/pull/11448)) +- chore: actors: update v12 to the final release ([filecoin-project/lotus#11440](https://github.com/filecoin-project/lotus/pull/11440)) +- chore: Remove ipfs main bootstrap nodes (#11200) ([filecoin-project/lotus#11200](https://github.com/filecoin-project/lotus/pull/11200)) +- Remove PL's european bootstrap nodes from mainnet.pi ([filecoin-project/lotus#11315](https://github.com/filecoin-project/lotus/pull/11315)) +- chore: deps: update to go-state-types v0.12.7 ([filecoin-project/lotus#11428](https://github.com/filecoin-project/lotus/pull/11428)) +- fix: Add .vscode to gitignore ([filecoin-project/lotus#11275](https://github.com/filecoin-project/lotus/pull/11275)) +- fix: test: temporarily exempt 
SynthPorep constants from test ([filecoin-project/lotus#11259](https://github.com/filecoin-project/lotus/pull/11259)) +- feat: skip TestSealAndVerify3 until it's fixed ([filecoin-project/lotus#11230](https://github.com/filecoin-project/lotus/pull/11230)) +- Update RELEASE_ISSUE_TEMPLATE.md ([filecoin-project/lotus#11250](https://github.com/filecoin-project/lotus/pull/11250)) +- fix: config: Update ColdStoreType comments ([filecoin-project/lotus#11274](https://github.com/filecoin-project/lotus/pull/11274)) +- readme: bump up golang version (#11347) ([filecoin-project/lotus#11347](https://github.com/filecoin-project/lotus/pull/11347)) +- chore: watermelon: upgrade epoch ([filecoin-project/lotus#11374](https://github.com/filecoin-project/lotus/pull/11374)) +- add support for v12 check invariants and also a default case to reduce future confusion (#11371) ([filecoin-project/lotus#11371](https://github.com/filecoin-project/lotus/pull/11371)) +- test: drand: switch tests to drand testnet (from devnet) (#11359) ([filecoin-project/lotus#11359](https://github.com/filecoin-project/lotus/pull/11359)) +- feat: chain: light-weight patch to fix calibrationnet again by removing move_partitions from built-in actors (#11409) ([filecoin-project/lotus#11409](https://github.com/filecoin-project/lotus/pull/11409)) +- chore: cli: Revert move-partitions cmd ([filecoin-project/lotus#11408](https://github.com/filecoin-project/lotus/pull/11408)) +- chore: forward-port calibnet hotfix to master ([filecoin-project/lotus#11407](https://github.com/filecoin-project/lotus/pull/11407)) +- fix: migration: set premigration to 90 minutes ([filecoin-project/lotus#11395](https://github.com/filecoin-project/lotus/pull/11395)) +- feat: chain: light-weight patch to fix calibrationnet (#11363) ([filecoin-project/lotus#11363](https://github.com/filecoin-project/lotus/pull/11363)) +- chore: merge feat/nv21 into master ([filecoin-project/lotus#11336](https://github.com/filecoin-project/lotus/pull/11336)) +- 
docs: Link the release section in the release flow doc ([filecoin-project/lotus#11299](https://github.com/filecoin-project/lotus/pull/11299)) +- fix: ci: fetch params for the storage unit tests ([filecoin-project/lotus#11441](https://github.com/filecoin-project/lotus/pull/11441)) +- Update mainnet.pi ([filecoin-project/lotus#11288](https://github.com/filecoin-project/lotus/pull/11288)) +- add go linter - "unused" (#11235) ([filecoin-project/lotus#11235](https://github.com/filecoin-project/lotus/pull/11235)) +- Fix/texts (#11298) ([filecoin-project/lotus#11298](https://github.com/filecoin-project/lotus/pull/11298)) +- fix typo in rate-limit flag description (#11316) ([filecoin-project/lotus#11316](https://github.com/filecoin-project/lotus/pull/11316)) +- eth_filter flake debug ([filecoin-project/lotus#11261](https://github.com/filecoin-project/lotus/pull/11261)) +- fix: sealing: typo in FinalizeReplicaUpdate ([filecoin-project/lotus#11255](https://github.com/filecoin-project/lotus/pull/11255)) +- chore: slice loop replace (#11349) ([filecoin-project/lotus#11349](https://github.com/filecoin-project/lotus/pull/11349)) +- backport: docker build fix for v1.25.2 ([filecoin-project/lotus#11560](https://github.com/filecoin-project/lotus/pull/11560)) + +## Contributors + +| Contributor | Commits | Lines ± | Files Changed | +|-------------|---------|---------|---------------| +| Andrew Jackson (Ajax) | 161 | +24328/-12464 | 4148 | +| Łukasz Magiera | 99 | +5238/-2690 | 260 | +| Shrenuj Bansal | 27 | +3402/-1265 | 76 | +| Fridrik Asmundsson | 15 | +1148/-307 | 58 | +| Steven Allen | 15 | +674/-337 | 35 | +| Ian Norden | 1 | +625/-3 | 4 | +| Aarsh Shah | 4 | +227/-167 | 14 | +| Phi | 19 | +190/-183 | 32 | +| Aayush Rajasekaran | 3 | +291/-56 | 16 | +| Mikers | 2 | +76/-262 | 19 | +| Aayush | 14 | +111/-59 | 21 | +| Friðrik Ásmundsson | 1 | +101/-1 | 2 | +| Alejandro Criado-Pérez | 1 | +36/-36 | 27 | +| Jie Hou | 5 | +36/-10 | 5 | +| Florian RUEN | 2 | +24/-19 | 5 | +| Phi-rjan 
| 3 | +20/-8 | 3 | +| Icarus9913 | 1 | +11/-11 | 6 | +| Jiaying Wang | 3 | +8/-7 | 5 | +| guangwu | 1 | +3/-10 | 2 | +| Marten Seemann | 1 | +6/-6 | 2 | +| simlecode | 1 | +0/-6 | 2 | +| GlacierWalrus | 2 | +0/-5 | 2 | +| Anton Evangelatov | 1 | +2/-2 | 1 | +| Ales Dumikau | 3 | +2/-2 | 3 | +| renran | 1 | +2/-1 | 1 | +| Volker Mische | 1 | +1/-1 | 1 | +| Icarus Wu | 1 | +1/-1 | 1 | +| Hubert | 1 | +1/-1 | 1 | +| Aloxaf | 1 | +1/-1 | 1 | +| Alejandro | 1 | +1/-1 | 1 | +| lazavikmaria | 1 | +1/-0 | 1 | + # v1.25.1 / 2023-12-09 This is a **highly recommended PATCH RELEASE.** The patch release fixes the issue were node operators trying to catch up sync were unable to sync large message blocks/epochs due to an increased number of messages on the network. @@ -9,6 +196,7 @@ This patch release allows for up to 10k messages per block. Additionally, it int ## Improvements - fix: exchange: allow up to 10k messages per block ([filecoin-project/lotus#11506](https://github.com/filecoin-project/lotus/pull/11506)) +>>>>>>> releases # v 1.25.0 / 2023-11-22 @@ -21,7 +209,7 @@ The full list of [protocol improvements delivered in the network upgrade can be ## ☢️ Upgrade Warnings ☢️ - Read through the [changelog of the mandatory v1.24.0 release](https://github.com/filecoin-project/lotus/releases/tag/v1.24.0). Especially the `Migration` and `v12 Builtin Actor Bundle` sections. -- Please remove and clone a new Lotus repo (`git clone https://github.com/filecoin-project/lotus.git`) when upgrading to this release. +- Please remove and clone a new Lotus repo (`git clone https://github.com/filecoin-project/lotus.git`) when upgrading to this release. - This feature release requires a minimum Go version of v1.20.7 or higher to successfully build Lotus. Go version 1.21.x is not supported yet. 
- EthRPC providers, please check out the [new tracing API to Lotus RPC](https://github.com/filecoin-project/lotus/pull/11100) @@ -94,6 +282,9 @@ Lotus-workers can now be built to leverage the SupraSeal C2 sealing optimization - fix(client): single-root error message ([filecoin-project/lotus#11214](https://github.com/filecoin-project/lotus/pull/11214)) - fix: worker: Convert `DC_[SectorSize]_[ResourceRestriction]` if set ([filecoin-project/lotus#11224](https://github.com/filecoin-project/lotus/pull/11224)) - chore: backport #11338 onto release/v1.25.0 ([filecoin-project/lotus#11350](https://github.com/filecoin-project/lotus/pull/11350)) +- fix: lotus-provider: lotus-provider msg sending ([filecoin-project/lotus#11480](https://github.com/filecoin-project/lotus/pull/11480)) +- fix: lotus-provider: Fix winning PoSt ([filecoin-project/lotus#11483](https://github.com/filecoin-project/lotus/pull/11483)) +- chore: fix: sql Scan cannot write to an object ([filecoin-project/lotus#11487](https://github.com/filecoin-project/lotus/pull/11487)) ## Dependencies - deps: update go-libp2p to v0.28.1 ([filecoin-project/lotus#10998](https://github.com/filecoin-project/lotus/pull/10998)) @@ -183,7 +374,7 @@ account bafk2bzaceboftg75mdiba7xbo2i3uvgtca4brhnr3u5ptihonixgpnrvhpxoa init bafk2bzacebllyegx5r6lggf6ymyetbp7amacwpuxakhtjvjtvoy2bfkzk3vms ``` -## Migration +## Migration We are expecting a heavier than normal state migration for this upgrade due to the amount of state changes introduced for miner sector info. (This is a similar migration as the Shark upgrade, however, we have introduced a couple of migration performance optimizations since then for a smoother upgrade experience.) @@ -202,7 +393,7 @@ You can check out the [tutorial for benchmarking the network migration here.](ht ## BREAKING CHANGE -There is a new protocol limit on how many partition could be submited in one PoSt - if you have any customized tooling for batching PoSts, please update accordingly. 
+There is a new protocol limit on how many partition could be submited in one PoSt - if you have any customized tooling for batching PoSts, please update accordingly. - feat: limit PoSted partitions to 3 ([filecoin-project/lotus#11327](https://github.com/filecoin-project/lotus/pull/11327)) ## New features @@ -214,7 +405,7 @@ There is a new protocol limit on how many partition could be submited in one PoS ## Improvements - Backport: feat: sealing: Switch to calling PreCommitSectorBatch2 ([filecoin-project/lotus#11215](https://github.com/filecoin-project/lotus/pull/11215)) -- updated the boostrap nodes +- updated the boostrap nodes ## Dependencies - github.com/filecoin-project/go-amt-ipld/v4 (v4.0.0 -> v4.2.0) @@ -224,9 +415,9 @@ There is a new protocol limit on how many partition could be submited in one PoS - chore: deps: update libp2p to v0.30.0 #11434 -## Snapshots +## Snapshots -The [Forest team](https://filecoinproject.slack.com/archives/C029LPZ5N73) at Chainsafe has launched a brand new lightweight snapshot service that is backed up by forest nodes! This is a great alternative service along with the fil-infra one, and it is compatible with lotus! We recommend lotus users to check it out [here](https://docs.filecoin.io/networks/mainnet#resources)! +The [Forest team](https://filecoinproject.slack.com/archives/C029LPZ5N73) at Chainsafe has launched a brand new lightweight snapshot service that is backed up by forest nodes! This is a great alternative service along with the fil-infra one, and it is compatible with lotus! We recommend lotus users to check it out [here](https://docs.filecoin.io/networks/mainnet#resources)! 
@@ -255,7 +446,7 @@ This feature release requires a **minimum Go version of v1.19.12 or higher to su - feat: sealing: flag to run data_cid untied from addpiece ([filecoin-project/lotus#10797](https://github.com/filecoin-project/lotus/pull/10797)) - feat: Lotus Gateway: add MpoolPending, ChainGetBlock and MinerGetBaseInfo ([filecoin-project/lotus#10929](https://github.com/filecoin-project/lotus/pull/10929)) -## Improvements && Bug Fixe +## Improvements && Bug Fixes - chore: update ffi & fvm ([filecoin-project/lotus#11040](https://github.com/filecoin-project/lotus/pull/11040)) - feat: Make sure we don't store duplidate actor events caused to reorgs in events.db ([filecoin-project/lotus#11015](https://github.com/filecoin-project/lotus/pull/11015)) - sealing: Use only non-assigned deals when selecting snap sectors ([filecoin-project/lotus#11002](https://github.com/filecoin-project/lotus/pull/11002)) diff --git a/Dockerfile b/Dockerfile index 00930fb0f..c9750a71f 100644 --- a/Dockerfile +++ b/Dockerfile @@ -109,6 +109,7 @@ COPY --from=lotus-builder /opt/filecoin/lotus-wallet /usr/local/bin/ COPY --from=lotus-builder /opt/filecoin/lotus-gateway /usr/local/bin/ COPY --from=lotus-builder /opt/filecoin/lotus-miner /usr/local/bin/ COPY --from=lotus-builder /opt/filecoin/lotus-worker /usr/local/bin/ +COPY --from=lotus-builder /opt/filecoin/lotus-provider /usr/local/bin/ COPY --from=lotus-builder /opt/filecoin/lotus-stats /usr/local/bin/ COPY --from=lotus-builder /opt/filecoin/lotus-fountain /usr/local/bin/ @@ -117,11 +118,13 @@ RUN mkdir /var/lib/lotus RUN mkdir /var/lib/lotus-miner RUN mkdir /var/lib/lotus-worker RUN mkdir /var/lib/lotus-wallet +RUN mkdir /var/lib/lotus-provider RUN chown fc: /var/tmp/filecoin-proof-parameters RUN chown fc: /var/lib/lotus RUN chown fc: /var/lib/lotus-miner RUN chown fc: /var/lib/lotus-worker RUN chown fc: /var/lib/lotus-wallet +RUN chown fc: /var/lib/lotus-provider VOLUME /var/tmp/filecoin-proof-parameters @@ -129,6 +132,7 @@ VOLUME 
/var/lib/lotus VOLUME /var/lib/lotus-miner VOLUME /var/lib/lotus-worker VOLUME /var/lib/lotus-wallet +VOLUME /var/lib/lotus-provider EXPOSE 1234 EXPOSE 2345 diff --git a/LOTUS_RELEASE_FLOW.md b/LOTUS_RELEASE_FLOW.md index 4a327125a..8bb02d3c5 100644 --- a/LOTUS_RELEASE_FLOW.md +++ b/LOTUS_RELEASE_FLOW.md @@ -73,7 +73,7 @@ All releases under an odd minor version number indicate **feature releases**. Th Feature releases include new development and bug fixes. They are not mandatory, but still highly recommended, **as they may contain critical security fixes**. Note that some of these releases may be very small patch releases that include critical hotfixes. There is no way to distinguish between a bug fix release and a feature release on the "feature" version. Both cases will use the "patch" version number. -We aim to ship a new feature release of the Lotus software from our development (master) branch every 3 weeks, so users can expect a regular cadence of Lotus feature releases. Note that mandatory releases for network upgrades may disrupt this schedule. For more, see the Release Cycle section (TODO: Link). +We aim to ship a new feature release of the Lotus software from our development (master) branch every 3 weeks, so users can expect a regular cadence of Lotus feature releases. Note that mandatory releases for network upgrades may disrupt this schedule. For more, see the [Release Cycle section](#release-cycle). 
### Examples Scenarios diff --git a/Makefile b/Makefile index b94c13c0d..a17b50d09 100644 --- a/Makefile +++ b/Makefile @@ -66,7 +66,7 @@ CLEAN+=build/.update-modules deps: $(BUILD_DEPS) .PHONY: deps -build-devnets: build lotus-seed lotus-shed +build-devnets: build lotus-seed lotus-shed lotus-provider .PHONY: build-devnets debug: GOFLAGS+=-tags=debug @@ -97,6 +97,15 @@ lotus-miner: $(BUILD_DEPS) .PHONY: lotus-miner BINS+=lotus-miner +lotus-provider: $(BUILD_DEPS) + rm -f lotus-provider + $(GOCC) build $(GOFLAGS) -o lotus-provider ./cmd/lotus-provider +.PHONY: lotus-provider +BINS+=lotus-provider + +lp2k: GOFLAGS+=-tags=2k +lp2k: lotus-provider + lotus-worker: $(BUILD_DEPS) rm -f lotus-worker $(GOCC) build $(GOFLAGS) -o lotus-worker ./cmd/lotus-worker @@ -115,13 +124,13 @@ lotus-gateway: $(BUILD_DEPS) .PHONY: lotus-gateway BINS+=lotus-gateway -build: lotus lotus-miner lotus-worker +build: lotus lotus-miner lotus-worker @[[ $$(type -P "lotus") ]] && echo "Caution: you have \ an existing lotus binary in your PATH. This may cause problems if you don't run 'sudo make install'" || true .PHONY: build -install: install-daemon install-miner install-worker +install: install-daemon install-miner install-worker install-provider install-daemon: install -C ./lotus /usr/local/bin/lotus @@ -129,6 +138,9 @@ install-daemon: install-miner: install -C ./lotus-miner /usr/local/bin/lotus-miner +install-provider: + install -C ./lotus-provider /usr/local/bin/lotus-provider + install-worker: install -C ./lotus-worker /usr/local/bin/lotus-worker @@ -144,6 +156,9 @@ uninstall-daemon: uninstall-miner: rm -f /usr/local/bin/lotus-miner +uninstall-provider: + rm -f /usr/local/bin/lotus-provider + uninstall-worker: rm -f /usr/local/bin/lotus-worker @@ -241,6 +256,14 @@ install-miner-service: install-miner install-daemon-service @echo @echo "lotus-miner service installed. 
Don't forget to run 'sudo systemctl start lotus-miner' to start it and 'sudo systemctl enable lotus-miner' for it to be enabled on startup." +install-provider-service: install-provider install-daemon-service + mkdir -p /etc/systemd/system + mkdir -p /var/log/lotus + install -C -m 0644 ./scripts/lotus-provider.service /etc/systemd/system/lotus-provider.service + systemctl daemon-reload + @echo + @echo "lotus-provider service installed. Don't forget to run 'sudo systemctl start lotus-provider' to start it and 'sudo systemctl enable lotus-provider' for it to be enabled on startup." + install-main-services: install-miner-service install-all-services: install-main-services @@ -259,6 +282,12 @@ clean-miner-service: rm -f /etc/systemd/system/lotus-miner.service systemctl daemon-reload +clean-provider-service: + -systemctl stop lotus-provider + -systemctl disable lotus-provider + rm -f /etc/systemd/system/lotus-provider.service + systemctl daemon-reload + clean-main-services: clean-daemon-service clean-all-services: clean-main-services @@ -294,7 +323,8 @@ actors-code-gen: $(GOCC) run ./chain/actors/agen $(GOCC) fmt ./... 
-actors-gen: actors-code-gen fiximports +actors-gen: actors-code-gen + ./scripts/fiximports .PHONY: actors-gen bundle-gen: @@ -328,7 +358,7 @@ docsgen-md-bin: api-gen actors-gen docsgen-openrpc-bin: api-gen actors-gen $(GOCC) build $(GOFLAGS) -o docgen-openrpc ./api/docgen-openrpc/cmd -docsgen-md: docsgen-md-full docsgen-md-storage docsgen-md-worker +docsgen-md: docsgen-md-full docsgen-md-storage docsgen-md-worker docsgen-md-provider docsgen-md-full: docsgen-md-bin ./docgen-md "api/api_full.go" "FullNode" "api" "./api" > documentation/en/api-v1-unstable-methods.md @@ -337,6 +367,8 @@ docsgen-md-storage: docsgen-md-bin ./docgen-md "api/api_storage.go" "StorageMiner" "api" "./api" > documentation/en/api-v0-methods-miner.md docsgen-md-worker: docsgen-md-bin ./docgen-md "api/api_worker.go" "Worker" "api" "./api" > documentation/en/api-v0-methods-worker.md +docsgen-md-provider: docsgen-md-bin + ./docgen-md "api/api_lp.go" "Provider" "api" "./api" > documentation/en/api-v0-methods-provider.md docsgen-openrpc: docsgen-openrpc-full docsgen-openrpc-storage docsgen-openrpc-worker docsgen-openrpc-gateway @@ -354,21 +386,23 @@ docsgen-openrpc-gateway: docsgen-openrpc-bin fiximports: ./scripts/fiximports -gen: actors-code-gen type-gen cfgdoc-gen docsgen api-gen circleci fiximports +gen: actors-code-gen type-gen cfgdoc-gen docsgen api-gen circleci + ./scripts/fiximports @echo ">>> IF YOU'VE MODIFIED THE CLI OR CONFIG, REMEMBER TO ALSO RUN 'make docsgen-cli'" .PHONY: gen jen: gen -snap: lotus lotus-miner lotus-worker +snap: lotus lotus-miner lotus-worker lotus-provider snapcraft # snapcraft upload ./lotus_*.snap # separate from gen because it needs binaries -docsgen-cli: lotus lotus-miner lotus-worker +docsgen-cli: lotus lotus-miner lotus-worker lotus-provider python3 ./scripts/generate-lotus-cli.py ./lotus config default > documentation/en/default-lotus-config.toml ./lotus-miner config default > documentation/en/default-lotus-miner-config.toml + ./lotus-provider config default > 
documentation/en/default-lotus-provider-config.toml .PHONY: docsgen-cli print-%: diff --git a/README.md b/README.md index f6ac75932..c944d41e6 100644 --- a/README.md +++ b/README.md @@ -71,10 +71,10 @@ For other distributions you can find the required dependencies [here.](https://l #### Go -To build Lotus, you need a working installation of [Go 1.19.12 or higher](https://golang.org/dl/): +To build Lotus, you need a working installation of [Go 1.20.10 or higher](https://golang.org/dl/): ```bash -wget -c https://golang.org/dl/go1.19.12.linux-amd64.tar.gz -O - | sudo tar -xz -C /usr/local +wget -c https://golang.org/dl/go1.20.10.linux-amd64.tar.gz -O - | sudo tar -xz -C /usr/local ``` **TIP:** @@ -133,6 +133,8 @@ Note: The default branch `master` is the dev branch where the latest new feature 6. You should now have Lotus installed. You can now [start the Lotus daemon and sync the chain](https://lotus.filecoin.io/lotus/install/linux/#start-the-lotus-daemon-and-sync-the-chain). +7. (Optional) Follow the [Setting Up Prometheus and Grafana](https://github.com/filecoin-project/lotus/tree/master/metrics/README.md) guide for detailed instructions on setting up a working monitoring system running against a local running lotus node. 
+ ## License Dual-licensed under [MIT](https://github.com/filecoin-project/lotus/blob/master/LICENSE-MIT) + [Apache 2.0](https://github.com/filecoin-project/lotus/blob/master/LICENSE-APACHE) diff --git a/api/api_full.go b/api/api_full.go index f919bc13b..4ae2ea531 100644 --- a/api/api_full.go +++ b/api/api_full.go @@ -824,7 +824,7 @@ type FullNode interface { EthFeeHistory(ctx context.Context, p jsonrpc.RawParams) (ethtypes.EthFeeHistory, error) //perm:read EthMaxPriorityFeePerGas(ctx context.Context) (ethtypes.EthBigInt, error) //perm:read - EthEstimateGas(ctx context.Context, tx ethtypes.EthCall) (ethtypes.EthUint64, error) //perm:read + EthEstimateGas(ctx context.Context, p jsonrpc.RawParams) (ethtypes.EthUint64, error) //perm:read EthCall(ctx context.Context, tx ethtypes.EthCall, blkParam ethtypes.EthBlockNumberOrHash) (ethtypes.EthBytes, error) //perm:read EthSendRawTransaction(ctx context.Context, rawTx ethtypes.EthBytes) (ethtypes.EthHash, error) //perm:read diff --git a/api/api_gateway.go b/api/api_gateway.go index 27e725457..238bf43ab 100644 --- a/api/api_gateway.go +++ b/api/api_gateway.go @@ -114,7 +114,7 @@ type Gateway interface { EthGasPrice(ctx context.Context) (ethtypes.EthBigInt, error) EthFeeHistory(ctx context.Context, p jsonrpc.RawParams) (ethtypes.EthFeeHistory, error) EthMaxPriorityFeePerGas(ctx context.Context) (ethtypes.EthBigInt, error) - EthEstimateGas(ctx context.Context, tx ethtypes.EthCall) (ethtypes.EthUint64, error) + EthEstimateGas(ctx context.Context, p jsonrpc.RawParams) (ethtypes.EthUint64, error) EthCall(ctx context.Context, tx ethtypes.EthCall, blkParam ethtypes.EthBlockNumberOrHash) (ethtypes.EthBytes, error) EthSendRawTransaction(ctx context.Context, rawTx ethtypes.EthBytes) (ethtypes.EthHash, error) EthGetLogs(ctx context.Context, filter *ethtypes.EthFilterSpec) (*ethtypes.EthFilterResult, error) diff --git a/api/api_lp.go b/api/api_lp.go new file mode 100644 index 000000000..8b58379f8 --- /dev/null +++ b/api/api_lp.go @@ 
-0,0 +1,10 @@ +package api + +import "context" + +type LotusProvider interface { + Version(context.Context) (Version, error) //perm:admin + + // Trigger shutdown + Shutdown(context.Context) error //perm:admin +} diff --git a/api/client/client.go b/api/client/client.go index 8b159c5b1..4d51221f9 100644 --- a/api/client/client.go +++ b/api/client/client.go @@ -15,6 +15,16 @@ import ( "github.com/filecoin-project/lotus/lib/rpcenc" ) +// NewProviderRpc creates a new http jsonrpc client. +func NewProviderRpc(ctx context.Context, addr string, requestHeader http.Header) (api.LotusProvider, jsonrpc.ClientCloser, error) { + var res v1api.LotusProviderStruct + + closer, err := jsonrpc.NewMergeClient(ctx, addr, "Filecoin", + api.GetInternalStructs(&res), requestHeader, jsonrpc.WithErrors(api.RPCErrors)) + + return &res, closer, err +} + // NewCommonRPCV0 creates a new http jsonrpc client. func NewCommonRPCV0(ctx context.Context, addr string, requestHeader http.Header) (api.CommonNet, jsonrpc.ClientCloser, error) { var res v0api.CommonNetStruct diff --git a/api/docgen/docgen.go b/api/docgen/docgen.go index 018629600..5a05c8d0e 100644 --- a/api/docgen/docgen.go +++ b/api/docgen/docgen.go @@ -432,6 +432,10 @@ func GetAPIType(name, pkg string) (i interface{}, t reflect.Type, permStruct []r i = &api.GatewayStruct{} t = reflect.TypeOf(new(struct{ api.Gateway })).Elem() permStruct = append(permStruct, reflect.TypeOf(api.GatewayStruct{}.Internal)) + case "Provider": + i = &api.LotusProviderStruct{} + t = reflect.TypeOf(new(struct{ api.LotusProvider })).Elem() + permStruct = append(permStruct, reflect.TypeOf(api.LotusProviderStruct{}.Internal)) default: panic("unknown type") } diff --git a/api/mocks/mock_full.go b/api/mocks/mock_full.go index 856d83813..92b719550 100644 --- a/api/mocks/mock_full.go +++ b/api/mocks/mock_full.go @@ -1042,7 +1042,7 @@ func (mr *MockFullNodeMockRecorder) EthChainId(arg0 interface{}) *gomock.Call { } // EthEstimateGas mocks base method. 
-func (m *MockFullNode) EthEstimateGas(arg0 context.Context, arg1 ethtypes.EthCall) (ethtypes.EthUint64, error) { +func (m *MockFullNode) EthEstimateGas(arg0 context.Context, arg1 jsonrpc.RawParams) (ethtypes.EthUint64, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "EthEstimateGas", arg0, arg1) ret0, _ := ret[0].(ethtypes.EthUint64) diff --git a/api/permissioned.go b/api/permissioned.go index 72d2239ee..f189cd78f 100644 --- a/api/permissioned.go +++ b/api/permissioned.go @@ -41,6 +41,12 @@ func PermissionedWorkerAPI(a Worker) Worker { return &out } +func PermissionedAPI[T, P any](a T) *P { + var out P + permissionedProxies(a, &out) + return &out +} + func PermissionedWalletAPI(a Wallet) Wallet { var out WalletStruct permissionedProxies(a, &out) diff --git a/api/proxy_gen.go b/api/proxy_gen.go index 2d1333495..6627a5afe 100644 --- a/api/proxy_gen.go +++ b/api/proxy_gen.go @@ -255,7 +255,7 @@ type FullNodeMethods struct { EthChainId func(p0 context.Context) (ethtypes.EthUint64, error) `perm:"read"` - EthEstimateGas func(p0 context.Context, p1 ethtypes.EthCall) (ethtypes.EthUint64, error) `perm:"read"` + EthEstimateGas func(p0 context.Context, p1 jsonrpc.RawParams) (ethtypes.EthUint64, error) `perm:"read"` EthFeeHistory func(p0 context.Context, p1 jsonrpc.RawParams) (ethtypes.EthFeeHistory, error) `perm:"read"` @@ -679,7 +679,7 @@ type GatewayMethods struct { EthChainId func(p0 context.Context) (ethtypes.EthUint64, error) `` - EthEstimateGas func(p0 context.Context, p1 ethtypes.EthCall) (ethtypes.EthUint64, error) `` + EthEstimateGas func(p0 context.Context, p1 jsonrpc.RawParams) (ethtypes.EthUint64, error) `` EthFeeHistory func(p0 context.Context, p1 jsonrpc.RawParams) (ethtypes.EthFeeHistory, error) `` @@ -831,6 +831,19 @@ type GatewayMethods struct { type GatewayStub struct { } +type LotusProviderStruct struct { + Internal LotusProviderMethods +} + +type LotusProviderMethods struct { + Shutdown func(p0 context.Context) error `perm:"admin"` + + Version func(p0 
context.Context) (Version, error) `perm:"admin"` +} + +type LotusProviderStub struct { +} + type NetStruct struct { Internal NetMethods } @@ -2134,14 +2147,14 @@ func (s *FullNodeStub) EthChainId(p0 context.Context) (ethtypes.EthUint64, error return *new(ethtypes.EthUint64), ErrNotSupported } -func (s *FullNodeStruct) EthEstimateGas(p0 context.Context, p1 ethtypes.EthCall) (ethtypes.EthUint64, error) { +func (s *FullNodeStruct) EthEstimateGas(p0 context.Context, p1 jsonrpc.RawParams) (ethtypes.EthUint64, error) { if s.Internal.EthEstimateGas == nil { return *new(ethtypes.EthUint64), ErrNotSupported } return s.Internal.EthEstimateGas(p0, p1) } -func (s *FullNodeStub) EthEstimateGas(p0 context.Context, p1 ethtypes.EthCall) (ethtypes.EthUint64, error) { +func (s *FullNodeStub) EthEstimateGas(p0 context.Context, p1 jsonrpc.RawParams) (ethtypes.EthUint64, error) { return *new(ethtypes.EthUint64), ErrNotSupported } @@ -4400,14 +4413,14 @@ func (s *GatewayStub) EthChainId(p0 context.Context) (ethtypes.EthUint64, error) return *new(ethtypes.EthUint64), ErrNotSupported } -func (s *GatewayStruct) EthEstimateGas(p0 context.Context, p1 ethtypes.EthCall) (ethtypes.EthUint64, error) { +func (s *GatewayStruct) EthEstimateGas(p0 context.Context, p1 jsonrpc.RawParams) (ethtypes.EthUint64, error) { if s.Internal.EthEstimateGas == nil { return *new(ethtypes.EthUint64), ErrNotSupported } return s.Internal.EthEstimateGas(p0, p1) } -func (s *GatewayStub) EthEstimateGas(p0 context.Context, p1 ethtypes.EthCall) (ethtypes.EthUint64, error) { +func (s *GatewayStub) EthEstimateGas(p0 context.Context, p1 jsonrpc.RawParams) (ethtypes.EthUint64, error) { return *new(ethtypes.EthUint64), ErrNotSupported } @@ -5214,6 +5227,28 @@ func (s *GatewayStub) Web3ClientVersion(p0 context.Context) (string, error) { return "", ErrNotSupported } +func (s *LotusProviderStruct) Shutdown(p0 context.Context) error { + if s.Internal.Shutdown == nil { + return ErrNotSupported + } + return s.Internal.Shutdown(p0) 
+} + +func (s *LotusProviderStub) Shutdown(p0 context.Context) error { + return ErrNotSupported +} + +func (s *LotusProviderStruct) Version(p0 context.Context) (Version, error) { + if s.Internal.Version == nil { + return *new(Version), ErrNotSupported + } + return s.Internal.Version(p0) +} + +func (s *LotusProviderStub) Version(p0 context.Context) (Version, error) { + return *new(Version), ErrNotSupported +} + func (s *NetStruct) ID(p0 context.Context) (peer.ID, error) { if s.Internal.ID == nil { return *new(peer.ID), ErrNotSupported @@ -7442,6 +7477,7 @@ var _ CommonNet = new(CommonNetStruct) var _ EthSubscriber = new(EthSubscriberStruct) var _ FullNode = new(FullNodeStruct) var _ Gateway = new(GatewayStruct) +var _ LotusProvider = new(LotusProviderStruct) var _ Net = new(NetStruct) var _ Signable = new(SignableStruct) var _ StorageMiner = new(StorageMinerStruct) diff --git a/api/types.go b/api/types.go index b1ec23f74..93ed4083f 100644 --- a/api/types.go +++ b/api/types.go @@ -56,9 +56,17 @@ type PubsubScore struct { Score *pubsub.PeerScoreSnapshot } +// MessageSendSpec contains optional fields which modify message sending behavior type MessageSendSpec struct { - MaxFee abi.TokenAmount + // MaxFee specifies a cap on network fees related to this message + MaxFee abi.TokenAmount + + // MsgUuid specifies a unique message identifier which can be used on node (or node cluster) + // level to prevent double-sends of messages even when nonce generation is not handled by sender MsgUuid uuid.UUID + + // MaximizeFeeCap makes message FeeCap be based entirely on MaxFee + MaximizeFeeCap bool } type MpoolMessageWhole struct { diff --git a/api/v1api/latest.go b/api/v1api/latest.go index aefb1543b..b8eeed2de 100644 --- a/api/v1api/latest.go +++ b/api/v1api/latest.go @@ -12,3 +12,5 @@ type RawFullNodeAPI FullNode func PermissionedFullAPI(a FullNode) FullNode { return api.PermissionedFullAPI(a) } + +type LotusProviderStruct = api.LotusProviderStruct diff --git a/api/version.go 
b/api/version.go index 9c2113578..e968bf93b 100644 --- a/api/version.go +++ b/api/version.go @@ -59,6 +59,8 @@ var ( MinerAPIVersion0 = newVer(1, 5, 0) WorkerAPIVersion0 = newVer(1, 7, 0) + + ProviderAPIVersion0 = newVer(1, 0, 0) ) //nolint:varcheck,deadcode diff --git a/blockstore/idstore.go b/blockstore/idstore.go index fb575dca7..a10aee575 100644 --- a/blockstore/idstore.go +++ b/blockstore/idstore.go @@ -183,3 +183,17 @@ func (b *idstore) Close() error { func (b *idstore) Flush(ctx context.Context) error { return b.bs.Flush(ctx) } + +func (b *idstore) CollectGarbage(ctx context.Context, options ...BlockstoreGCOption) error { + if bs, ok := b.bs.(BlockstoreGC); ok { + return bs.CollectGarbage(ctx, options...) + } + return xerrors.Errorf("not supported") +} + +func (b *idstore) GCOnce(ctx context.Context, options ...BlockstoreGCOption) error { + if bs, ok := b.bs.(BlockstoreGCOnce); ok { + return bs.GCOnce(ctx, options...) + } + return xerrors.Errorf("not supported") +} diff --git a/blockstore/splitstore/splitstore.go b/blockstore/splitstore/splitstore.go index 1f1ba0e99..c1a95c8b0 100644 --- a/blockstore/splitstore/splitstore.go +++ b/blockstore/splitstore/splitstore.go @@ -182,7 +182,6 @@ type SplitStore struct { compactionIndex int64 pruneIndex int64 - onlineGCCnt int64 ctx context.Context cancel func() diff --git a/blockstore/splitstore/splitstore_compact.go b/blockstore/splitstore/splitstore_compact.go index 534565bf3..47caca886 100644 --- a/blockstore/splitstore/splitstore_compact.go +++ b/blockstore/splitstore/splitstore_compact.go @@ -66,8 +66,9 @@ var ( ) const ( - batchSize = 16384 - cidKeySize = 128 + batchSize = 16384 + cidKeySize = 128 + purgeWorkSliceDuration = time.Second ) func (s *SplitStore) HeadChange(_, apply []*types.TipSet) error { @@ -1372,9 +1373,21 @@ func (s *SplitStore) purge(coldr *ColdSetReader, checkpoint *Checkpoint, markSet return err } + now := time.Now() + err := coldr.ForEach(func(c cid.Cid) error { batch = append(batch, c) if 
len(batch) == batchSize { + // add some time slicing to the purge as this a very disk I/O heavy operation that + // requires write access to txnLk that may starve other operations that require + // access to the blockstore. + elapsed := time.Since(now) + if elapsed > purgeWorkSliceDuration { + // work 1 slice, sleep 4 slices, or 20% utilization + time.Sleep(4 * elapsed) + now = time.Now() + } + return deleteBatch() } diff --git a/blockstore/splitstore/splitstore_test.go b/blockstore/splitstore/splitstore_test.go index 63e77b47e..1b821654d 100644 --- a/blockstore/splitstore/splitstore_test.go +++ b/blockstore/splitstore/splitstore_test.go @@ -3,9 +3,9 @@ package splitstore import ( "context" + "crypto/rand" "errors" "fmt" - "math/rand" "sync" "sync/atomic" "testing" diff --git a/build/bootstrap/mainnet.pi b/build/bootstrap/mainnet.pi index f053d4e29..7838158de 100644 --- a/build/bootstrap/mainnet.pi +++ b/build/bootstrap/mainnet.pi @@ -1,9 +1,6 @@ /dns4/bootstrap-0.mainnet.filops.net/tcp/1347/p2p/12D3KooWCVe8MmsEMes2FzgTpt9fXtmCY7wrq91GRiaC8PHSCCBj /dns4/bootstrap-1.mainnet.filops.net/tcp/1347/p2p/12D3KooWCwevHg1yLCvktf2nvLu7L9894mcrJR4MsBCcm4syShVc /dns4/bootstrap-2.mainnet.filops.net/tcp/1347/p2p/12D3KooWEWVwHGn2yR36gKLozmb4YjDJGerotAPGxmdWZx2nxMC4 -/dns4/bootstrap-3.mainnet.filops.net/tcp/1347/p2p/12D3KooWKhgq8c7NQ9iGjbyK7v7phXvG6492HQfiDaGHLHLQjk7R -/dns4/bootstrap-4.mainnet.filops.net/tcp/1347/p2p/12D3KooWL6PsFNPhYftrJzGgF5U18hFoaVhfGk7xwzD8yVrHJ3Uc -/dns4/bootstrap-5.mainnet.filops.net/tcp/1347/p2p/12D3KooWLFynvDQiUpXoHroV1YxKHhPJgysQGH2k3ZGwtWzR4dFH /dns4/bootstrap-6.mainnet.filops.net/tcp/1347/p2p/12D3KooWP5MwCiqdMETF9ub1P3MbCvQCcfconnYHbWg6sUJcDRQQ /dns4/bootstrap-7.mainnet.filops.net/tcp/1347/p2p/12D3KooWRs3aY1p3juFjPy8gPN95PEQChm2QKGUCAdcDCC4EBMKf /dns4/bootstrap-8.mainnet.filops.net/tcp/1347/p2p/12D3KooWScFR7385LTyR4zU1bYdzSiiAb5rnNABfVahPvVSzyTkR @@ -11,7 +8,5 @@ 
/dns4/bootstrap-0.starpool.in/tcp/12757/p2p/12D3KooWGHpBMeZbestVEWkfdnC9u7p6uFHXL1n7m1ZBqsEmiUzz /dns4/bootstrap-1.starpool.in/tcp/12757/p2p/12D3KooWQZrGH1PxSNZPum99M1zNvjNFM33d1AAu5DcvdHptuU7u /dns4/node.glif.io/tcp/1235/p2p/12D3KooWBF8cpp65hp2u9LK5mh19x67ftAam84z9LsfaquTDSBpt -/dns4/bootstrap-0.ipfsmain.cn/tcp/34721/p2p/12D3KooWQnwEGNqcM2nAcPtRR9rAX8Hrg4k9kJLCHoTR5chJfz6d -/dns4/bootstrap-1.ipfsmain.cn/tcp/34723/p2p/12D3KooWMKxMkD5DMpSWsW7dBddKxKT7L2GgbNuckz9otxvkvByP /dns4/bootstarp-0.1475.io/tcp/61256/p2p/12D3KooWRzCVDwHUkgdK7eRgnoXbjDAELhxPErjHzbRLguSV1aRt /dns4/bootstrap-venus.mainnet.filincubator.com/tcp/8888/p2p/QmQu8C6deXwKvJP2D8B6QGyhngc3ZiDnFzEHBDx8yeBXST diff --git a/build/isnearupgrade.go b/build/isnearupgrade.go index 4273f0e9e..74975780f 100644 --- a/build/isnearupgrade.go +++ b/build/isnearupgrade.go @@ -5,5 +5,8 @@ import ( ) func IsNearUpgrade(epoch, upgradeEpoch abi.ChainEpoch) bool { + if upgradeEpoch < 0 { + return false + } return epoch > upgradeEpoch-Finality && epoch < upgradeEpoch+Finality } diff --git a/build/openrpc/full.json.gz b/build/openrpc/full.json.gz index 4e8edaee0..dbfbbe1f0 100644 Binary files a/build/openrpc/full.json.gz and b/build/openrpc/full.json.gz differ diff --git a/build/openrpc/gateway.json.gz b/build/openrpc/gateway.json.gz index 3d83445ed..bde1ec8f4 100644 Binary files a/build/openrpc/gateway.json.gz and b/build/openrpc/gateway.json.gz differ diff --git a/build/openrpc/miner.json.gz b/build/openrpc/miner.json.gz index 0f75197d7..b5eb84c2f 100644 Binary files a/build/openrpc/miner.json.gz and b/build/openrpc/miner.json.gz differ diff --git a/build/openrpc/worker.json.gz b/build/openrpc/worker.json.gz index cbbe526db..63b778855 100644 Binary files a/build/openrpc/worker.json.gz and b/build/openrpc/worker.json.gz differ diff --git a/build/version.go b/build/version.go index dbf3c96f4..ebd744f8b 100644 --- a/build/version.go +++ b/build/version.go @@ -37,7 +37,7 @@ func BuildTypeString() string { } // BuildVersion is the 
local build version -const BuildVersion = "1.25.1" +const BuildVersion = "1.25.2" func UserVersion() string { if os.Getenv("LOTUS_VERSION_IGNORE_COMMIT") == "1" { diff --git a/chain/actors/builtin/miner/state.go.template b/chain/actors/builtin/miner/state.go.template index b322b2283..3c7f05d9a 100644 --- a/chain/actors/builtin/miner/state.go.template +++ b/chain/actors/builtin/miner/state.go.template @@ -72,7 +72,7 @@ func (s *state{{.v}}) AvailableBalance(bal abi.TokenAmount) (available abi.Token available = abi.NewTokenAmount(0) } }() - // this panics if the miner doesnt have enough funds to cover their locked pledge + // this panics if the miner doesn't have enough funds to cover their locked pledge available{{if (ge .v 2)}}, err{{end}} = s.GetAvailableBalance(bal) return available, err } diff --git a/chain/actors/builtin/miner/v0.go b/chain/actors/builtin/miner/v0.go index 7d5eaf8e0..5301ed1cb 100644 --- a/chain/actors/builtin/miner/v0.go +++ b/chain/actors/builtin/miner/v0.go @@ -62,7 +62,7 @@ func (s *state0) AvailableBalance(bal abi.TokenAmount) (available abi.TokenAmoun available = abi.NewTokenAmount(0) } }() - // this panics if the miner doesnt have enough funds to cover their locked pledge + // this panics if the miner doesn't have enough funds to cover their locked pledge available = s.GetAvailableBalance(bal) return available, err } diff --git a/chain/actors/builtin/miner/v10.go b/chain/actors/builtin/miner/v10.go index 4d47ba396..53dc90b45 100644 --- a/chain/actors/builtin/miner/v10.go +++ b/chain/actors/builtin/miner/v10.go @@ -62,7 +62,7 @@ func (s *state10) AvailableBalance(bal abi.TokenAmount) (available abi.TokenAmou available = abi.NewTokenAmount(0) } }() - // this panics if the miner doesnt have enough funds to cover their locked pledge + // this panics if the miner doesn't have enough funds to cover their locked pledge available, err = s.GetAvailableBalance(bal) return available, err } diff --git a/chain/actors/builtin/miner/v11.go 
b/chain/actors/builtin/miner/v11.go index a3ffd606f..11a91c26b 100644 --- a/chain/actors/builtin/miner/v11.go +++ b/chain/actors/builtin/miner/v11.go @@ -62,7 +62,7 @@ func (s *state11) AvailableBalance(bal abi.TokenAmount) (available abi.TokenAmou available = abi.NewTokenAmount(0) } }() - // this panics if the miner doesnt have enough funds to cover their locked pledge + // this panics if the miner doesn't have enough funds to cover their locked pledge available, err = s.GetAvailableBalance(bal) return available, err } diff --git a/chain/actors/builtin/miner/v12.go b/chain/actors/builtin/miner/v12.go index 787da7d0f..90ecc97fd 100644 --- a/chain/actors/builtin/miner/v12.go +++ b/chain/actors/builtin/miner/v12.go @@ -62,7 +62,7 @@ func (s *state12) AvailableBalance(bal abi.TokenAmount) (available abi.TokenAmou available = abi.NewTokenAmount(0) } }() - // this panics if the miner doesnt have enough funds to cover their locked pledge + // this panics if the miner doesn't have enough funds to cover their locked pledge available, err = s.GetAvailableBalance(bal) return available, err } diff --git a/chain/actors/builtin/miner/v2.go b/chain/actors/builtin/miner/v2.go index 14341ae38..5a81ad31f 100644 --- a/chain/actors/builtin/miner/v2.go +++ b/chain/actors/builtin/miner/v2.go @@ -61,7 +61,7 @@ func (s *state2) AvailableBalance(bal abi.TokenAmount) (available abi.TokenAmoun available = abi.NewTokenAmount(0) } }() - // this panics if the miner doesnt have enough funds to cover their locked pledge + // this panics if the miner doesn't have enough funds to cover their locked pledge available, err = s.GetAvailableBalance(bal) return available, err } diff --git a/chain/actors/builtin/miner/v3.go b/chain/actors/builtin/miner/v3.go index 52808da8c..aa1574cf4 100644 --- a/chain/actors/builtin/miner/v3.go +++ b/chain/actors/builtin/miner/v3.go @@ -62,7 +62,7 @@ func (s *state3) AvailableBalance(bal abi.TokenAmount) (available abi.TokenAmoun available = abi.NewTokenAmount(0) } }() 
- // this panics if the miner doesnt have enough funds to cover their locked pledge + // this panics if the miner doesn't have enough funds to cover their locked pledge available, err = s.GetAvailableBalance(bal) return available, err } diff --git a/chain/actors/builtin/miner/v4.go b/chain/actors/builtin/miner/v4.go index 5980ef769..1faf30c09 100644 --- a/chain/actors/builtin/miner/v4.go +++ b/chain/actors/builtin/miner/v4.go @@ -62,7 +62,7 @@ func (s *state4) AvailableBalance(bal abi.TokenAmount) (available abi.TokenAmoun available = abi.NewTokenAmount(0) } }() - // this panics if the miner doesnt have enough funds to cover their locked pledge + // this panics if the miner doesn't have enough funds to cover their locked pledge available, err = s.GetAvailableBalance(bal) return available, err } diff --git a/chain/actors/builtin/miner/v5.go b/chain/actors/builtin/miner/v5.go index 886300ea3..be4b5e0b2 100644 --- a/chain/actors/builtin/miner/v5.go +++ b/chain/actors/builtin/miner/v5.go @@ -62,7 +62,7 @@ func (s *state5) AvailableBalance(bal abi.TokenAmount) (available abi.TokenAmoun available = abi.NewTokenAmount(0) } }() - // this panics if the miner doesnt have enough funds to cover their locked pledge + // this panics if the miner doesn't have enough funds to cover their locked pledge available, err = s.GetAvailableBalance(bal) return available, err } diff --git a/chain/actors/builtin/miner/v6.go b/chain/actors/builtin/miner/v6.go index 4737b0ee2..fa8c30e40 100644 --- a/chain/actors/builtin/miner/v6.go +++ b/chain/actors/builtin/miner/v6.go @@ -62,7 +62,7 @@ func (s *state6) AvailableBalance(bal abi.TokenAmount) (available abi.TokenAmoun available = abi.NewTokenAmount(0) } }() - // this panics if the miner doesnt have enough funds to cover their locked pledge + // this panics if the miner doesn't have enough funds to cover their locked pledge available, err = s.GetAvailableBalance(bal) return available, err } diff --git a/chain/actors/builtin/miner/v7.go 
b/chain/actors/builtin/miner/v7.go index 72803eb75..d6bb0e16e 100644 --- a/chain/actors/builtin/miner/v7.go +++ b/chain/actors/builtin/miner/v7.go @@ -62,7 +62,7 @@ func (s *state7) AvailableBalance(bal abi.TokenAmount) (available abi.TokenAmoun available = abi.NewTokenAmount(0) } }() - // this panics if the miner doesnt have enough funds to cover their locked pledge + // this panics if the miner doesn't have enough funds to cover their locked pledge available, err = s.GetAvailableBalance(bal) return available, err } diff --git a/chain/actors/builtin/miner/v8.go b/chain/actors/builtin/miner/v8.go index 3e3739591..06a205e76 100644 --- a/chain/actors/builtin/miner/v8.go +++ b/chain/actors/builtin/miner/v8.go @@ -62,7 +62,7 @@ func (s *state8) AvailableBalance(bal abi.TokenAmount) (available abi.TokenAmoun available = abi.NewTokenAmount(0) } }() - // this panics if the miner doesnt have enough funds to cover their locked pledge + // this panics if the miner doesn't have enough funds to cover their locked pledge available, err = s.GetAvailableBalance(bal) return available, err } diff --git a/chain/actors/builtin/miner/v9.go b/chain/actors/builtin/miner/v9.go index 72d9dbd59..6cbbd509e 100644 --- a/chain/actors/builtin/miner/v9.go +++ b/chain/actors/builtin/miner/v9.go @@ -62,7 +62,7 @@ func (s *state9) AvailableBalance(bal abi.TokenAmount) (available abi.TokenAmoun available = abi.NewTokenAmount(0) } }() - // this panics if the miner doesnt have enough funds to cover their locked pledge + // this panics if the miner doesn't have enough funds to cover their locked pledge available, err = s.GetAvailableBalance(bal) return available, err } diff --git a/chain/actors/builtin/paych/actor.go.template b/chain/actors/builtin/paych/actor.go.template index e19ac5e29..3498a7a49 100644 --- a/chain/actors/builtin/paych/actor.go.template +++ b/chain/actors/builtin/paych/actor.go.template @@ -29,7 +29,7 @@ import ( "github.com/filecoin-project/lotus/chain/types" ) -// Load returns an 
abstract copy of payment channel state, irregardless of actor version +// Load returns an abstract copy of payment channel state, regardless of actor version func Load(store adt.Store, act *types.Actor) (State, error) { if name, av, ok := actors.GetActorMetaByCode(act.Code); ok { if name != manifest.PaychKey { diff --git a/chain/actors/builtin/paych/paych.go b/chain/actors/builtin/paych/paych.go index 8a7979e95..fc8908cb4 100644 --- a/chain/actors/builtin/paych/paych.go +++ b/chain/actors/builtin/paych/paych.go @@ -29,7 +29,7 @@ import ( "github.com/filecoin-project/lotus/chain/types" ) -// Load returns an abstract copy of payment channel state, irregardless of actor version +// Load returns an abstract copy of payment channel state, regardless of actor version func Load(store adt.Store, act *types.Actor) (State, error) { if name, av, ok := actors.GetActorMetaByCode(act.Code); ok { if name != manifest.PaychKey { diff --git a/chain/actors/params.go b/chain/actors/params.go index f09b0be55..866c72b99 100644 --- a/chain/actors/params.go +++ b/chain/actors/params.go @@ -13,7 +13,7 @@ import ( func SerializeParams(i cbg.CBORMarshaler) ([]byte, aerrors.ActorError) { buf := new(bytes.Buffer) if err := i.MarshalCBOR(buf); err != nil { - // TODO: shouldnt this be a fatal error? + // TODO: shouldn't this be a fatal error? 
return nil, aerrors.Absorb(err, exitcode.ErrSerialization, "failed to encode parameter") } return buf.Bytes(), nil diff --git a/chain/actors/policy/policy.go b/chain/actors/policy/policy.go index a0e4728fe..6d2b41154 100644 --- a/chain/actors/policy/policy.go +++ b/chain/actors/policy/policy.go @@ -867,6 +867,24 @@ func AggregatePreCommitNetworkFee(nwVer network.Version, aggregateSize int, base } } +var PoStToSealMap map[abi.RegisteredPoStProof]abi.RegisteredSealProof + +func init() { + PoStToSealMap = make(map[abi.RegisteredPoStProof]abi.RegisteredSealProof) + for sealProof, info := range abi.SealProofInfos { + PoStToSealMap[info.WinningPoStProof] = sealProof + PoStToSealMap[info.WindowPoStProof] = sealProof + } +} + +func GetSealProofFromPoStProof(postProof abi.RegisteredPoStProof) (abi.RegisteredSealProof, error) { + sealProof, exists := PoStToSealMap[postProof] + if !exists { + return 0, xerrors.New("no corresponding RegisteredSealProof for the given RegisteredPoStProof") + } + return sealProof, nil +} + func min(a, b int) int { if a < b { return a diff --git a/chain/actors/policy/policy.go.template b/chain/actors/policy/policy.go.template index 8803c97e6..d13518e0a 100644 --- a/chain/actors/policy/policy.go.template +++ b/chain/actors/policy/policy.go.template @@ -343,9 +343,26 @@ func AggregatePreCommitNetworkFee(nwVer network.Version, aggregateSize int, base } } +var PoStToSealMap map[abi.RegisteredPoStProof]abi.RegisteredSealProof +func init() { + PoStToSealMap = make(map[abi.RegisteredPoStProof]abi.RegisteredSealProof) + for sealProof, info := range abi.SealProofInfos { + PoStToSealMap[info.WinningPoStProof] = sealProof + PoStToSealMap[info.WindowPoStProof] = sealProof + } +} + +func GetSealProofFromPoStProof(postProof abi.RegisteredPoStProof) (abi.RegisteredSealProof, error) { + sealProof, exists := PoStToSealMap[postProof] + if !exists { + return 0, xerrors.New("no corresponding RegisteredSealProof for the given RegisteredPoStProof") + } + return 
sealProof, nil +} + func min(a, b int) int { if a < b { return a } return b -} \ No newline at end of file +} diff --git a/chain/beacon/drand/drand.go b/chain/beacon/drand/drand.go index aa5e36717..5825fa691 100644 --- a/chain/beacon/drand/drand.go +++ b/chain/beacon/drand/drand.go @@ -29,19 +29,6 @@ import ( var log = logging.Logger("drand") -type drandPeer struct { - addr string - tls bool -} - -func (dp *drandPeer) Address() string { - return dp.addr -} - -func (dp *drandPeer) IsTLS() bool { - return dp.tls -} - // DrandBeacon connects Lotus with a drand network in order to provide // randomness to the system in a way that's aligned with Filecoin rounds/epochs. // diff --git a/chain/beacon/drand/drand_test.go b/chain/beacon/drand/drand_test.go index 7269139ca..7434241a5 100644 --- a/chain/beacon/drand/drand_test.go +++ b/chain/beacon/drand/drand_test.go @@ -17,7 +17,7 @@ import ( ) func TestPrintGroupInfo(t *testing.T) { - server := build.DrandConfigs[build.DrandDevnet].Servers[0] + server := build.DrandConfigs[build.DrandTestnet].Servers[0] c, err := hclient.New(server, nil, nil) assert.NoError(t, err) cg := c.(interface { @@ -31,7 +31,7 @@ func TestPrintGroupInfo(t *testing.T) { func TestMaxBeaconRoundForEpoch(t *testing.T) { todayTs := uint64(1652222222) - db, err := NewDrandBeacon(todayTs, build.BlockDelaySecs, nil, build.DrandConfigs[build.DrandDevnet]) + db, err := NewDrandBeacon(todayTs, build.BlockDelaySecs, nil, build.DrandConfigs[build.DrandTestnet]) assert.NoError(t, err) mbr15 := db.MaxBeaconRoundForEpoch(network.Version15, 100) mbr16 := db.MaxBeaconRoundForEpoch(network.Version16, 100) diff --git a/chain/consensus/common.go b/chain/consensus/common.go index 1d9fb3646..a7e5c40d2 100644 --- a/chain/consensus/common.go +++ b/chain/consensus/common.go @@ -362,7 +362,8 @@ func CreateBlockHeader(ctx context.Context, sm *stmgr.StateManager, pts *types.T var blsMsgCids, secpkMsgCids []cid.Cid var blsSigs []crypto.Signature nv := sm.GetNetworkVersion(ctx, 
bt.Epoch) - for _, msg := range bt.Messages { + for _, msgTmp := range bt.Messages { + msg := msgTmp if msg.Signature.Type == crypto.SigTypeBLS { blsSigs = append(blsSigs, msg.Signature) blsMessages = append(blsMessages, &msg.Message) diff --git a/chain/consensus/compute_state.go b/chain/consensus/compute_state.go index 1edeb60b7..4b993b3e7 100644 --- a/chain/consensus/compute_state.go +++ b/chain/consensus/compute_state.go @@ -147,9 +147,6 @@ func (t *TipSetExecutor) ApplyBlocks(ctx context.Context, return xerrors.Errorf("callback failed on cron message: %w", err) } } - if ret.ExitCode != 0 { - return xerrors.Errorf("cron exit was non-zero: %d", ret.ExitCode) - } return nil } diff --git a/chain/consensus/filcns/filecoin.go b/chain/consensus/filcns/filecoin.go index b5ec13a60..bb70d5d11 100644 --- a/chain/consensus/filcns/filecoin.go +++ b/chain/consensus/filcns/filecoin.go @@ -91,9 +91,6 @@ var RewardFunc = func(ctx context.Context, vmi vm.Interface, em stmgr.ExecMonito } } - if ret.ExitCode != 0 { - return xerrors.Errorf("reward application message failed (exit %d): %s", ret.ExitCode, ret.ActorErr) - } return nil } diff --git a/chain/events/events_height.go b/chain/events/events_height.go index 457933fc6..5789be753 100644 --- a/chain/events/events_height.go +++ b/chain/events/events_height.go @@ -180,7 +180,7 @@ func (e *heightEventsObserver) Revert(ctx context.Context, from, to *types.TipSe // Update the head first so we don't accidental skip reverting a concurrent call to ChainAt. e.updateHead(to) - // Call revert on all hights between the two tipsets, handling empty tipsets. + // Call revert on all heights between the two tipsets, handling empty tipsets. 
for h := from.Height(); h > to.Height(); h-- { e.lk.Lock() triggers := e.tsHeights[h] diff --git a/chain/events/events_test.go b/chain/events/events_test.go index e2450909c..f16434355 100644 --- a/chain/events/events_test.go +++ b/chain/events/events_test.go @@ -673,7 +673,7 @@ func TestCalled(t *testing.T) { }, 3, 20, matchAddrMethod(t0123, 5)) require.NoError(t, err) - // create few blocks to make sure nothing get's randomly called + // create few blocks to make sure nothing gets randomly called fcs.advance(0, 4, 0, nil) // H=5 require.Equal(t, false, applied) @@ -991,7 +991,7 @@ func TestCalledNull(t *testing.T) { }, 3, 20, matchAddrMethod(t0123, 5)) require.NoError(t, err) - // create few blocks to make sure nothing get's randomly called + // create few blocks to make sure nothing gets randomly called fcs.advance(0, 4, 0, nil) // H=5 require.Equal(t, false, applied) @@ -1050,7 +1050,7 @@ func TestRemoveTriggersOnMessage(t *testing.T) { }, 3, 20, matchAddrMethod(t0123, 5)) require.NoError(t, err) - // create few blocks to make sure nothing get's randomly called + // create few blocks to make sure nothing gets randomly called fcs.advance(0, 4, 0, nil) // H=5 require.Equal(t, false, applied) @@ -1155,7 +1155,7 @@ func TestStateChanged(t *testing.T) { }) require.NoError(t, err) - // create few blocks to make sure nothing get's randomly called + // create few blocks to make sure nothing gets randomly called fcs.advance(0, 4, 0, nil) // H=5 require.Equal(t, false, applied) diff --git a/chain/events/filter/event.go b/chain/events/filter/event.go index b821a2f83..24192a53e 100644 --- a/chain/events/filter/event.go +++ b/chain/events/filter/event.go @@ -388,7 +388,7 @@ func (m *EventFilterManager) Install(ctx context.Context, minHeight, maxHeight a if m.EventIndex != nil && minHeight != -1 && minHeight < currentHeight { // Filter needs historic events - if err := m.EventIndex.PrefillFilter(ctx, f); err != nil { + if err := m.EventIndex.PrefillFilter(ctx, f, true); err 
!= nil { return nil, err } } diff --git a/chain/events/filter/index.go b/chain/events/filter/index.go index bacba60d7..2b1890c73 100644 --- a/chain/events/filter/index.go +++ b/chain/events/filter/index.go @@ -481,7 +481,7 @@ func (ei *EventIndex) CollectEvents(ctx context.Context, te *TipSetEvents, rever } // PrefillFilter fills a filter's collection of events from the historic index -func (ei *EventIndex) PrefillFilter(ctx context.Context, f *EventFilter) error { +func (ei *EventIndex) PrefillFilter(ctx context.Context, f *EventFilter, excludeReverted bool) error { clauses := []string{} values := []any{} joins := []string{} @@ -500,6 +500,11 @@ func (ei *EventIndex) PrefillFilter(ctx context.Context, f *EventFilter) error { } } + if excludeReverted { + clauses = append(clauses, "event.reverted=?") + values = append(values, false) + } + if len(f.addresses) > 0 { subclauses := []string{} for _, addr := range f.addresses { diff --git a/chain/events/filter/index_test.go b/chain/events/filter/index_test.go index fcdb1ab05..f9b1b14ad 100644 --- a/chain/events/filter/index_test.go +++ b/chain/events/filter/index_test.go @@ -272,7 +272,7 @@ func TestEventIndexPrefillFilter(t *testing.T) { for _, tc := range testCases { tc := tc // appease lint t.Run(tc.name, func(t *testing.T) { - if err := ei.PrefillFilter(context.Background(), tc.filter); err != nil { + if err := ei.PrefillFilter(context.Background(), tc.filter, false); err != nil { require.NoError(t, err, "prefill filter events") } @@ -281,3 +281,619 @@ func TestEventIndexPrefillFilter(t *testing.T) { }) } } + +func TestEventIndexPrefillFilterExcludeReverted(t *testing.T) { + rng := pseudo.New(pseudo.NewSource(299792458)) + a1 := randomF4Addr(t, rng) + a2 := randomF4Addr(t, rng) + a3 := randomF4Addr(t, rng) + + a1ID := abi.ActorID(1) + a2ID := abi.ActorID(2) + + addrMap := addressMap{} + addrMap.add(a1ID, a1) + addrMap.add(a2ID, a2) + + ev1 := fakeEvent( + a1ID, + []kv{ + {k: "type", v: []byte("approval")}, + {k: 
"signer", v: []byte("addr1")}, + }, + []kv{ + {k: "amount", v: []byte("2988181")}, + }, + ) + ev2 := fakeEvent( + a2ID, + []kv{ + {k: "type", v: []byte("approval")}, + {k: "signer", v: []byte("addr2")}, + }, + []kv{ + {k: "amount", v: []byte("2988182")}, + }, + ) + + st := newStore() + events := []*types.Event{ev1} + revertedEvents := []*types.Event{ev2} + em := executedMessage{ + msg: fakeMessage(randomF4Addr(t, rng), randomF4Addr(t, rng)), + rct: fakeReceipt(t, rng, st, events), + evs: events, + } + revertedEm := executedMessage{ + msg: fakeMessage(randomF4Addr(t, rng), randomF4Addr(t, rng)), + rct: fakeReceipt(t, rng, st, revertedEvents), + evs: revertedEvents, + } + + events14000 := buildTipSetEvents(t, rng, 14000, em) + revertedEvents14000 := buildTipSetEvents(t, rng, 14000, revertedEm) + cid14000, err := events14000.msgTs.Key().Cid() + require.NoError(t, err, "tipset cid") + reveredCID14000, err := revertedEvents14000.msgTs.Key().Cid() + require.NoError(t, err, "tipset cid") + + noCollectedEvents := []*CollectedEvent{} + oneCollectedEvent := []*CollectedEvent{ + { + Entries: ev1.Entries, + EmitterAddr: a1, + EventIdx: 0, + Reverted: false, + Height: 14000, + TipSetKey: events14000.msgTs.Key(), + MsgIdx: 0, + MsgCid: em.msg.Cid(), + }, + } + twoCollectedEvent := []*CollectedEvent{ + { + Entries: ev1.Entries, + EmitterAddr: a1, + EventIdx: 0, + Reverted: false, + Height: 14000, + TipSetKey: events14000.msgTs.Key(), + MsgIdx: 0, + MsgCid: em.msg.Cid(), + }, + { + Entries: ev2.Entries, + EmitterAddr: a2, + EventIdx: 0, + Reverted: true, + Height: 14000, + TipSetKey: revertedEvents14000.msgTs.Key(), + MsgIdx: 0, + MsgCid: revertedEm.msg.Cid(), + }, + } + oneCollectedRevertedEvent := []*CollectedEvent{ + { + Entries: ev2.Entries, + EmitterAddr: a2, + EventIdx: 0, + Reverted: true, + Height: 14000, + TipSetKey: revertedEvents14000.msgTs.Key(), + MsgIdx: 0, + MsgCid: revertedEm.msg.Cid(), + }, + } + + workDir, err := os.MkdirTemp("", "lotusevents") + 
require.NoError(t, err, "create temporary work directory") + + defer func() { + _ = os.RemoveAll(workDir) + }() + t.Logf("using work dir %q", workDir) + + dbPath := filepath.Join(workDir, "actorevents.db") + + ei, err := NewEventIndex(context.Background(), dbPath, nil) + require.NoError(t, err, "create event index") + if err := ei.CollectEvents(context.Background(), revertedEvents14000, false, addrMap.ResolveAddress); err != nil { + require.NoError(t, err, "collect reverted events") + } + if err := ei.CollectEvents(context.Background(), revertedEvents14000, true, addrMap.ResolveAddress); err != nil { + require.NoError(t, err, "revert reverted events") + } + if err := ei.CollectEvents(context.Background(), events14000, false, addrMap.ResolveAddress); err != nil { + require.NoError(t, err, "collect events") + } + + inclusiveTestCases := []struct { + name string + filter *EventFilter + te *TipSetEvents + want []*CollectedEvent + }{ + { + name: "nomatch tipset min height", + filter: &EventFilter{ + minHeight: 14001, + maxHeight: -1, + }, + te: events14000, + want: noCollectedEvents, + }, + { + name: "nomatch tipset max height", + filter: &EventFilter{ + minHeight: -1, + maxHeight: 13999, + }, + te: events14000, + want: noCollectedEvents, + }, + { + name: "match tipset min height", + filter: &EventFilter{ + minHeight: 14000, + maxHeight: -1, + }, + te: events14000, + want: twoCollectedEvent, + }, + { + name: "match tipset cid", + filter: &EventFilter{ + minHeight: -1, + maxHeight: -1, + tipsetCid: cid14000, + }, + te: events14000, + want: oneCollectedEvent, + }, + { + name: "match tipset cid", + filter: &EventFilter{ + minHeight: -1, + maxHeight: -1, + tipsetCid: reveredCID14000, + }, + te: revertedEvents14000, + want: oneCollectedRevertedEvent, + }, + { + name: "nomatch address", + filter: &EventFilter{ + minHeight: -1, + maxHeight: -1, + addresses: []address.Address{a3}, + }, + te: events14000, + want: noCollectedEvents, + }, + { + name: "match address 2", + filter: 
&EventFilter{ + minHeight: -1, + maxHeight: -1, + addresses: []address.Address{a2}, + }, + te: revertedEvents14000, + want: oneCollectedRevertedEvent, + }, + { + name: "match address 1", + filter: &EventFilter{ + minHeight: -1, + maxHeight: -1, + addresses: []address.Address{a1}, + }, + te: events14000, + want: oneCollectedEvent, + }, + { + name: "match one entry", + filter: &EventFilter{ + minHeight: -1, + maxHeight: -1, + keys: map[string][][]byte{ + "type": { + []byte("approval"), + }, + }, + }, + te: events14000, + want: twoCollectedEvent, + }, + { + name: "match one entry with alternate values", + filter: &EventFilter{ + minHeight: -1, + maxHeight: -1, + keys: map[string][][]byte{ + "type": { + []byte("cancel"), + []byte("propose"), + []byte("approval"), + }, + }, + }, + te: events14000, + want: twoCollectedEvent, + }, + { + name: "nomatch one entry by missing value", + filter: &EventFilter{ + minHeight: -1, + maxHeight: -1, + keys: map[string][][]byte{ + "type": { + []byte("cancel"), + []byte("propose"), + }, + }, + }, + te: events14000, + want: noCollectedEvents, + }, + { + name: "nomatch one entry by missing key", + filter: &EventFilter{ + minHeight: -1, + maxHeight: -1, + keys: map[string][][]byte{ + "method": { + []byte("approval"), + }, + }, + }, + te: events14000, + want: noCollectedEvents, + }, + { + name: "match one entry with multiple keys", + filter: &EventFilter{ + minHeight: -1, + maxHeight: -1, + keys: map[string][][]byte{ + "type": { + []byte("approval"), + }, + "signer": { + []byte("addr1"), + }, + }, + }, + te: events14000, + want: oneCollectedEvent, + }, + { + name: "match one entry with multiple keys", + filter: &EventFilter{ + minHeight: -1, + maxHeight: -1, + keys: map[string][][]byte{ + "type": { + []byte("approval"), + }, + "signer": { + []byte("addr2"), + }, + }, + }, + te: revertedEvents14000, + want: oneCollectedRevertedEvent, + }, + { + name: "nomatch one entry with one mismatching key", + filter: &EventFilter{ + minHeight: -1, + 
maxHeight: -1, + keys: map[string][][]byte{ + "type": { + []byte("approval"), + }, + "approver": { + []byte("addr1"), + }, + }, + }, + te: events14000, + want: noCollectedEvents, + }, + { + name: "nomatch one entry with one mismatching value", + filter: &EventFilter{ + minHeight: -1, + maxHeight: -1, + keys: map[string][][]byte{ + "type": { + []byte("approval"), + }, + "signer": { + []byte("addr3"), + }, + }, + }, + te: events14000, + want: noCollectedEvents, + }, + { + name: "nomatch one entry with one unindexed key", + filter: &EventFilter{ + minHeight: -1, + maxHeight: -1, + keys: map[string][][]byte{ + "amount": { + []byte("2988181"), + }, + }, + }, + te: events14000, + want: noCollectedEvents, + }, + { + name: "nomatch one entry with one unindexed key", + filter: &EventFilter{ + minHeight: -1, + maxHeight: -1, + keys: map[string][][]byte{ + "amount": { + []byte("2988182"), + }, + }, + }, + te: events14000, + want: noCollectedEvents, + }, + } + + exclusiveTestCases := []struct { + name string + filter *EventFilter + te *TipSetEvents + want []*CollectedEvent + }{ + { + name: "nomatch tipset min height", + filter: &EventFilter{ + minHeight: 14001, + maxHeight: -1, + }, + te: events14000, + want: noCollectedEvents, + }, + { + name: "nomatch tipset max height", + filter: &EventFilter{ + minHeight: -1, + maxHeight: 13999, + }, + te: events14000, + want: noCollectedEvents, + }, + { + name: "match tipset min height", + filter: &EventFilter{ + minHeight: 14000, + maxHeight: -1, + }, + te: events14000, + want: oneCollectedEvent, + }, + { + name: "match tipset cid", + filter: &EventFilter{ + minHeight: -1, + maxHeight: -1, + tipsetCid: cid14000, + }, + te: events14000, + want: oneCollectedEvent, + }, + { + name: "match tipset cid but reverted", + filter: &EventFilter{ + minHeight: -1, + maxHeight: -1, + tipsetCid: reveredCID14000, + }, + te: revertedEvents14000, + want: noCollectedEvents, + }, + { + name: "nomatch address", + filter: &EventFilter{ + minHeight: -1, + 
maxHeight: -1, + addresses: []address.Address{a3}, + }, + te: events14000, + want: noCollectedEvents, + }, + { + name: "nomatch address 2 but reverted", + filter: &EventFilter{ + minHeight: -1, + maxHeight: -1, + addresses: []address.Address{a2}, + }, + te: revertedEvents14000, + want: noCollectedEvents, + }, + { + name: "match address", + filter: &EventFilter{ + minHeight: -1, + maxHeight: -1, + addresses: []address.Address{a1}, + }, + te: events14000, + want: oneCollectedEvent, + }, + { + name: "match one entry", + filter: &EventFilter{ + minHeight: -1, + maxHeight: -1, + keys: map[string][][]byte{ + "type": { + []byte("approval"), + }, + }, + }, + te: events14000, + want: oneCollectedEvent, + }, + { + name: "match one entry with alternate values", + filter: &EventFilter{ + minHeight: -1, + maxHeight: -1, + keys: map[string][][]byte{ + "type": { + []byte("cancel"), + []byte("propose"), + []byte("approval"), + }, + }, + }, + te: events14000, + want: oneCollectedEvent, + }, + { + name: "nomatch one entry by missing value", + filter: &EventFilter{ + minHeight: -1, + maxHeight: -1, + keys: map[string][][]byte{ + "type": { + []byte("cancel"), + []byte("propose"), + }, + }, + }, + te: events14000, + want: noCollectedEvents, + }, + { + name: "nomatch one entry by missing key", + filter: &EventFilter{ + minHeight: -1, + maxHeight: -1, + keys: map[string][][]byte{ + "method": { + []byte("approval"), + }, + }, + }, + te: events14000, + want: noCollectedEvents, + }, + { + name: "match one entry with multiple keys", + filter: &EventFilter{ + minHeight: -1, + maxHeight: -1, + keys: map[string][][]byte{ + "type": { + []byte("approval"), + }, + "signer": { + []byte("addr1"), + }, + }, + }, + te: events14000, + want: oneCollectedEvent, + }, + { + name: "nomatch one entry with one mismatching key", + filter: &EventFilter{ + minHeight: -1, + maxHeight: -1, + keys: map[string][][]byte{ + "type": { + []byte("approval"), + }, + "approver": { + []byte("addr1"), + }, + }, + }, + te: 
events14000, + want: noCollectedEvents, + }, + { + name: "nomatch one entry with matching reverted value", + filter: &EventFilter{ + minHeight: -1, + maxHeight: -1, + keys: map[string][][]byte{ + "type": { + []byte("approval"), + }, + "signer": { + []byte("addr2"), + }, + }, + }, + te: events14000, + want: noCollectedEvents, + }, + { + name: "nomatch one entry with one mismatching value", + filter: &EventFilter{ + minHeight: -1, + maxHeight: -1, + keys: map[string][][]byte{ + "type": { + []byte("approval"), + }, + "signer": { + []byte("addr3"), + }, + }, + }, + te: events14000, + want: noCollectedEvents, + }, + { + name: "nomatch one entry with one unindexed key", + filter: &EventFilter{ + minHeight: -1, + maxHeight: -1, + keys: map[string][][]byte{ + "amount": { + []byte("2988181"), + }, + }, + }, + te: events14000, + want: noCollectedEvents, + }, + } + + for _, tc := range inclusiveTestCases { + tc := tc // appease lint + t.Run(tc.name, func(t *testing.T) { + if err := ei.PrefillFilter(context.Background(), tc.filter, false); err != nil { + require.NoError(t, err, "prefill filter events") + } + + coll := tc.filter.TakeCollectedEvents(context.Background()) + require.ElementsMatch(t, coll, tc.want, tc.name) + }) + } + + for _, tc := range exclusiveTestCases { + tc := tc // appease lint + t.Run(tc.name, func(t *testing.T) { + if err := ei.PrefillFilter(context.Background(), tc.filter, true); err != nil { + require.NoError(t, err, "prefill filter events") + } + + coll := tc.filter.TakeCollectedEvents(context.Background()) + require.ElementsMatch(t, coll, tc.want, tc.name) + }) + } +} diff --git a/chain/events/state/ctxstore.go b/chain/events/state/ctxstore.go deleted file mode 100644 index 12b45e425..000000000 --- a/chain/events/state/ctxstore.go +++ /dev/null @@ -1,25 +0,0 @@ -package state - -import ( - "context" - - "github.com/ipfs/go-cid" - cbor "github.com/ipfs/go-ipld-cbor" -) - -type contextStore struct { - ctx context.Context - cst *cbor.BasicIpldStore -} - 
-func (cs *contextStore) Context() context.Context { - return cs.ctx -} - -func (cs *contextStore) Get(ctx context.Context, c cid.Cid, out interface{}) error { - return cs.cst.Get(ctx, c, out) -} - -func (cs *contextStore) Put(ctx context.Context, v interface{}) (cid.Cid, error) { - return cs.cst.Put(ctx, v) -} diff --git a/chain/exchange/client.go b/chain/exchange/client.go index fca8249ce..9cbb44955 100644 --- a/chain/exchange/client.go +++ b/chain/exchange/client.go @@ -247,7 +247,7 @@ func (c *client) processResponse(req *Request, res *Response, tipsets []*types.T // If we didn't request the headers they should have been provided // by the caller. if len(tipsets) < len(res.Chain) { - return nil, xerrors.Errorf("not enought tipsets provided for message response validation, needed %d, have %d", len(res.Chain), len(tipsets)) + return nil, xerrors.Errorf("not enough tipsets provided for message response validation, needed %d, have %d", len(res.Chain), len(tipsets)) } chain := make([]*BSTipSet, 0, resLength) for i, resChain := range res.Chain { @@ -284,16 +284,18 @@ func (c *client) validateCompressedIndices(chain []*BSTipSet) error { len(msgs.SecpkIncludes), blocksNum) } + blsLen := uint64(len(msgs.Bls)) + secpLen := uint64(len(msgs.Secpk)) for blockIdx := 0; blockIdx < blocksNum; blockIdx++ { for _, mi := range msgs.BlsIncludes[blockIdx] { - if int(mi) >= len(msgs.Bls) { + if mi >= blsLen { return xerrors.Errorf("index in BlsIncludes (%d) exceeds number of messages (%d)", mi, len(msgs.Bls)) } } for _, mi := range msgs.SecpkIncludes[blockIdx] { - if int(mi) >= len(msgs.Secpk) { + if mi >= secpLen { return xerrors.Errorf("index in SecpkIncludes (%d) exceeds number of messages (%d)", mi, len(msgs.Secpk)) } @@ -315,18 +317,36 @@ func (c *client) GetBlocks(ctx context.Context, tsk types.TipSetKey, count int) ) } - req := &Request{ - Head: tsk.Cids(), - Length: uint64(count), - Options: Headers, + var ret []*types.TipSet + start := tsk.Cids() + for len(ret) < count { + 
req := &Request{ + Head: start, + Length: uint64(count - len(ret)), + Options: Headers, + } + + validRes, err := c.doRequest(ctx, req, nil, nil) + if err != nil { + return nil, xerrors.Errorf("failed to doRequest: %w", err) + } + + if len(validRes.tipsets) == 0 { + return nil, xerrors.Errorf("doRequest fetched zero tipsets: %w", err) + } + + ret = append(ret, validRes.tipsets...) + + last := validRes.tipsets[len(validRes.tipsets)-1] + if last.Height() <= 1 { + // we've walked all the way up to genesis, return + break + } + + start = last.Parents().Cids() } - validRes, err := c.doRequest(ctx, req, nil, nil) - if err != nil { - return nil, err - } - - return validRes.tipsets, nil + return ret, nil } // GetFullTipSet implements Client.GetFullTipSet(). Refer to the godocs there. @@ -341,12 +361,16 @@ func (c *client) GetFullTipSet(ctx context.Context, peer peer.ID, tsk types.TipS validRes, err := c.doRequest(ctx, req, &peer, nil) if err != nil { - return nil, err + return nil, xerrors.Errorf("failed to doRequest: %w", err) } - return validRes.toFullTipSets()[0], nil - // If `doRequest` didn't fail we are guaranteed to have at least - // *one* tipset here, so it's safe to index directly. + fullTipsets := validRes.toFullTipSets() + + if len(fullTipsets) == 0 { + return nil, xerrors.New("unexpectedly got no tipsets in exchange") + } + + return fullTipsets[0], nil } // GetChainMessages implements Client.GetChainMessages(). Refer to the godocs there. @@ -386,7 +410,7 @@ func (c *client) sendRequestToPeer(ctx context.Context, peer peer.ID, req *Reque defer span.End() if span.IsRecordingEvents() { span.AddAttributes( - trace.StringAttribute("peer", peer.Pretty()), + trace.StringAttribute("peer", peer.String()), ) } defer func() { diff --git a/chain/exchange/interfaces.go b/chain/exchange/interfaces.go index c95127929..ff11b63eb 100644 --- a/chain/exchange/interfaces.go +++ b/chain/exchange/interfaces.go @@ -28,8 +28,8 @@ type Server interface { // used by the Syncer. 
type Client interface { // GetBlocks fetches block headers from the network, from the provided - // tipset *backwards*, returning as many tipsets as the count parameter, - // or less. + // tipset *backwards*, returning as many tipsets as the count parameter. + // The ONLY case in which we return fewer than `count` tipsets is if we hit genesis. GetBlocks(ctx context.Context, tsk types.TipSetKey, count int) ([]*types.TipSet, error) // GetChainMessages fetches messages from the network, starting from the first provided tipset diff --git a/chain/gen/genesis/miners.go b/chain/gen/genesis/miners.go index 0bac282d2..df8900cab 100644 --- a/chain/gen/genesis/miners.go +++ b/chain/gen/genesis/miners.go @@ -251,7 +251,8 @@ func SetupStorageMiners(ctx context.Context, cs *store.ChainStore, sys vm.Syscal } params := &markettypes.PublishStorageDealsParams{} - for _, preseal := range m.Sectors { + for _, presealTmp := range m.Sectors { + preseal := presealTmp preseal.Deal.VerifiedDeal = true preseal.Deal.EndEpoch = minerInfos[i].presealExp p := markettypes.ClientDealProposal{ diff --git a/chain/index/msgindex.go b/chain/index/msgindex.go index 27eeea73e..e9e81ae2c 100644 --- a/chain/index/msgindex.go +++ b/chain/index/msgindex.go @@ -131,7 +131,7 @@ func NewMsgIndex(lctx context.Context, basePath string, cs ChainStore) (MsgIndex db, err := sql.Open("sqlite3", dbPath) if err != nil { - // TODO [nice to have]: automaticaly delete corrupt databases + // TODO [nice to have]: automatically delete corrupt databases // but for now we can just error and let the operator delete. 
return nil, xerrors.Errorf("error opening msgindex database: %w", err) } diff --git a/chain/market/store.go b/chain/market/store.go index ece1248f6..10ab2abe1 100644 --- a/chain/market/store.go +++ b/chain/market/store.go @@ -39,23 +39,6 @@ func (ps *Store) save(ctx context.Context, state *FundedAddressState) error { return ps.ds.Put(ctx, k, b) } -// get the state for the given address -func (ps *Store) get(ctx context.Context, addr address.Address) (*FundedAddressState, error) { - k := dskeyForAddr(addr) - - data, err := ps.ds.Get(ctx, k) - if err != nil { - return nil, err - } - - var state FundedAddressState - err = cborrpc.ReadCborRPC(bytes.NewReader(data), &state) - if err != nil { - return nil, err - } - return &state, nil -} - // forEach calls iter with each address in the datastore func (ps *Store) forEach(ctx context.Context, iter func(*FundedAddressState)) error { res, err := ps.ds.Query(ctx, dsq.Query{Prefix: dsKeyAddr}) diff --git a/chain/messagepool/block_proba_test.go b/chain/messagepool/block_proba_test.go index 6d121d222..2dc1dc25d 100644 --- a/chain/messagepool/block_proba_test.go +++ b/chain/messagepool/block_proba_test.go @@ -5,7 +5,6 @@ import ( "math" "math/rand" "testing" - "time" ) func TestBlockProbability(t *testing.T) { @@ -23,7 +22,6 @@ func TestBlockProbability(t *testing.T) { func TestWinnerProba(t *testing.T) { //stm: @OTHER_IMPLEMENTATION_BLOCK_PROB_002 - rand.Seed(time.Now().UnixNano()) const N = 1000000 winnerProba := noWinnersProb() sum := 0 diff --git a/chain/messagepool/messagepool.go b/chain/messagepool/messagepool.go index 6dc3f2239..7d55b0b16 100644 --- a/chain/messagepool/messagepool.go +++ b/chain/messagepool/messagepool.go @@ -21,6 +21,7 @@ import ( pubsub "github.com/libp2p/go-libp2p-pubsub" "github.com/minio/blake2b-simd" "github.com/raulk/clock" + "go.opencensus.io/stats" "golang.org/x/xerrors" ffi "github.com/filecoin-project/filecoin-ffi" @@ -210,8 +211,10 @@ func ComputeRBF(curPrem abi.TokenAmount, replaceByFeeRatio 
types.Percent) abi.To func CapGasFee(mff dtypes.DefaultMaxFeeFunc, msg *types.Message, sendSpec *api.MessageSendSpec) { var maxFee abi.TokenAmount + var maximizeFeeCap bool if sendSpec != nil { maxFee = sendSpec.MaxFee + maximizeFeeCap = sendSpec.MaximizeFeeCap } if maxFee.Int == nil || maxFee.Equals(big.Zero()) { mf, err := mff() @@ -222,15 +225,12 @@ func CapGasFee(mff dtypes.DefaultMaxFeeFunc, msg *types.Message, sendSpec *api.M maxFee = mf } - gl := types.NewInt(uint64(msg.GasLimit)) - totalFee := types.BigMul(msg.GasFeeCap, gl) - - if totalFee.LessThanEqual(maxFee) { - msg.GasPremium = big.Min(msg.GasFeeCap, msg.GasPremium) // cap premium at FeeCap - return + gaslimit := types.NewInt(uint64(msg.GasLimit)) + totalFee := types.BigMul(msg.GasFeeCap, gaslimit) + if maximizeFeeCap || totalFee.GreaterThan(maxFee) { + msg.GasFeeCap = big.Div(maxFee, gaslimit) } - msg.GasFeeCap = big.Div(maxFee, gl) msg.GasPremium = big.Min(msg.GasFeeCap, msg.GasPremium) // cap premium at FeeCap } @@ -1022,6 +1022,9 @@ func (mp *MessagePool) addLocked(ctx context.Context, m *types.SignedMessage, st } }) + // Record the current size of the Mpool + stats.Record(ctx, metrics.MpoolMessageCount.M(int64(mp.currentSize))) + return nil } @@ -1214,6 +1217,9 @@ func (mp *MessagePool) remove(ctx context.Context, from address.Address, nonce u return } } + + // Record the current size of the Mpool + stats.Record(ctx, metrics.MpoolMessageCount.M(int64(mp.currentSize))) } func (mp *MessagePool) Pending(ctx context.Context) ([]*types.SignedMessage, *types.TipSet) { diff --git a/chain/state/statetree.go b/chain/state/statetree.go index c71473e8f..61d7d500a 100644 --- a/chain/state/statetree.go +++ b/chain/state/statetree.go @@ -438,7 +438,8 @@ func (st *StateTree) Flush(ctx context.Context) (cid.Cid, error) { return cid.Undef, xerrors.Errorf("tried to flush state tree with snapshots on the stack") } - for addr, sto := range st.snaps.layers[0].actors { + for addr, stoTmp := range 
st.snaps.layers[0].actors { + sto := stoTmp if sto.Delete { if err := st.root.Delete(abi.AddrKey(addr)); err != nil { return cid.Undef, err @@ -570,7 +571,7 @@ func (st *StateTree) ForEach(f func(address.Address, *types.Actor) error) error } // no need to record anything here, there are no duplicates in the actors HAMT - // iself. + // itself. if _, ok := seen[addr]; ok { return nil } @@ -588,7 +589,7 @@ func (st *StateTree) ForEach(f func(address.Address, *types.Actor) error) error } // no need to record anything here, there are no duplicates in the actors HAMT - // iself. + // itself. if _, ok := seen[addr]; ok { return nil } diff --git a/chain/stmgr/forks.go b/chain/stmgr/forks.go index 8d1ac1dfb..9dd66ee8b 100644 --- a/chain/stmgr/forks.go +++ b/chain/stmgr/forks.go @@ -235,11 +235,6 @@ func (sm *StateManager) hasExpensiveForkBetween(parent, height abi.ChainEpoch) b return false } -func (sm *StateManager) hasExpensiveFork(height abi.ChainEpoch) bool { - _, ok := sm.expensiveUpgrades[height] - return ok -} - func runPreMigration(ctx context.Context, sm *StateManager, fn PreMigrationFunc, cache *nv16.MemMigrationCache, ts *types.TipSet) { height := ts.Height() parent := ts.ParentState() diff --git a/chain/store/index_test.go b/chain/store/index_test.go index a3a4ad6ce..3cde40062 100644 --- a/chain/store/index_test.go +++ b/chain/store/index_test.go @@ -42,7 +42,7 @@ func TestIndexSeeks(t *testing.T) { cs := store.NewChainStore(nbs, nbs, syncds.MutexWrap(datastore.NewMapDatastore()), filcns.Weight, nil) defer cs.Close() //nolint:errcheck - _, err = cs.Import(ctx, bytes.NewReader(gencar)) + _, _, err = cs.Import(ctx, bytes.NewReader(gencar)) if err != nil { t.Fatal(err) } diff --git a/chain/store/messages.go b/chain/store/messages.go index 3686f74f4..c23f900d7 100644 --- a/chain/store/messages.go +++ b/chain/store/messages.go @@ -212,13 +212,8 @@ func (cs *ChainStore) MessagesForTipset(ctx context.Context, ts *types.TipSet) ( var out []types.ChainMsg for _, bm := 
range bmsgs { - for _, blsm := range bm.BlsMessages { - out = append(out, blsm) - } - - for _, secm := range bm.SecpkMessages { - out = append(out, secm) - } + out = append(out, bm.BlsMessages...) + out = append(out, bm.SecpkMessages...) } return out, nil diff --git a/chain/store/snapshot.go b/chain/store/snapshot.go index 5e218fa36..de2190c5d 100644 --- a/chain/store/snapshot.go +++ b/chain/store/snapshot.go @@ -60,7 +60,7 @@ func (cs *ChainStore) Export(ctx context.Context, ts *types.TipSet, inclRecentRo }) } -func (cs *ChainStore) Import(ctx context.Context, r io.Reader) (*types.TipSet, error) { +func (cs *ChainStore) Import(ctx context.Context, r io.Reader) (head *types.TipSet, genesis *types.BlockHeader, err error) { // TODO: writing only to the state blockstore is incorrect. // At this time, both the state and chain blockstores are backed by the // universal store. When we physically segregate the stores, we will need @@ -69,7 +69,7 @@ func (cs *ChainStore) Import(ctx context.Context, r io.Reader) (*types.TipSet, e br, err := carv2.NewBlockReader(r) if err != nil { - return nil, xerrors.Errorf("loadcar failed: %w", err) + return nil, nil, xerrors.Errorf("loadcar failed: %w", err) } s := cs.StateBlockstore() @@ -80,27 +80,51 @@ func (cs *ChainStore) Import(ctx context.Context, r io.Reader) (*types.TipSet, e putThrottle <- nil } + if len(br.Roots) == 0 { + return nil, nil, xerrors.Errorf("no roots in snapshot car file") + } + nextTailCid := br.Roots[0] + + var tailBlock types.BlockHeader + tailBlock.Height = abi.ChainEpoch(-1) + var buf []blocks.Block for { blk, err := br.Next() if err != nil { + + // we're at the end if err == io.EOF { if len(buf) > 0 { if err := s.PutMany(ctx, buf); err != nil { - return nil, err + return nil, nil, err } } break } - return nil, err + return nil, nil, err } + // check for header block, looking for genesis + if blk.Cid() == nextTailCid && tailBlock.Height != 0 { + if err := 
tailBlock.UnmarshalCBOR(bytes.NewReader(blk.RawData())); err != nil { + return nil, nil, xerrors.Errorf("failed to unmarshal genesis block: %w", err) + } + if len(tailBlock.Parents) > 0 { + nextTailCid = tailBlock.Parents[0] + } else { + // note: even the 0th block has a parent linking to the cbor genesis block + return nil, nil, xerrors.Errorf("current block (epoch %d cid %s) has no parents", tailBlock.Height, tailBlock.Cid()) + } + } + + // append to batch buf = append(buf, blk) if len(buf) > 1000 { if lastErr := <-putThrottle; lastErr != nil { // consume one error to have the right to add one - return nil, lastErr + return nil, nil, lastErr } go func(buf []blocks.Block) { @@ -113,13 +137,17 @@ func (cs *ChainStore) Import(ctx context.Context, r io.Reader) (*types.TipSet, e // check errors for i := 0; i < parallelPuts; i++ { if lastErr := <-putThrottle; lastErr != nil { - return nil, lastErr + return nil, nil, lastErr } } + if tailBlock.Height != 0 { + return nil, nil, xerrors.Errorf("expected genesis block to have height 0 (genesis), got %d: %s", tailBlock.Height, tailBlock.Cid()) + } + root, err := cs.LoadTipSet(ctx, types.NewTipSetKey(br.Roots...)) if err != nil { - return nil, xerrors.Errorf("failed to load root tipset from chainfile: %w", err) + return nil, nil, xerrors.Errorf("failed to load root tipset from chainfile: %w", err) } ts := root @@ -135,10 +163,10 @@ func (cs *ChainStore) Import(ctx context.Context, r io.Reader) (*types.TipSet, e } if err := cs.PersistTipsets(ctx, tssToPersist); err != nil { - return nil, xerrors.Errorf("failed to persist tipsets: %w", err) + return nil, nil, xerrors.Errorf("failed to persist tipsets: %w", err) } - return root, nil + return root, &tailBlock, nil } type walkSchedTaskType int @@ -167,7 +195,7 @@ func (t walkSchedTaskType) String() string { case dagTask: return "dag" } - panic(fmt.Sprintf("unknow task %d", t)) + panic(fmt.Sprintf("unknown task %d", t)) } type walkTask struct { @@ -656,9 +684,7 @@ func (cs 
*ChainStore) WalkSnapshot(ctx context.Context, ts *types.TipSet, inclRe } if b.Height > 0 { - for _, p := range b.Parents { - blocksToWalk = append(blocksToWalk, p) - } + blocksToWalk = append(blocksToWalk, b.Parents...) } else { // include the genesis block cids = append(cids, b.Parents...) diff --git a/chain/store/store_test.go b/chain/store/store_test.go index 9c717fdbe..1ecfc474a 100644 --- a/chain/store/store_test.go +++ b/chain/store/store_test.go @@ -118,7 +118,7 @@ func TestChainExportImport(t *testing.T) { cs := store.NewChainStore(nbs, nbs, datastore.NewMapDatastore(), filcns.Weight, nil) defer cs.Close() //nolint:errcheck - root, err := cs.Import(context.TODO(), buf) + root, _, err := cs.Import(context.TODO(), buf) if err != nil { t.Fatal(err) } @@ -153,7 +153,7 @@ func TestChainImportTipsetKeyCid(t *testing.T) { cs := store.NewChainStore(nbs, nbs, datastore.NewMapDatastore(), filcns.Weight, nil) defer cs.Close() //nolint:errcheck - root, err := cs.Import(ctx, buf) + root, _, err := cs.Import(ctx, buf) require.NoError(t, err) require.Truef(t, root.Equals(last), "imported chain differed from exported chain") @@ -202,7 +202,7 @@ func TestChainExportImportFull(t *testing.T) { cs := store.NewChainStore(nbs, nbs, ds, filcns.Weight, nil) defer cs.Close() //nolint:errcheck - root, err := cs.Import(context.TODO(), buf) + root, _, err := cs.Import(context.TODO(), buf) if err != nil { t.Fatal(err) } diff --git a/chain/sub/incoming.go b/chain/sub/incoming.go index 3a11f7c98..b50ddc467 100644 --- a/chain/sub/incoming.go +++ b/chain/sub/incoming.go @@ -516,7 +516,7 @@ func (v *IndexerMessageValidator) Validate(ctx context.Context, pid peer.ID, msg return pubsub.ValidationReject } if len(idxrMsg.ExtraData) == 0 { - log.Debugw("ignoring messsage missing miner id", "peer", originPeer) + log.Debugw("ignoring message missing miner id", "peer", originPeer) return pubsub.ValidationIgnore } @@ -552,7 +552,7 @@ func (v *IndexerMessageValidator) Validate(ctx context.Context, 
pid peer.ID, msg // Check that the miner ID maps to the peer that sent the message. err = v.authenticateMessage(ctx, minerAddr, originPeer) if err != nil { - log.Warnw("cannot authenticate messsage", "err", err, "peer", originPeer, "minerID", minerAddr) + log.Warnw("cannot authenticate message", "err", err, "peer", originPeer, "minerID", minerAddr) stats.Record(ctx, metrics.IndexerMessageValidationFailure.M(1)) return pubsub.ValidationReject } diff --git a/chain/sub/ratelimit/queue_test.go b/chain/sub/ratelimit/queue_test.go new file mode 100644 index 000000000..cd66a423e --- /dev/null +++ b/chain/sub/ratelimit/queue_test.go @@ -0,0 +1,61 @@ +package ratelimit + +import ( + "testing" +) + +func TestQueue(t *testing.T) { + const size = 3 + q := &queue{buf: make([]int64, size)} + + if q.len() != 0 { + t.Fatalf("q.len() = %d, expect 0", q.len()) + } + + if q.cap() != size { + t.Fatalf("q.cap() = %d, expect %d", q.cap(), size) + } + + for i := int64(0); i < int64(size); i++ { + err := q.push(i) + if err != nil { + t.Fatalf("cannot push element %d", i) + } + } + + if q.len() != size { + t.Fatalf("q.len() = %d, expect %d", q.len(), size) + } + + err := q.push(int64(size)) + if err != ErrRateLimitExceeded { + t.Fatalf("pushing element beyond capacity should have failed with err: %s, got %s", ErrRateLimitExceeded, err) + } + + if q.front() != 0 { + t.Fatalf("q.front() = %d, expect 0", q.front()) + } + + if q.back() != int64(size-1) { + t.Fatalf("q.back() = %d, expect %d", q.back(), size-1) + } + + popVal := q.pop() + if popVal != 0 { + t.Fatalf("q.pop() = %d, expect 0", popVal) + } + + if q.len() != size-1 { + t.Fatalf("q.len() = %d, expect %d", q.len(), size-1) + } + + // Testing truncation. 
+ threshold := int64(1) + q.truncate(threshold) + if q.len() != 1 { + t.Fatalf("q.len() after truncate = %d, expect 1", q.len()) + } + if q.front() != 2 { + t.Fatalf("q.front() after truncate = %d, expect 2", q.front()) + } +} diff --git a/chain/sync.go b/chain/sync.go index 044f317d9..4dccc2036 100644 --- a/chain/sync.go +++ b/chain/sync.go @@ -844,7 +844,7 @@ loop: return nil, xerrors.Errorf("failed to load next local tipset: %w", err) } if base.IsChildOf(knownParent) { - // common case: receiving a block thats potentially part of the same tipset as our best block + // common case: receiving a block that's potentially part of the same tipset as our best block return blockSet, nil } @@ -886,6 +886,35 @@ func (syncer *Syncer) syncFork(ctx context.Context, incoming *types.TipSet, know } } + incomingParentsTsk := incoming.Parents() + commonParent := false + for _, incomingParent := range incomingParentsTsk.Cids() { + if known.Contains(incomingParent) { + commonParent = true + } + } + + if commonParent { + // known contains at least one of incoming's Parents => the common ancestor is known's Parents (incoming's Grandparents) + // in this case, we need to return {incoming.Parents()} + incomingParents, err := syncer.store.LoadTipSet(ctx, incomingParentsTsk) + if err != nil { + // fallback onto the network + tips, err := syncer.Exchange.GetBlocks(ctx, incoming.Parents(), 1) + if err != nil { + return nil, xerrors.Errorf("failed to fetch incomingParents from the network: %w", err) + } + + if len(tips) == 0 { + return nil, xerrors.Errorf("network didn't return any tipsets") + } + + incomingParents = tips[0] + } + + return []*types.TipSet{incomingParents}, nil + } + // TODO: Does this mean we always ask for ForkLengthThreshold blocks from the network, even if we just need, like, 2? Yes. // Would it not be better to ask in smaller chunks, given that an ~ForkLengthThreshold is very rare? 
tips, err := syncer.Exchange.GetBlocks(ctx, incoming.Parents(), int(build.ForkLengthThreshold)) diff --git a/chain/sync_test.go b/chain/sync_test.go index ec960d7d0..be7759603 100644 --- a/chain/sync_test.go +++ b/chain/sync_test.go @@ -11,7 +11,6 @@ import ( "github.com/ipfs/go-cid" ds "github.com/ipfs/go-datastore" logging "github.com/ipfs/go-log/v2" - "github.com/libp2p/go-libp2p/core/peer" mocknet "github.com/libp2p/go-libp2p/p2p/net/mock" "github.com/stretchr/testify/require" @@ -344,13 +343,6 @@ func (tu *syncTestUtil) addClientNode() int { return len(tu.nds) - 1 } -func (tu *syncTestUtil) pid(n int) peer.ID { - nal, err := tu.nds[n].NetAddrsListen(tu.ctx) - require.NoError(tu.t, err) - - return nal.ID -} - func (tu *syncTestUtil) connect(from, to int) { toPI, err := tu.nds[to].NetAddrsListen(tu.ctx) require.NoError(tu.t, err) diff --git a/chain/types/blockmsg_test.go b/chain/types/blockmsg_test.go index 02a622768..ea20f64a7 100644 --- a/chain/types/blockmsg_test.go +++ b/chain/types/blockmsg_test.go @@ -7,9 +7,6 @@ import ( ) func TestDecodeBlockMsg(t *testing.T) { - type args struct { - b []byte - } tests := []struct { name string data []byte diff --git a/chain/types/ethtypes/eth_transactions.go b/chain/types/ethtypes/eth_transactions.go index 6c13c5bf6..a3b1d0150 100644 --- a/chain/types/ethtypes/eth_transactions.go +++ b/chain/types/ethtypes/eth_transactions.go @@ -62,9 +62,14 @@ type EthTxArgs struct { // - BlockHash // - BlockNumber // - TransactionIndex -// - From // - Hash func EthTxFromSignedEthMessage(smsg *types.SignedMessage) (EthTx, error) { + // The from address is always an f410f address, never an ID or other address. + if !IsEthAddress(smsg.Message.From) { + return EthTx{}, xerrors.Errorf("sender must be an eth account, was %s", smsg.Message.From) + } + + // Probably redundant, but we might as well check. 
if smsg.Signature.Type != typescrypto.SigTypeDelegated { return EthTx{}, xerrors.Errorf("signature is not delegated type, is type: %d", smsg.Signature.Type) } @@ -79,10 +84,18 @@ func EthTxFromSignedEthMessage(smsg *types.SignedMessage) (EthTx, error) { return EthTx{}, xerrors.Errorf("failed to recover signature: %w", err) } + from, err := EthAddressFromFilecoinAddress(smsg.Message.From) + if err != nil { + // This should be impossible as we've already asserted that we have an EthAddress + // sender... + return EthTx{}, xerrors.Errorf("sender was not an eth account") + } + return EthTx{ Nonce: EthUint64(txArgs.Nonce), ChainID: EthUint64(txArgs.ChainID), To: txArgs.To, + From: from, Value: EthBigInt(txArgs.Value), Type: Eip1559TxType, Gas: EthUint64(txArgs.GasLimit), diff --git a/chain/types/ethtypes/eth_types.go b/chain/types/ethtypes/eth_types.go index b796e6f56..b933329f4 100644 --- a/chain/types/ethtypes/eth_types.go +++ b/chain/types/ethtypes/eth_types.go @@ -799,6 +799,45 @@ func GetContractEthAddressFromCode(sender EthAddress, salt [32]byte, initcode [] return ethAddr, nil } +// EthEstimateGasParams handles raw jsonrpc params for eth_estimateGas +type EthEstimateGasParams struct { + Tx EthCall + BlkParam *EthBlockNumberOrHash +} + +func (e *EthEstimateGasParams) UnmarshalJSON(b []byte) error { + var params []json.RawMessage + err := json.Unmarshal(b, ¶ms) + if err != nil { + return err + } + + switch len(params) { + case 2: + err = json.Unmarshal(params[1], &e.BlkParam) + if err != nil { + return err + } + fallthrough + case 1: + err = json.Unmarshal(params[0], &e.Tx) + if err != nil { + return err + } + default: + return xerrors.Errorf("expected 1 or 2 params, got %d", len(params)) + } + + return nil +} + +func (e EthEstimateGasParams) MarshalJSON() ([]byte, error) { + if e.BlkParam != nil { + return json.Marshal([]interface{}{e.Tx, e.BlkParam}) + } + return json.Marshal([]interface{}{e.Tx}) +} + // EthFeeHistoryParams handles raw jsonrpc params for 
eth_feeHistory type EthFeeHistoryParams struct { BlkCount EthUint64 diff --git a/chain/types/fil.go b/chain/types/fil.go index 60a2940c6..2a0ccb460 100644 --- a/chain/types/fil.go +++ b/chain/types/fil.go @@ -12,6 +12,9 @@ import ( type FIL BigInt func (f FIL) String() string { + if f.Int == nil { + return "0 FIL" + } return f.Unitless() + " FIL" } diff --git a/chain/types/message_receipt_cbor.go b/chain/types/message_receipt_cbor.go index e1364e654..955ca4d85 100644 --- a/chain/types/message_receipt_cbor.go +++ b/chain/types/message_receipt_cbor.go @@ -140,7 +140,7 @@ func (t *messageReceiptV0) UnmarshalCBOR(r io.Reader) (err error) { case cbg.MajNegativeInt: extraI = int64(extra) if extraI < 0 { - return fmt.Errorf("int64 negative oveflow") + return fmt.Errorf("int64 negative overflow") } extraI = -1 - extraI default: @@ -186,7 +186,7 @@ func (t *messageReceiptV0) UnmarshalCBOR(r io.Reader) (err error) { case cbg.MajNegativeInt: extraI = int64(extra) if extraI < 0 { - return fmt.Errorf("int64 negative oveflow") + return fmt.Errorf("int64 negative overflow") } extraI = -1 - extraI default: @@ -278,7 +278,7 @@ func (t *messageReceiptV1) UnmarshalCBOR(r io.Reader) (err error) { case cbg.MajNegativeInt: extraI = int64(extra) if extraI < 0 { - return fmt.Errorf("int64 negative oveflow") + return fmt.Errorf("int64 negative overflow") } extraI = -1 - extraI default: @@ -324,7 +324,7 @@ func (t *messageReceiptV1) UnmarshalCBOR(r io.Reader) (err error) { case cbg.MajNegativeInt: extraI = int64(extra) if extraI < 0 { - return fmt.Errorf("int64 negative oveflow") + return fmt.Errorf("int64 negative overflow") } extraI = -1 - extraI default: diff --git a/chain/types/vmcontext.go b/chain/types/vmcontext.go index 83ad81315..bab9c213f 100644 --- a/chain/types/vmcontext.go +++ b/chain/types/vmcontext.go @@ -27,24 +27,3 @@ type StateTree interface { Version() StateTreeVersion } - -type storageWrapper struct { - s Storage -} - -func (sw *storageWrapper) Put(i cbg.CBORMarshaler) 
(cid.Cid, error) { - c, err := sw.s.Put(i) - if err != nil { - return cid.Undef, err - } - - return c, nil -} - -func (sw *storageWrapper) Get(c cid.Cid, out cbg.CBORUnmarshaler) error { - if err := sw.s.Get(c, out); err != nil { - return err - } - - return nil -} diff --git a/chain/vectors/gen/main.go b/chain/vectors/gen/main.go index ce9f1baf8..f4b7c82da 100644 --- a/chain/vectors/gen/main.go +++ b/chain/vectors/gen/main.go @@ -2,6 +2,7 @@ package main import ( "context" + crand "crypto/rand" "encoding/json" "fmt" "math/rand" @@ -145,7 +146,10 @@ func MakeUnsignedMessageVectors() []vectors.UnsignedMessageVector { } params := make([]byte, 32) - rand.Read(params) + _, err = crand.Read(params) + if err != nil { + panic(err) + } msg := &types.Message{ To: to, diff --git a/cli/helper.go b/cli/helper.go index 81a5bb033..fb1899e0a 100644 --- a/cli/helper.go +++ b/cli/helper.go @@ -1,6 +1,7 @@ package cli import ( + "errors" "fmt" "io" "os" @@ -8,7 +9,6 @@ import ( "syscall" ufcli "github.com/urfave/cli/v2" - "golang.org/x/xerrors" ) type PrintHelpErr struct { @@ -52,7 +52,7 @@ func RunApp(app *ufcli.App) { fmt.Fprintf(os.Stderr, "ERROR: %s\n\n", err) // nolint:errcheck } var phe *PrintHelpErr - if xerrors.As(err, &phe) { + if errors.As(err, &phe) { _ = ufcli.ShowCommandHelp(phe.Ctx, phe.Ctx.Command.Name) } os.Exit(1) diff --git a/cli/net.go b/cli/net.go index 516b44b7f..99ee92aef 100644 --- a/cli/net.go +++ b/cli/net.go @@ -282,7 +282,7 @@ var NetDisconnect = &cli.Command{ fmt.Println("failure") return err } - fmt.Printf("disconnect %s: ", pid.Pretty()) + fmt.Printf("disconnect %s: ", pid) err = api.NetDisconnect(ctx, pid) if err != nil { fmt.Println("failure") @@ -312,7 +312,7 @@ var NetConnect = &cli.Command{ } for _, pi := range pis { - fmt.Printf("connect %s: ", pi.ID.Pretty()) + fmt.Printf("connect %s: ", pi.ID) err := api.NetConnect(ctx, pi) if err != nil { fmt.Println("failure") @@ -847,7 +847,8 @@ var NetStatCmd = &cli.Command{ }) for _, stat := range stats { - 
printScope(&stat.stat, name+stat.name) + tmp := stat.stat + printScope(&tmp, name+stat.name) } } diff --git a/cli/state.go b/cli/state.go index 667f6fb19..31666a21c 100644 --- a/cli/state.go +++ b/cli/state.go @@ -1920,8 +1920,29 @@ var StateSysActorCIDsCmd = &cli.Command{ if err != nil { return err } - for name, cid := range actorsCids { - _, _ = fmt.Fprintf(tw, "%v\t%v\n", name, cid) + + var actorsCidTuples []struct { + actorName string + actorCid cid.Cid + } + + for name, actorCid := range actorsCids { + keyVal := struct { + actorName string + actorCid cid.Cid + }{ + actorName: name, + actorCid: actorCid, + } + actorsCidTuples = append(actorsCidTuples, keyVal) + } + + sort.Slice(actorsCidTuples, func(i, j int) bool { + return actorsCidTuples[i].actorName < actorsCidTuples[j].actorName + }) + + for _, keyVal := range actorsCidTuples { + _, _ = fmt.Fprintf(tw, "%v\t%v\n", keyVal.actorName, keyVal.actorCid) } return tw.Flush() }, diff --git a/cli/sync.go b/cli/sync.go index 89d2d94f0..18ff24bc2 100644 --- a/cli/sync.go +++ b/cli/sync.go @@ -14,6 +14,7 @@ import ( "github.com/filecoin-project/lotus/api/v0api" "github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/chain/types" + cliutil "github.com/filecoin-project/lotus/cli/util" ) var SyncCmd = &cli.Command{ @@ -262,6 +263,9 @@ func SyncWait(ctx context.Context, napi v0api.FullNode, watch bool) error { } firstApp = state.VMApplied + // eta computes the ETA for the sync to complete (with a lookback of 10 processed items) + eta := cliutil.NewETA(10) + for { state, err := napi.SyncState(ctx) if err != nil { @@ -312,8 +316,10 @@ func SyncWait(ctx context.Context, napi v0api.FullNode, watch bool) error { fmt.Print("\r\x1b[2K\x1b[A") } + todo := theight - ss.Height + fmt.Printf("Worker: %d; Base: %d; Target: %d (diff: %d)\n", workerID, baseHeight, theight, heightDiff) - fmt.Printf("State: %s; Current Epoch: %d; Todo: %d\n", ss.Stage, ss.Height, theight-ss.Height) + fmt.Printf("State: %s; Current 
Epoch: %d; Todo: %d, ETA: %s\n", ss.Stage, ss.Height, todo, eta.Update(int64(todo))) lastLines = 2 if i%samples == 0 { diff --git a/cli/util/api.go b/cli/util/api.go index 1d6928c3f..3602b752d 100644 --- a/cli/util/api.go +++ b/cli/util/api.go @@ -119,7 +119,7 @@ func GetAPIInfoMulti(ctx *cli.Context, t repo.RepoType) ([]APIInfo, error) { } } - return []APIInfo{}, fmt.Errorf("could not determine API endpoint for node type: %v", t.Type()) + return []APIInfo{}, fmt.Errorf("could not determine API endpoint for node type: %v. Try setting environment variable: %s", t.Type(), primaryEnv) } func GetAPIInfo(ctx *cli.Context, t repo.RepoType) (APIInfo, error) { @@ -164,6 +164,28 @@ func GetRawAPIMulti(ctx *cli.Context, t repo.RepoType, version string) ([]HttpHe return httpHeads, nil } +func GetRawAPIMultiV2(ctx *cli.Context, ainfoCfg []string, version string) ([]HttpHead, error) { + var httpHeads []HttpHead + + if len(ainfoCfg) == 0 { + return httpHeads, xerrors.Errorf("could not get API info: none configured. 
\nConsider getting base.toml with './lotus-provider config get base >/tmp/base.toml' \nthen adding \n[APIs] \n ChainApiInfo = [\" result_from lotus auth api-info --perm=admin \"]\n and updating it with './lotus-provider config set /tmp/base.toml'") + } + for _, i := range ainfoCfg { + ainfo := ParseApiInfo(i) + addr, err := ainfo.DialArgs(version) + if err != nil { + return httpHeads, xerrors.Errorf("could not get DialArgs: %w", err) + } + httpHeads = append(httpHeads, HttpHead{addr: addr, header: ainfo.AuthHeader()}) + } + + if IsVeryVerbose { + _, _ = fmt.Fprintf(ctx.App.Writer, "using raw API %s endpoint: %s\n", version, httpHeads[0].addr) + } + + return httpHeads, nil +} + func GetRawAPI(ctx *cli.Context, t repo.RepoType, version string) (string, http.Header, error) { heads, err := GetRawAPIMulti(ctx, t, version) if err != nil { @@ -393,6 +415,68 @@ func GetFullNodeAPIV1(ctx *cli.Context, opts ...GetFullNodeOption) (v1api.FullNo return &v1API, finalCloser, nil } +func GetFullNodeAPIV1LotusProvider(ctx *cli.Context, ainfoCfg []string, opts ...GetFullNodeOption) (v1api.FullNode, jsonrpc.ClientCloser, error) { + if tn, ok := ctx.App.Metadata["testnode-full"]; ok { + return tn.(v1api.FullNode), func() {}, nil + } + + var options GetFullNodeOptions + for _, opt := range opts { + opt(&options) + } + + var rpcOpts []jsonrpc.Option + if options.ethSubHandler != nil { + rpcOpts = append(rpcOpts, jsonrpc.WithClientHandler("Filecoin", options.ethSubHandler), jsonrpc.WithClientHandlerAlias("eth_subscription", "Filecoin.EthSubscription")) + } + + heads, err := GetRawAPIMultiV2(ctx, ainfoCfg, "v1") + if err != nil { + return nil, nil, err + } + + if IsVeryVerbose { + _, _ = fmt.Fprintln(ctx.App.Writer, "using full node API v1 endpoint:", heads[0].addr) + } + + var fullNodes []api.FullNode + var closers []jsonrpc.ClientCloser + + for _, head := range heads { + v1api, closer, err := client.NewFullNodeRPCV1(ctx.Context, head.addr, head.header, rpcOpts...) 
+ if err != nil { + log.Warnf("Not able to establish connection to node with addr: %s", head.addr) + continue + } + fullNodes = append(fullNodes, v1api) + closers = append(closers, closer) + } + + // When running in cluster mode and trying to establish connections to multiple nodes, fail + // if less than 2 lotus nodes are actually running + if len(heads) > 1 && len(fullNodes) < 2 { + return nil, nil, xerrors.Errorf("Not able to establish connection to more than a single node") + } + + finalCloser := func() { + for _, c := range closers { + c() + } + } + + var v1API api.FullNodeStruct + FullNodeProxy(fullNodes, &v1API) + + v, err := v1API.Version(ctx.Context) + if err != nil { + return nil, nil, err + } + if !v.APIVersion.EqMajorMinor(api.FullAPIVersion1) { + return nil, nil, xerrors.Errorf("Remote API version didn't match (expected %s, remote %s)", api.FullAPIVersion1, v.APIVersion) + } + return &v1API, finalCloser, nil +} + type GetStorageMinerOptions struct { PreferHttp bool } diff --git a/cli/util/eta.go b/cli/util/eta.go new file mode 100644 index 000000000..de06ec1ff --- /dev/null +++ b/cli/util/eta.go @@ -0,0 +1,94 @@ +package cliutil + +import ( + "fmt" + "math" + "time" +) + +// ETA implements a very simple eta calculator based on the number of remaining items. It does not +// require knowing the work size in advance and is therefore suitable for streaming workloads and +// also does not require that consecutive updates have a monotonically decreasing remaining value. 
+type ETA struct { + // max number of items to keep in memory + maxItems int + // a queue of most recently updated items + items []item + // we store the last calculated ETA which we reuse if there was not change in remaining items + lastETA string +} + +type item struct { + timestamp time.Time + remaining int64 +} + +// NewETA creates a new ETA calculator of the given size +func NewETA(maxItems int) *ETA { + return &ETA{ + maxItems: maxItems, + items: make([]item, 0), + } +} + +// Update updates the ETA calculator with the remaining number of items and returns the ETA +func (e *ETA) Update(remaining int64) string { + item := item{ + timestamp: time.Now(), + remaining: remaining, + } + + if len(e.items) == 0 { + e.items = append(e.items, item) + return "" + } + + if e.items[len(e.items)-1].remaining == remaining { + // we ignore updates with the same remaining value and just return the previous ETA + return e.lastETA + } else if e.items[len(e.items)-1].remaining < remaining { + // remaining went up from previous update, lets estimate how many items were processed using the + // average number processed items in the queue. + var avgProcessedPerItem int64 = 1 + if len(e.items) > 1 { + diffRemaining := e.items[0].remaining - e.items[len(e.items)-1].remaining + avgProcessedPerItem = int64(math.Round(float64(diffRemaining) / float64(len(e.items)))) + } + + // diff is the difference in increase in remaining since last update plus the average number of processed + // items we estimate that were processed this round + diff := remaining - e.items[len(e.items)-1].remaining + avgProcessedPerItem + + // we update all items in the queue by shifting their remaining value accordingly. 
This means that we + // always have strictly decreasing remaining values in the queue + for i := range e.items { + e.items[i].remaining += diff + } + } + + // append the item to the queue and remove the oldest item if needed + if len(e.items) >= e.maxItems { + e.items = e.items[1:] + } + e.items = append(e.items, item) + + // calculate the average processing time per item in the queue + diffMs := e.items[len(e.items)-1].timestamp.Sub(e.items[0].timestamp).Milliseconds() + nrItemsProcessed := e.items[0].remaining - e.items[len(e.items)-1].remaining + avg := diffMs / nrItemsProcessed + + // use that average processing time to estimate how long the remaining items will take + // and cache that ETA so we don't have to recalculate it on every update unless the + // remaining value changes + e.lastETA = msToETA(avg * remaining) + + return e.lastETA +} + +func msToETA(ms int64) string { + seconds := ms / 1000 + minutes := seconds / 60 + hours := minutes / 60 + + return fmt.Sprintf("%02dh:%02dm:%02ds", hours, minutes%60, seconds%60) +} diff --git a/cli/wallet.go b/cli/wallet.go index 628d6841d..faf7bc239 100644 --- a/cli/wallet.go +++ b/cli/wallet.go @@ -22,6 +22,7 @@ import ( "github.com/filecoin-project/go-state-types/network" "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/chain/actors/builtin" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/lib/tablewriter" ) @@ -459,7 +460,12 @@ var walletSign = &cli.Command{ sig, err := api.WalletSign(ctx, addr, msg) if err != nil { - return err + // Check if the address is a multisig address + act, actErr := api.StateGetActor(ctx, addr, types.EmptyTSK) + if actErr == nil && builtin.IsMultisigActor(act.Code) { + return xerrors.Errorf("specified signer address is a multisig actor, it doesn’t have keys to sign transactions. 
To send a message with a multisig, signers of the multisig need to propose and approve transactions.") + } + return xerrors.Errorf("failed to sign message: %w", err) } sigBytes := append([]byte{byte(sig.Type)}, sig.Data...) diff --git a/cmd/lotus-bench/cli.go b/cmd/lotus-bench/cli.go new file mode 100644 index 000000000..0eaeb6ccb --- /dev/null +++ b/cmd/lotus-bench/cli.go @@ -0,0 +1,312 @@ +package main + +import ( + "errors" + "fmt" + "io" + "os" + "os/exec" + "os/signal" + "strconv" + "strings" + "sync" + "time" + + "github.com/urfave/cli/v2" +) + +var cliCmd = &cli.Command{ + Name: "cli", + Usage: "Runs a concurrent stress test on one or more binaries commands and prints the performance metrics including latency distribution and histogram", + Description: `This benchmark has the following features: +* Can query each command both sequentially and concurrently +* Supports rate limiting +* Can query multiple different commands at once (supporting different concurrency level and rate limiting for each command) +* Gives a nice reporting summary of the stress testing of each command (including latency distribution, histogram and more) +* Easy to use + +To use this benchmark you must specify the commands you want to test using the --cmd options, the format of it is: + + --cmd=CMD[:CONCURRENCY][:QPS] where only NAME is required. 
+ +Here are some real examples: + lotus-bench cli --cmd='lotus-shed mpool miner-select-messages' // runs the command with default concurrency and qps + lotus-bench cli --cmd='lotus-shed mpool miner-select-messages:3' // override concurrency to 3 + lotus-bench cli --cmd='lotus-shed mpool miner-select-messages::100' // override to 100 qps while using default concurrency + lotus-bench cli --cmd='lotus-shed mpool miner-select-messages:3:100' // run using 3 workers but limit to 100 qps + lotus-bench cli --cmd='lotus-shed mpool miner-select-messages' --cmd='lotus sync wait' // run two commands at once +`, + Flags: []cli.Flag{ + &cli.DurationFlag{ + Name: "duration", + Value: 60 * time.Second, + Usage: "Duration of benchmark in seconds", + }, + &cli.IntFlag{ + Name: "concurrency", + Value: 10, + Usage: "How many workers should be used per command (can be overridden per command)", + }, + &cli.IntFlag{ + Name: "qps", + Value: 0, + Usage: "How many requests per second should be sent per command (can be overridden per command), a value of 0 means no limit", + }, + &cli.StringSliceFlag{ + Name: "cmd", + Usage: `Command to benchmark, you can specify multiple commands by repeating this flag. 
You can also specify command specific options to set the concurrency and qps for each command (see usage).`, + }, + &cli.DurationFlag{ + Name: "watch", + Value: 0 * time.Second, + Usage: "If >0 then generates reports every N seconds (only supports linux/unix)", + }, + &cli.BoolFlag{ + Name: "print-response", + Value: false, + Usage: "print the response of each request", + }, + }, + Action: func(cctx *cli.Context) error { + if len(cctx.StringSlice("cmd")) == 0 { + return errors.New("you must specify and least one cmd to benchmark") + } + + var cmds []*CMD + for _, str := range cctx.StringSlice("cmd") { + entries := strings.SplitN(str, ":", 3) + if len(entries) == 0 { + return errors.New("invalid cmd format") + } + + // check if concurrency was specified + concurrency := cctx.Int("concurrency") + if len(entries) > 1 { + if len(entries[1]) > 0 { + var err error + concurrency, err = strconv.Atoi(entries[1]) + if err != nil { + return fmt.Errorf("could not parse concurrency value from command %s: %v", entries[0], err) + } + } + } + + // check if qps was specified + qps := cctx.Int("qps") + if len(entries) > 2 { + if len(entries[2]) > 0 { + var err error + qps, err = strconv.Atoi(entries[2]) + if err != nil { + return fmt.Errorf("could not parse qps value from command %s: %v", entries[0], err) + } + } + } + + cmds = append(cmds, &CMD{ + w: os.Stdout, + cmd: entries[0], + concurrency: concurrency, + qps: qps, + printResp: cctx.Bool("print-response"), + }) + } + + // terminate early on ctrl+c + c := make(chan os.Signal, 1) + signal.Notify(c, os.Interrupt) + go func() { + <-c + fmt.Println("Received interrupt, stopping...") + for _, cmd := range cmds { + cmd.Stop() + } + }() + + // stop all threads after duration + go func() { + time.Sleep(cctx.Duration("duration")) + for _, cmd := range cmds { + cmd.Stop() + } + }() + + // start all threads + var wg sync.WaitGroup + wg.Add(len(cmds)) + + for _, cmd := range cmds { + go func(cmd *CMD) { + defer wg.Done() + err := cmd.Run() 
+ if err != nil { + fmt.Printf("error running cmd: %v\n", err) + } + }(cmd) + } + + // if watch is set then print a report every N seconds + var progressCh chan struct{} + if cctx.Duration("watch") > 0 { + progressCh = make(chan struct{}, 1) + go func(progressCh chan struct{}) { + ticker := time.NewTicker(cctx.Duration("watch")) + for { + clearAndPrintReport := func() { + // clear the screen move the cursor to the top left + fmt.Print("\033[2J") + fmt.Printf("\033[%d;%dH", 1, 1) + for i, cmd := range cmds { + cmd.Report() + if i < len(cmds)-1 { + fmt.Println() + } + } + } + select { + case <-ticker.C: + clearAndPrintReport() + case <-progressCh: + clearAndPrintReport() + return + } + } + }(progressCh) + } + + wg.Wait() + + if progressCh != nil { + // wait for the watch go routine to return + progressCh <- struct{}{} + + // no need to print the report again + return nil + } + + // print the report for each command + for i, cmd := range cmds { + cmd.Report() + if i < len(cmds)-1 { + fmt.Println() + } + } + + return nil + }, +} + +// CMD handles the benchmarking of a single command. 
+type CMD struct { + w io.Writer + // the cmd we want to benchmark + cmd string + // the number of concurrent requests to make to this command + concurrency int + // if >0 then limit to qps is the max number of requests per second to make to this command (0 = no limit) + qps int + // whether or not to print the response of each request (useful for debugging) + printResp bool + // instruct the worker go routines to stop + stopCh chan struct{} + // when the command bencharking started + start time.Time + // results channel is used by the workers to send results to the reporter + results chan *result + // reporter handles reading the results from workers and printing the report statistics + reporter *Reporter +} + +func (c *CMD) Run() error { + var wg sync.WaitGroup + wg.Add(c.concurrency) + + c.results = make(chan *result, c.concurrency*1_000) + c.stopCh = make(chan struct{}, c.concurrency) + + go func() { + c.reporter = NewReporter(c.results, c.w) + c.reporter.Run() + }() + + c.start = time.Now() + + // throttle the number of requests per second + var qpsTicker *time.Ticker + if c.qps > 0 { + qpsTicker = time.NewTicker(time.Second / time.Duration(c.qps)) + } + + for i := 0; i < c.concurrency; i++ { + go func() { + c.startWorker(qpsTicker) + wg.Done() + }() + } + wg.Wait() + + // close the results channel so reporter will stop + close(c.results) + + // wait until the reporter is done + <-c.reporter.doneCh + + return nil +} + +func (c *CMD) startWorker(qpsTicker *time.Ticker) { + for { + // check if we should stop + select { + case <-c.stopCh: + return + default: + } + + // wait for the next tick if we are rate limiting this command + if qpsTicker != nil { + <-qpsTicker.C + } + + start := time.Now() + + var statusCode int = 0 + + arr := strings.Fields(c.cmd) + + data, err := exec.Command(arr[0], arr[1:]...).Output() + if err != nil { + fmt.Println("1") + if exitError, ok := err.(*exec.ExitError); ok { + statusCode = exitError.ExitCode() + } else { + statusCode = 1 + } 
+ } else { + if c.printResp { + fmt.Printf("[%s] %s", c.cmd, string(data)) + } + } + + c.results <- &result{ + statusCode: &statusCode, + err: err, + duration: time.Since(start), + } + } +} + +func (c *CMD) Stop() { + for i := 0; i < c.concurrency; i++ { + c.stopCh <- struct{}{} + } +} + +func (c *CMD) Report() { + total := time.Since(c.start) + fmt.Fprintf(c.w, "[%s]:\n", c.cmd) + fmt.Fprintf(c.w, "- Options:\n") + fmt.Fprintf(c.w, " - concurrency: %d\n", c.concurrency) + fmt.Fprintf(c.w, " - qps: %d\n", c.qps) + c.reporter.Print(total, c.w) +} diff --git a/cmd/lotus-bench/import.go b/cmd/lotus-bench/import.go index 9f43d9538..16adbad60 100644 --- a/cmd/lotus-bench/import.go +++ b/cmd/lotus-bench/import.go @@ -304,7 +304,7 @@ var importBenchCmd = &cli.Command{ return fmt.Errorf("no CAR file provided for import") } - head, err = cs.Import(cctx.Context, carFile) + head, _, err = cs.Import(cctx.Context, carFile) if err != nil { return err } @@ -497,21 +497,6 @@ type Invocation struct { const GasPerNs = 10 -func countGasCosts(et *types.ExecutionTrace) int64 { - var cgas int64 - - for _, gc := range et.GasCharges { - cgas += gc.ComputeGas - } - - for _, sub := range et.Subcalls { - c := countGasCosts(&sub) //nolint - cgas += c - } - - return cgas -} - type stats struct { timeTaken meanVar gasRatio meanVar diff --git a/cmd/lotus-bench/main.go b/cmd/lotus-bench/main.go index b70709ff2..7d3c0cde0 100644 --- a/cmd/lotus-bench/main.go +++ b/cmd/lotus-bench/main.go @@ -3,10 +3,10 @@ package main import ( "bytes" "context" + "crypto/rand" "encoding/json" "fmt" "math/big" - "math/rand" "os" "path/filepath" "sync" @@ -120,6 +120,7 @@ func main() { sealBenchCmd, simpleCmd, importBenchCmd, + cliCmd, rpcCmd, }, } @@ -546,7 +547,10 @@ var sealBenchCmd = &cli.Command{ } var challenge [32]byte - rand.Read(challenge[:]) + _, err = rand.Read(challenge[:]) + if err != nil { + return err + } beforePost := time.Now() @@ -776,9 +780,7 @@ func runSeals(sb *ffiwrapper.Sealer, sbfs 
*basicfs.Provider, numSectors int, par start := time.Now() log.Infof("[%d] Writing piece into sector...", i) - r := rand.New(rand.NewSource(100 + int64(i))) - - pi, err := sb.AddPiece(context.TODO(), sid, nil, abi.PaddedPieceSize(sectorSize).Unpadded(), r) + pi, err := sb.AddPiece(context.TODO(), sid, nil, abi.PaddedPieceSize(sectorSize).Unpadded(), rand.Reader) if err != nil { return nil, nil, err } diff --git a/cmd/lotus-bench/reporter.go b/cmd/lotus-bench/reporter.go new file mode 100644 index 000000000..ad2ad6b9d --- /dev/null +++ b/cmd/lotus-bench/reporter.go @@ -0,0 +1,181 @@ +package main + +import ( + "fmt" + "io" + "sort" + "strings" + "sync" + "text/tabwriter" + "time" +) + +// result is the result of a single rpc method request. +type result struct { + err error + statusCode *int + duration time.Duration +} + +// Reporter reads the results from the workers through the results channel and aggregates the results. +type Reporter struct { + // write the report to this writer + w io.Writer + // the reporter read the results from this channel + results chan *result + // doneCh is used to signal that the reporter has finished reading the results (channel has closed) + doneCh chan bool + + // lock protect the following fields during critical sections (if --watch was specified) + lock sync.Mutex + // the latencies of all requests + latencies []int64 + // the number of requests that returned each status code + statusCodes map[int]int + // the number of errors that occurred + errors map[string]int +} + +func NewReporter(results chan *result, w io.Writer) *Reporter { + return &Reporter{ + w: w, + results: results, + doneCh: make(chan bool, 1), + statusCodes: make(map[int]int), + errors: make(map[string]int), + } +} + +func (r *Reporter) Run() { + for res := range r.results { + r.lock.Lock() + + r.latencies = append(r.latencies, res.duration.Milliseconds()) + + if res.statusCode != nil { + r.statusCodes[*res.statusCode]++ + } + + if res.err != nil { + if 
len(r.errors) < 1_000_000 { + r.errors[res.err.Error()]++ + } else { + // we don't want to store too many errors in memory + r.errors["hidden"]++ + } + } else { + r.errors["nil"]++ + } + + r.lock.Unlock() + } + + r.doneCh <- true +} + +func (r *Reporter) Print(elapsed time.Duration, w io.Writer) { + r.lock.Lock() + defer r.lock.Unlock() + + nrReq := int64(len(r.latencies)) + if nrReq == 0 { + fmt.Println("No requests were made") + return + } + + // we need to sort the latencies slice to calculate the percentiles + sort.Slice(r.latencies, func(i, j int) bool { + return r.latencies[i] < r.latencies[j] + }) + + var totalLatency int64 = 0 + for _, latency := range r.latencies { + totalLatency += latency + } + + fmt.Fprintf(w, "- Total Requests: %d\n", nrReq) + fmt.Fprintf(w, "- Total Duration: %dms\n", elapsed.Milliseconds()) + fmt.Fprintf(w, "- Requests/sec: %f\n", float64(nrReq)/elapsed.Seconds()) + fmt.Fprintf(w, "- Avg latency: %dms\n", totalLatency/nrReq) + fmt.Fprintf(w, "- Median latency: %dms\n", r.latencies[nrReq/2]) + fmt.Fprintf(w, "- Latency distribution:\n") + percentiles := []float64{0.1, 0.5, 0.9, 0.95, 0.99, 0.999} + for _, p := range percentiles { + idx := int64(p * float64(nrReq)) + fmt.Fprintf(w, " %s%% in %dms\n", fmt.Sprintf("%.2f", p*100.0), r.latencies[idx]) + } + + // create a simple histogram with 10 buckets spanning the range of latency + // into equal ranges + // + nrBucket := 10 + buckets := make([]Bucket, nrBucket) + latencyRange := r.latencies[len(r.latencies)-1] + bucketRange := latencyRange / int64(nrBucket) + + // mark the end of each bucket + for i := 0; i < nrBucket; i++ { + buckets[i].start = int64(i) * bucketRange + buckets[i].end = buckets[i].start + bucketRange + // extend the last bucked by any remaning range caused by the integer division + if i == nrBucket-1 { + buckets[i].end = latencyRange + } + } + + // count the number of requests in each bucket + currBucket := 0 + for i := 0; i < len(r.latencies); { + if r.latencies[i] <= 
buckets[currBucket].end { + buckets[currBucket].cnt++ + i++ + } else { + currBucket++ + } + } + + // print the histogram using a tabwriter which will align the columns nicely + fmt.Fprintf(w, "- Histogram:\n") + const padding = 2 + tabWriter := tabwriter.NewWriter(w, 0, 0, padding, ' ', tabwriter.AlignRight|tabwriter.Debug) + for i := 0; i < nrBucket; i++ { + ratio := float64(buckets[i].cnt) / float64(nrReq) + bars := strings.Repeat("#", int(ratio*100)) + fmt.Fprintf(tabWriter, " %d-%dms\t%d\t%s (%s%%)\n", buckets[i].start, buckets[i].end, buckets[i].cnt, bars, fmt.Sprintf("%.2f", ratio*100)) + } + tabWriter.Flush() //nolint:errcheck + + fmt.Fprintf(w, "- Status codes:\n") + for code, cnt := range r.statusCodes { + fmt.Fprintf(w, " [%d]: %d\n", code, cnt) + } + + // print the 10 most occurring errors (in case error values are not unique) + // + type kv struct { + err string + cnt int + } + var sortedErrors []kv + for err, cnt := range r.errors { + sortedErrors = append(sortedErrors, kv{err, cnt}) + } + sort.Slice(sortedErrors, func(i, j int) bool { + return sortedErrors[i].cnt > sortedErrors[j].cnt + }) + fmt.Fprintf(w, "- Errors (top 10):\n") + for i, se := range sortedErrors { + if i > 10 { + break + } + fmt.Fprintf(w, " [%s]: %d\n", se.err, se.cnt) + } +} + +type Bucket struct { + start int64 + // the end value of the bucket + end int64 + // how many entries are in the bucket + cnt int +} diff --git a/cmd/lotus-bench/rpc.go b/cmd/lotus-bench/rpc.go index 5da784c6e..4af4bdb27 100644 --- a/cmd/lotus-bench/rpc.go +++ b/cmd/lotus-bench/rpc.go @@ -9,11 +9,9 @@ import ( "net/http" "os" "os/signal" - "sort" "strconv" "strings" "sync" - "text/tabwriter" "time" "github.com/urfave/cli/v2" @@ -243,13 +241,6 @@ type RPCMethod struct { reporter *Reporter } -// result is the result of a single rpc method request. 
-type result struct { - err error - statusCode *int - duration time.Duration -} - func (rpc *RPCMethod) Run() error { client := &http.Client{ Timeout: 0, @@ -411,166 +402,3 @@ func (rpc *RPCMethod) Report() { fmt.Fprintf(rpc.w, " - qps: %d\n", rpc.qps) rpc.reporter.Print(total, rpc.w) } - -// Reporter reads the results from the workers through the results channel and aggregates the results. -type Reporter struct { - // write the report to this writer - w io.Writer - // the reporter read the results from this channel - results chan *result - // doneCh is used to signal that the reporter has finished reading the results (channel has closed) - doneCh chan bool - - // lock protect the following fields during critical sections (if --watch was specified) - lock sync.Mutex - // the latencies of all requests - latencies []int64 - // the number of requests that returned each status code - statusCodes map[int]int - // the number of errors that occurred - errors map[string]int -} - -func NewReporter(results chan *result, w io.Writer) *Reporter { - return &Reporter{ - w: w, - results: results, - doneCh: make(chan bool, 1), - statusCodes: make(map[int]int), - errors: make(map[string]int), - } -} - -func (r *Reporter) Run() { - for res := range r.results { - r.lock.Lock() - - r.latencies = append(r.latencies, res.duration.Milliseconds()) - - if res.statusCode != nil { - r.statusCodes[*res.statusCode]++ - } - - if res.err != nil { - if len(r.errors) < 1_000_000 { - r.errors[res.err.Error()]++ - } else { - // we don't want to store too many errors in memory - r.errors["hidden"]++ - } - } else { - r.errors["nil"]++ - } - - r.lock.Unlock() - } - - r.doneCh <- true -} - -func (r *Reporter) Print(elapsed time.Duration, w io.Writer) { - r.lock.Lock() - defer r.lock.Unlock() - - nrReq := int64(len(r.latencies)) - if nrReq == 0 { - fmt.Println("No requests were made") - return - } - - // we need to sort the latencies slice to calculate the percentiles - sort.Slice(r.latencies, func(i, j 
int) bool { - return r.latencies[i] < r.latencies[j] - }) - - var totalLatency int64 = 0 - for _, latency := range r.latencies { - totalLatency += latency - } - - fmt.Fprintf(w, "- Total Requests: %d\n", nrReq) - fmt.Fprintf(w, "- Total Duration: %dms\n", elapsed.Milliseconds()) - fmt.Fprintf(w, "- Requests/sec: %f\n", float64(nrReq)/elapsed.Seconds()) - fmt.Fprintf(w, "- Avg latency: %dms\n", totalLatency/nrReq) - fmt.Fprintf(w, "- Median latency: %dms\n", r.latencies[nrReq/2]) - fmt.Fprintf(w, "- Latency distribution:\n") - percentiles := []float64{0.1, 0.5, 0.9, 0.95, 0.99, 0.999} - for _, p := range percentiles { - idx := int64(p * float64(nrReq)) - fmt.Fprintf(w, " %s%% in %dms\n", fmt.Sprintf("%.2f", p*100.0), r.latencies[idx]) - } - - // create a simple histogram with 10 buckets spanning the range of latency - // into equal ranges - // - nrBucket := 10 - buckets := make([]Bucket, nrBucket) - latencyRange := r.latencies[len(r.latencies)-1] - bucketRange := latencyRange / int64(nrBucket) - - // mark the end of each bucket - for i := 0; i < nrBucket; i++ { - buckets[i].start = int64(i) * bucketRange - buckets[i].end = buckets[i].start + bucketRange - // extend the last bucked by any remaning range caused by the integer division - if i == nrBucket-1 { - buckets[i].end = latencyRange - } - } - - // count the number of requests in each bucket - currBucket := 0 - for i := 0; i < len(r.latencies); { - if r.latencies[i] <= buckets[currBucket].end { - buckets[currBucket].cnt++ - i++ - } else { - currBucket++ - } - } - - // print the histogram using a tabwriter which will align the columns nicely - fmt.Fprintf(w, "- Histogram:\n") - const padding = 2 - tabWriter := tabwriter.NewWriter(w, 0, 0, padding, ' ', tabwriter.AlignRight|tabwriter.Debug) - for i := 0; i < nrBucket; i++ { - ratio := float64(buckets[i].cnt) / float64(nrReq) - bars := strings.Repeat("#", int(ratio*100)) - fmt.Fprintf(tabWriter, " %d-%dms\t%d\t%s (%s%%)\n", buckets[i].start, buckets[i].end, 
buckets[i].cnt, bars, fmt.Sprintf("%.2f", ratio*100)) - } - tabWriter.Flush() //nolint:errcheck - - fmt.Fprintf(w, "- Status codes:\n") - for code, cnt := range r.statusCodes { - fmt.Fprintf(w, " [%d]: %d\n", code, cnt) - } - - // print the 10 most occurring errors (in case error values are not unique) - // - type kv struct { - err string - cnt int - } - var sortedErrors []kv - for err, cnt := range r.errors { - sortedErrors = append(sortedErrors, kv{err, cnt}) - } - sort.Slice(sortedErrors, func(i, j int) bool { - return sortedErrors[i].cnt > sortedErrors[j].cnt - }) - fmt.Fprintf(w, "- Errors (top 10):\n") - for i, se := range sortedErrors { - if i > 10 { - break - } - fmt.Fprintf(w, " [%s]: %d\n", se.err, se.cnt) - } -} - -type Bucket struct { - start int64 - // the end value of the bucket - end int64 - // how many entries are in the bucket - cnt int -} diff --git a/cmd/lotus-bench/simple.go b/cmd/lotus-bench/simple.go index 8ae5713ad..35d909ffb 100644 --- a/cmd/lotus-bench/simple.go +++ b/cmd/lotus-bench/simple.go @@ -266,7 +266,10 @@ var simplePreCommit1 = &cli.Command{ ProofType: spt(sectorSize, cctx.Bool("synthetic")), } - var ticket [32]byte // all zero + ticket := [32]byte{} + for i := range ticket { + ticket[i] = 1 + } pieces, err := ParsePieceInfos(cctx, 3) if err != nil { @@ -305,7 +308,36 @@ var simplePreCommit2 = &cli.Command{ Name: "synthetic", Usage: "generate synthetic PoRep proofs", }, + &cli.StringFlag{ + Name: "external-pc2", + Usage: "command for computing PC2 externally", + }, }, + Description: `Compute PreCommit2 inputs and seal a sector. + +--external-pc2 can be used to compute the PreCommit2 inputs externally. +The flag behaves similarly to the related lotus-worker flag, using it in +lotus-bench may be useful for testing if the external PreCommit2 command is +invoked correctly. 
+ +The command will be called with a number of environment variables set: +* EXTSEAL_PC2_SECTOR_NUM: the sector number +* EXTSEAL_PC2_SECTOR_MINER: the miner id +* EXTSEAL_PC2_PROOF_TYPE: the proof type +* EXTSEAL_PC2_SECTOR_SIZE: the sector size in bytes +* EXTSEAL_PC2_CACHE: the path to the cache directory +* EXTSEAL_PC2_SEALED: the path to the sealed sector file (initialized with unsealed data by the caller) +* EXTSEAL_PC2_PC1OUT: output from rust-fil-proofs precommit1 phase (base64 encoded json) + +The command is expected to: +* Create cache sc-02-data-tree-r* files +* Create cache sc-02-data-tree-c* files +* Create cache p_aux / t_aux files +* Transform the sealed file in place + +Example invocation of lotus-bench as external executor: +'./lotus-bench simple precommit2 --sector-size $EXTSEAL_PC2_SECTOR_SIZE $EXTSEAL_PC2_SEALED $EXTSEAL_PC2_CACHE $EXTSEAL_PC2_PC1OUT' +`, ArgsUsage: "[sealed] [cache] [pc1 out]", Action: func(cctx *cli.Context) error { ctx := cctx.Context @@ -330,7 +362,18 @@ var simplePreCommit2 = &cli.Command{ storiface.FTSealed: cctx.Args().Get(0), storiface.FTCache: cctx.Args().Get(1), } - sealer, err := ffiwrapper.New(pp) + + var opts []ffiwrapper.FFIWrapperOpt + + if cctx.IsSet("external-pc2") { + extSeal := ffiwrapper.ExternalSealer{ + PreCommit2: ffiwrapper.MakeExternPrecommit2(cctx.String("external-pc2")), + } + + opts = append(opts, ffiwrapper.WithExternalSealCalls(extSeal)) + } + + sealer, err := ffiwrapper.New(pp, opts...) 
if err != nil { return err } @@ -420,7 +463,12 @@ var simpleCommit1 = &cli.Command{ start := time.Now() - var ticket, seed [32]byte // all zero + ticket := [32]byte{} + seed := [32]byte{} + for i := range ticket { + ticket[i] = 1 + seed[i] = 1 + } commd, err := cid.Parse(cctx.Args().Get(2)) if err != nil { @@ -650,6 +698,10 @@ var simpleWinningPost = &cli.Command{ Usage: "pass miner address (only necessary if using existing sectorbuilder)", Value: "t01000", }, + &cli.BoolFlag{ + Name: "show-inputs", + Usage: "output inputs for winning post generation", + }, }, ArgsUsage: "[sealed] [cache] [comm R] [sector num]", Action: func(cctx *cli.Context) error { @@ -720,6 +772,17 @@ var simpleWinningPost = &cli.Command{ fmt.Printf("Vanilla %s (%s)\n", challenge.Sub(start), bps(sectorSize, 1, challenge.Sub(start))) fmt.Printf("Proof %s (%s)\n", end.Sub(challenge), bps(sectorSize, 1, end.Sub(challenge))) fmt.Println(base64.StdEncoding.EncodeToString(proof[0].ProofBytes)) + + if cctx.Bool("show-inputs") { + fmt.Println("GenerateWinningPoStWithVanilla info:") + + fmt.Printf(" wpt: %d\n", wpt) + fmt.Printf(" mid: %d\n", mid) + fmt.Printf(" rand: %x\n", rand) + fmt.Printf(" vp: %x\n", vp) + fmt.Printf(" proof: %x\n", proof) + } + return nil }, } diff --git a/cmd/lotus-gateway/main.go b/cmd/lotus-gateway/main.go index 2023551ef..35a43e18b 100644 --- a/cmd/lotus-gateway/main.go +++ b/cmd/lotus-gateway/main.go @@ -143,7 +143,7 @@ var runCmd = &cli.Command{ }, &cli.DurationFlag{ Name: "rate-limit-timeout", - Usage: "the maximum time to wait for the rate limter before returning an error to clients", + Usage: "the maximum time to wait for the rate limiter before returning an error to clients", Value: gateway.DefaultRateLimitTimeout, }, &cli.Int64Flag{ diff --git a/cmd/lotus-miner/init.go b/cmd/lotus-miner/init.go index c109e85b9..1b76960e9 100644 --- a/cmd/lotus-miner/init.go +++ b/cmd/lotus-miner/init.go @@ -463,7 +463,7 @@ func storageMinerInit(ctx context.Context, cctx *cli.Context, 
api v1api.FullNode wsts := statestore.New(namespace.Wrap(mds, modules.WorkerCallsPrefix)) smsts := statestore.New(namespace.Wrap(mds, modules.ManagerWorkPrefix)) - si := paths.NewIndex(nil) + si := paths.NewMemIndex(nil) lstor, err := paths.NewLocal(ctx, lr, si, nil) if err != nil { diff --git a/cmd/lotus-miner/proving.go b/cmd/lotus-miner/proving.go index 3ecc58ba7..2fc1427b5 100644 --- a/cmd/lotus-miner/proving.go +++ b/cmd/lotus-miner/proving.go @@ -559,7 +559,8 @@ var provingCheckProvableCmd = &cli.Command{ for parIdx, par := range partitions { sectors := make(map[abi.SectorNumber]struct{}) - sectorInfos, err := api.StateMinerSectors(ctx, addr, &par.LiveSectors, types.EmptyTSK) + tmp := par.LiveSectors + sectorInfos, err := api.StateMinerSectors(ctx, addr, &tmp, types.EmptyTSK) if err != nil { return err } diff --git a/cmd/lotus-miner/sectors.go b/cmd/lotus-miner/sectors.go index 6fd0fd709..07cc2e795 100644 --- a/cmd/lotus-miner/sectors.go +++ b/cmd/lotus-miner/sectors.go @@ -2566,6 +2566,8 @@ var sectorsUnsealCmd = &cli.Command{ return xerrors.Errorf("could not parse sector number: %w", err) } + fmt.Printf("Unsealing sector %d\n", sectorNum) + return minerAPI.SectorUnseal(ctx, abi.SectorNumber(sectorNum)) }, } diff --git a/cmd/lotus-provider/config.go b/cmd/lotus-provider/config.go new file mode 100644 index 000000000..5bd681429 --- /dev/null +++ b/cmd/lotus-provider/config.go @@ -0,0 +1,259 @@ +package main + +import ( + "context" + "database/sql" + "errors" + "fmt" + "io" + "os" + "path" + "strings" + + "github.com/BurntSushi/toml" + "github.com/urfave/cli/v2" + "golang.org/x/xerrors" + + "github.com/filecoin-project/lotus/lib/harmony/harmonydb" + "github.com/filecoin-project/lotus/node/config" +) + +var configCmd = &cli.Command{ + Name: "config", + Usage: "Manage node config by layers. The layer 'base' will always be applied. 
", + Subcommands: []*cli.Command{ + configDefaultCmd, + configSetCmd, + configGetCmd, + configListCmd, + configViewCmd, + configRmCmd, + configMigrateCmd, + }, +} + +var configDefaultCmd = &cli.Command{ + Name: "default", + Aliases: []string{"defaults"}, + Usage: "Print default node config", + Flags: []cli.Flag{ + &cli.BoolFlag{ + Name: "no-comment", + Usage: "don't comment default values", + }, + }, + Action: func(cctx *cli.Context) error { + comment := !cctx.Bool("no-comment") + cfg, err := getDefaultConfig(comment) + if err != nil { + return err + } + fmt.Print(cfg) + + return nil + }, +} + +func getDefaultConfig(comment bool) (string, error) { + c := config.DefaultLotusProvider() + cb, err := config.ConfigUpdate(c, nil, config.Commented(comment), config.DefaultKeepUncommented(), config.NoEnv()) + if err != nil { + return "", err + } + return string(cb), nil +} + +var configSetCmd = &cli.Command{ + Name: "set", + Aliases: []string{"add", "update", "create"}, + Usage: "Set a config layer or the base by providing a filename or stdin.", + ArgsUsage: "a layer's file name", + Flags: []cli.Flag{ + &cli.StringFlag{ + Name: "title", + Usage: "title of the config layer (req'd for stdin)", + }, + }, + Action: func(cctx *cli.Context) error { + args := cctx.Args() + + db, err := makeDB(cctx) + if err != nil { + return err + } + + name := cctx.String("title") + var stream io.Reader = os.Stdin + if args.Len() != 1 { + if cctx.String("title") == "" { + return errors.New("must have a title for stdin, or a file name") + } + } else { + stream, err = os.Open(args.First()) + if err != nil { + return fmt.Errorf("cannot open file %s: %w", args.First(), err) + } + if name == "" { + name = strings.Split(path.Base(args.First()), ".")[0] + } + } + bytes, err := io.ReadAll(stream) + if err != nil { + return fmt.Errorf("cannot read stream/file %w", err) + } + + lp := config.DefaultLotusProvider() // ensure it's toml + _, err = toml.Decode(string(bytes), lp) + if err != nil { + return 
fmt.Errorf("cannot decode file: %w", err) + } + _ = lp + + _, err = db.Exec(context.Background(), + `INSERT INTO harmony_config (title, config) VALUES ($1, $2) + ON CONFLICT (title) DO UPDATE SET config = excluded.config`, name, string(bytes)) + if err != nil { + return fmt.Errorf("unable to save config layer: %w", err) + } + + fmt.Println("Layer " + name + " created/updated") + return nil + }, +} + +var configGetCmd = &cli.Command{ + Name: "get", + Aliases: []string{"cat", "show"}, + Usage: "Get a config layer by name. You may want to pipe the output to a file, or use 'less'", + ArgsUsage: "layer name", + Action: func(cctx *cli.Context) error { + args := cctx.Args() + if args.Len() != 1 { + return fmt.Errorf("want 1 layer arg, got %d", args.Len()) + } + db, err := makeDB(cctx) + if err != nil { + return err + } + + var cfg string + err = db.QueryRow(context.Background(), `SELECT config FROM harmony_config WHERE title=$1`, args.First()).Scan(&cfg) + if err != nil { + return err + } + fmt.Println(cfg) + + return nil + }, +} + +var configListCmd = &cli.Command{ + Name: "list", + Aliases: []string{"ls"}, + Usage: "List config layers you can get.", + Flags: []cli.Flag{}, + Action: func(cctx *cli.Context) error { + db, err := makeDB(cctx) + if err != nil { + return err + } + var res []string + err = db.Select(context.Background(), &res, `SELECT title FROM harmony_config ORDER BY title`) + if err != nil { + return fmt.Errorf("unable to read from db: %w", err) + } + for _, r := range res { + fmt.Println(r) + } + + return nil + }, +} + +var configRmCmd = &cli.Command{ + Name: "remove", + Aliases: []string{"rm", "del", "delete"}, + Usage: "Remove a named config layer.", + Flags: []cli.Flag{}, + Action: func(cctx *cli.Context) error { + args := cctx.Args() + if args.Len() != 1 { + return errors.New("must have exactly 1 arg for the layer name") + } + db, err := makeDB(cctx) + if err != nil { + return err + } + ct, err := db.Exec(context.Background(), `DELETE FROM 
harmony_config WHERE title=$1`, args.First()) + if err != nil { + return fmt.Errorf("unable to read from db: %w", err) + } + if ct == 0 { + return fmt.Errorf("no layer named %s", args.First()) + } + + return nil + }, +} +var configViewCmd = &cli.Command{ + Name: "interpret", + Aliases: []string{"view", "stacked", "stack"}, + Usage: "Interpret stacked config layers by this version of lotus-provider, with system-generated comments.", + ArgsUsage: "a list of layers to be interpreted as the final config", + Flags: []cli.Flag{ + &cli.StringSliceFlag{ + Name: "layers", + Usage: "comma or space separated list of layers to be interpreted", + Value: cli.NewStringSlice("base"), + Required: true, + }, + }, + Action: func(cctx *cli.Context) error { + db, err := makeDB(cctx) + if err != nil { + return err + } + lp, err := getConfig(cctx, db) + if err != nil { + return err + } + cb, err := config.ConfigUpdate(lp, config.DefaultLotusProvider(), config.Commented(true), config.DefaultKeepUncommented(), config.NoEnv()) + if err != nil { + return xerrors.Errorf("cannot interpret config: %w", err) + } + fmt.Println(string(cb)) + return nil + }, +} + +func getConfig(cctx *cli.Context, db *harmonydb.DB) (*config.LotusProviderConfig, error) { + lp := config.DefaultLotusProvider() + have := []string{} + layers := cctx.StringSlice("layers") + for _, layer := range layers { + text := "" + err := db.QueryRow(cctx.Context, `SELECT config FROM harmony_config WHERE title=$1`, layer).Scan(&text) + if err != nil { + if strings.Contains(err.Error(), sql.ErrNoRows.Error()) { + return nil, fmt.Errorf("missing layer '%s' ", layer) + } + if layer == "base" { + return nil, errors.New(`lotus-provider defaults to a layer named 'base'. 
+ Either use 'migrate' command or edit a base.toml and upload it with: lotus-provider config set base.toml`) + } + return nil, fmt.Errorf("could not read layer '%s': %w", layer, err) + } + meta, err := toml.Decode(text, &lp) + if err != nil { + return nil, fmt.Errorf("could not read layer, bad toml %s: %w", layer, err) + } + for _, k := range meta.Keys() { + have = append(have, strings.Join(k, " ")) + } + } + _ = have // FUTURE: verify that required fields are here. + // If config includes 3rd-party config, consider JSONSchema as a way that + // 3rd-parties can dynamically include config requirements and we can + // validate the config. Because of layering, we must validate @ startup. + return lp, nil +} diff --git a/cmd/lotus-provider/main.go b/cmd/lotus-provider/main.go new file mode 100644 index 000000000..19cc6f5f9 --- /dev/null +++ b/cmd/lotus-provider/main.go @@ -0,0 +1,160 @@ +package main + +import ( + "context" + "fmt" + "os" + "os/signal" + "runtime/debug" + "syscall" + + "github.com/fatih/color" + logging "github.com/ipfs/go-log/v2" + "github.com/urfave/cli/v2" + + "github.com/filecoin-project/lotus/build" + lcli "github.com/filecoin-project/lotus/cli" + cliutil "github.com/filecoin-project/lotus/cli/util" + "github.com/filecoin-project/lotus/lib/lotuslog" + "github.com/filecoin-project/lotus/lib/tracing" + "github.com/filecoin-project/lotus/node/repo" +) + +var log = logging.Logger("main") + +func SetupCloseHandler() { + c := make(chan os.Signal, 1) + signal.Notify(c, os.Interrupt, syscall.SIGTERM) + go func() { + <-c + fmt.Println("\r- Ctrl+C pressed in Terminal") + debug.PrintStack() + os.Exit(1) + }() +} + +func main() { + SetupCloseHandler() + + lotuslog.SetupLogLevels() + + local := []*cli.Command{ + //initCmd, + runCmd, + stopCmd, + configCmd, + testCmd, + //backupCmd, + //lcli.WithCategory("chain", actorCmd), + //lcli.WithCategory("storage", sectorsCmd), + //lcli.WithCategory("storage", provingCmd), + //lcli.WithCategory("storage", storageCmd), + 
//lcli.WithCategory("storage", sealingCmd), + } + + jaeger := tracing.SetupJaegerTracing("lotus") + defer func() { + if jaeger != nil { + _ = jaeger.ForceFlush(context.Background()) + } + }() + + for _, cmd := range local { + cmd := cmd + originBefore := cmd.Before + cmd.Before = func(cctx *cli.Context) error { + if jaeger != nil { + _ = jaeger.Shutdown(cctx.Context) + } + jaeger = tracing.SetupJaegerTracing("lotus/" + cmd.Name) + + if cctx.IsSet("color") { + color.NoColor = !cctx.Bool("color") + } + + if originBefore != nil { + return originBefore(cctx) + } + + return nil + } + } + + app := &cli.App{ + Name: "lotus-provider", + Usage: "Filecoin decentralized storage network provider", + Version: build.UserVersion(), + EnableBashCompletion: true, + Flags: []cli.Flag{ + &cli.BoolFlag{ + // examined in the Before above + Name: "color", + Usage: "use color in display output", + DefaultText: "depends on output being a TTY", + }, + &cli.StringFlag{ + Name: "panic-reports", + EnvVars: []string{"LOTUS_PANIC_REPORT_PATH"}, + Hidden: true, + Value: "~/.lotusprovider", // should follow --repo default + }, + &cli.StringFlag{ + Name: "db-host", + EnvVars: []string{"LOTUS_DB_HOST"}, + Usage: "Command separated list of hostnames for yugabyte cluster", + Value: "yugabyte", + }, + &cli.StringFlag{ + Name: "db-name", + EnvVars: []string{"LOTUS_DB_NAME", "LOTUS_HARMONYDB_HOSTS"}, + Value: "yugabyte", + }, + &cli.StringFlag{ + Name: "db-user", + EnvVars: []string{"LOTUS_DB_USER", "LOTUS_HARMONYDB_USERNAME"}, + Value: "yugabyte", + }, + &cli.StringFlag{ + Name: "db-password", + EnvVars: []string{"LOTUS_DB_PASSWORD", "LOTUS_HARMONYDB_PASSWORD"}, + Value: "yugabyte", + }, + &cli.StringFlag{ + Name: "db-port", + EnvVars: []string{"LOTUS_DB_PORT", "LOTUS_HARMONYDB_PORT"}, + Hidden: true, + Value: "5433", + }, + &cli.StringFlag{ + Name: "layers", + EnvVars: []string{"LOTUS_LAYERS", "LOTUS_CONFIG_LAYERS"}, + Value: "base", + }, + &cli.StringFlag{ + Name: FlagRepoPath, + EnvVars: 
[]string{"LOTUS_REPO_PATH"}, + Value: "~/.lotusprovider", + }, + cliutil.FlagVeryVerbose, + }, + Commands: append(local, lcli.CommonCommands...), + Before: func(c *cli.Context) error { + return nil + }, + After: func(c *cli.Context) error { + if r := recover(); r != nil { + // Generate report in LOTUS_PATH and re-raise panic + build.GeneratePanicReport(c.String("panic-reports"), c.String(FlagRepoPath), c.App.Name) + panic(r) + } + return nil + }, + } + app.Setup() + app.Metadata["repoType"] = repo.Provider + lcli.RunApp(app) +} + +const ( + FlagRepoPath = "repo-path" +) diff --git a/cmd/lotus-provider/migrate.go b/cmd/lotus-provider/migrate.go new file mode 100644 index 000000000..3869c7dfb --- /dev/null +++ b/cmd/lotus-provider/migrate.go @@ -0,0 +1,247 @@ +package main + +import ( + "bytes" + "context" + "encoding/base64" + "errors" + "fmt" + "os" + "path" + "strings" + + "github.com/BurntSushi/toml" + "github.com/fatih/color" + "github.com/ipfs/go-datastore" + "github.com/samber/lo" + "github.com/urfave/cli/v2" + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-address" + + cliutil "github.com/filecoin-project/lotus/cli/util" + "github.com/filecoin-project/lotus/lib/harmony/harmonydb" + "github.com/filecoin-project/lotus/node/config" + "github.com/filecoin-project/lotus/node/modules" + "github.com/filecoin-project/lotus/node/repo" +) + +var configMigrateCmd = &cli.Command{ + Name: "from-miner", + Usage: "Express a database config (for lotus-provider) from an existing miner.", + Description: "Express a database config (for lotus-provider) from an existing miner.", + Flags: []cli.Flag{ + &cli.StringFlag{ + Name: FlagMinerRepo, + Aliases: []string{FlagMinerRepoDeprecation}, + EnvVars: []string{"LOTUS_MINER_PATH", "LOTUS_STORAGE_PATH"}, + Value: "~/.lotusminer", + Usage: fmt.Sprintf("Specify miner repo path. 
flag(%s) and env(LOTUS_STORAGE_PATH) are DEPRECATION, will REMOVE SOON", FlagMinerRepoDeprecation), + }, + &cli.StringFlag{ + Name: "repo", + EnvVars: []string{"LOTUS_PATH"}, + Hidden: true, + Value: "~/.lotus", + }, + &cli.StringFlag{ + Name: "to-layer", + Aliases: []string{"t"}, + Usage: "The layer name for this data push. 'base' is recommended for single-miner setup.", + }, + &cli.BoolFlag{ + Name: "overwrite", + Aliases: []string{"o"}, + Usage: "Use this with --to-layer to replace an existing layer", + }, + }, + Action: fromMiner, +} + +const ( + FlagMinerRepo = "miner-repo" +) + +const FlagMinerRepoDeprecation = "storagerepo" + +func fromMiner(cctx *cli.Context) (err error) { + ctx := context.Background() + cliCommandColor := color.New(color.FgHiBlue).SprintFunc() + configColor := color.New(color.FgHiGreen).SprintFunc() + + r, err := repo.NewFS(cctx.String(FlagMinerRepo)) + if err != nil { + return err + } + + ok, err := r.Exists() + if err != nil { + return err + } + + if !ok { + return fmt.Errorf("repo not initialized") + } + + lr, err := r.LockRO(repo.StorageMiner) + if err != nil { + return fmt.Errorf("locking repo: %w", err) + } + defer func() { _ = lr.Close() }() + + cfgNode, err := lr.Config() + if err != nil { + return fmt.Errorf("getting node config: %w", err) + } + smCfg := cfgNode.(*config.StorageMiner) + + db, err := harmonydb.NewFromConfig(smCfg.HarmonyDB) + if err != nil { + return fmt.Errorf("could not reach the database. Ensure the Miner config toml's HarmonyDB entry"+ + " is setup to reach Yugabyte correctly: %w", err) + } + + var titles []string + err = db.Select(ctx, &titles, `SELECT title FROM harmony_config WHERE LENGTH(config) > 0`) + if err != nil { + return fmt.Errorf("miner cannot reach the db. 
Ensure the config toml's HarmonyDB entry"+ + " is setup to reach Yugabyte correctly: %s", err.Error()) + } + name := cctx.String("to-layer") + if name == "" { + name = fmt.Sprintf("mig%d", len(titles)) + } else { + if lo.Contains(titles, name) && !cctx.Bool("overwrite") { + return errors.New("the overwrite flag is needed to replace existing layer: " + name) + } + } + msg := "Layer " + configColor(name) + ` created. ` + + // Copy over identical settings: + + buf, err := os.ReadFile(path.Join(lr.Path(), "config.toml")) + if err != nil { + return fmt.Errorf("could not read config.toml: %w", err) + } + var lpCfg config.LotusProviderConfig + _, err = toml.Decode(string(buf), &lpCfg) + if err != nil { + return fmt.Errorf("could not decode toml: %w", err) + } + + // Populate Miner Address + mmeta, err := lr.Datastore(ctx, "/metadata") + if err != nil { + return xerrors.Errorf("opening miner metadata datastore: %w", err) + } + defer func() { + _ = mmeta.Close() + }() + + maddrBytes, err := mmeta.Get(ctx, datastore.NewKey("miner-address")) + if err != nil { + return xerrors.Errorf("getting miner address datastore entry: %w", err) + } + + addr, err := address.NewFromBytes(maddrBytes) + if err != nil { + return xerrors.Errorf("parsing miner actor address: %w", err) + } + + lpCfg.Addresses.MinerAddresses = []string{addr.String()} + + ks, err := lr.KeyStore() + if err != nil { + return xerrors.Errorf("keystore err: %w", err) + } + js, err := ks.Get(modules.JWTSecretName) + if err != nil { + return xerrors.Errorf("error getting JWTSecretName: %w", err) + } + + lpCfg.Apis.StorageRPCSecret = base64.StdEncoding.EncodeToString(js.PrivateKey) + + // Populate API Key + _, header, err := cliutil.GetRawAPI(cctx, repo.FullNode, "v0") + if err != nil { + return fmt.Errorf("cannot read API: %w", err) + } + + ainfo, err := cliutil.GetAPIInfo(&cli.Context{}, repo.FullNode) + if err != nil { + return xerrors.Errorf(`could not get API info for FullNode: %w + Set the environment variable to the 
value of "lotus auth api-info --perm=admin"`, err) + } + lpCfg.Apis.ChainApiInfo = []string{header.Get("Authorization")[7:] + ":" + ainfo.Addr} + + // Enable WindowPoSt + lpCfg.Subsystems.EnableWindowPost = true + msg += "\nBefore running lotus-provider, ensure any miner/worker answering of WindowPost is disabled by " + + "(on Miner) " + configColor("DisableBuiltinWindowPoSt=true") + " and (on Workers) not enabling windowpost on CLI or via " + + "environment variable " + configColor("LOTUS_WORKER_WINDOWPOST") + "." + + // Express as configTOML + configTOML := &bytes.Buffer{} + if err = toml.NewEncoder(configTOML).Encode(lpCfg); err != nil { + return err + } + + if !lo.Contains(titles, "base") { + cfg, err := getDefaultConfig(true) + if err != nil { + return xerrors.Errorf("Cannot get default config: %w", err) + } + _, err = db.Exec(ctx, "INSERT INTO harmony_config (title, config) VALUES ('base', $1)", cfg) + + if err != nil { + return err + } + } + + if cctx.Bool("overwrite") { + i, err := db.Exec(ctx, "DELETE FROM harmony_config WHERE title=$1", name) + if i != 0 { + fmt.Println("Overwriting existing layer") + } + if err != nil { + fmt.Println("Got error while deleting existing layer: " + err.Error()) + } + } + + _, err = db.Exec(ctx, "INSERT INTO harmony_config (title, config) VALUES ($1, $2)", name, configTOML.String()) + if err != nil { + return err + } + + dbSettings := "" + def := config.DefaultStorageMiner().HarmonyDB + if def.Hosts[0] != smCfg.HarmonyDB.Hosts[0] { + dbSettings += ` --db-host="` + strings.Join(smCfg.HarmonyDB.Hosts, ",") + `"` + } + if def.Port != smCfg.HarmonyDB.Port { + dbSettings += " --db-port=" + smCfg.HarmonyDB.Port + } + if def.Username != smCfg.HarmonyDB.Username { + dbSettings += ` --db-user="` + smCfg.HarmonyDB.Username + `"` + } + if def.Password != smCfg.HarmonyDB.Password { + dbSettings += ` --db-password="` + smCfg.HarmonyDB.Password + `"` + } + if def.Database != smCfg.HarmonyDB.Database { + dbSettings += ` --db-name="` + 
smCfg.HarmonyDB.Database + `"` + } + + var layerMaybe string + if name != "base" { + layerMaybe = "--layer=" + name + } + + msg += ` +To work with the config: +` + cliCommandColor(`lotus-provider `+dbSettings+` config help `) + msg += ` +To run Lotus Provider: in its own machine or cgroup without other files, use the command: +` + cliCommandColor(`lotus-provider `+dbSettings+` run `+layerMaybe) + fmt.Println(msg) + return nil +} diff --git a/cmd/lotus-provider/proving.go b/cmd/lotus-provider/proving.go new file mode 100644 index 000000000..a3211b176 --- /dev/null +++ b/cmd/lotus-provider/proving.go @@ -0,0 +1,198 @@ +package main + +import ( + "context" + "database/sql" + "encoding/json" + "errors" + "fmt" + "os" + "time" + + "github.com/urfave/cli/v2" + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/dline" + + "github.com/filecoin-project/lotus/lib/harmony/harmonydb" + "github.com/filecoin-project/lotus/provider" +) + +var testCmd = &cli.Command{ + Name: "test", + Usage: "Utility functions for testing", + Subcommands: []*cli.Command{ + //provingInfoCmd, + wdPostCmd, + }, +} + +var wdPostCmd = &cli.Command{ + Name: "window-post", + Aliases: []string{"wd", "windowpost", "wdpost"}, + Usage: "Compute a proof-of-spacetime for a sector (requires the sector to be pre-sealed). These will not send to the chain.", + Subcommands: []*cli.Command{ + wdPostHereCmd, + wdPostTaskCmd, + }, +} + +// wdPostTaskCmd writes to harmony_task and wdpost_partition_tasks, then waits for the result. +// It is intended to be used to test the windowpost scheduler. +// The end of the compute task puts the task_id onto wdpost_proofs, which is read by the submit task. +// The submit task will not send test tasks to the chain, and instead will write the result to harmony_test. +// The result is read by this command, and printed to stdout. 
+var wdPostTaskCmd = &cli.Command{ + Name: "task", + Aliases: []string{"scheduled", "schedule", "async", "asynchronous"}, + Usage: "Test the windowpost scheduler by running it on the next available lotus-provider. ", + Flags: []cli.Flag{ + &cli.Uint64Flag{ + Name: "deadline", + Usage: "deadline to compute WindowPoSt for ", + Value: 0, + }, + &cli.StringSliceFlag{ + Name: "layers", + Usage: "list of layers to be interpreted (atop defaults). Default: base", + Value: cli.NewStringSlice("base"), + }, + }, + Action: func(cctx *cli.Context) error { + ctx := context.Background() + + deps, err := getDeps(ctx, cctx) + if err != nil { + return err + } + + ts, err := deps.full.ChainHead(ctx) + if err != nil { + return xerrors.Errorf("cannot get chainhead %w", err) + } + ht := ts.Height() + + addr, err := address.NewFromString(deps.cfg.Addresses.MinerAddresses[0]) + if err != nil { + return xerrors.Errorf("cannot get miner address %w", err) + } + maddr, err := address.IDFromAddress(addr) + if err != nil { + return xerrors.Errorf("cannot get miner id %w", err) + } + var id int64 + _, err = deps.db.BeginTransaction(ctx, func(tx *harmonydb.Tx) (commit bool, err error) { + err = tx.QueryRow(`INSERT INTO harmony_task (name, posted_time, added_by) VALUES ('WdPost', CURRENT_TIMESTAMP, 123) RETURNING id`).Scan(&id) + if err != nil { + log.Error("inserting harmony_task: ", err) + return false, xerrors.Errorf("inserting harmony_task: %w", err) + } + _, err = tx.Exec(`INSERT INTO wdpost_partition_tasks + (task_id, sp_id, proving_period_start, deadline_index, partition_index) VALUES ($1, $2, $3, $4, $5)`, + id, maddr, ht, cctx.Uint64("deadline"), 0) + if err != nil { + log.Error("inserting wdpost_partition_tasks: ", err) + return false, xerrors.Errorf("inserting wdpost_partition_tasks: %w", err) + } + _, err = tx.Exec("INSERT INTO harmony_test (task_id) VALUES ($1)", id) + if err != nil { + return false, xerrors.Errorf("inserting into harmony_tests: %w", err) + } + return true, nil + }) + 
if err != nil { + return xerrors.Errorf("writing SQL transaction: %w", err) + } + fmt.Printf("Inserted task %v. Waiting for success ", id) + var result sql.NullString + for { + time.Sleep(time.Second) + err = deps.db.QueryRow(ctx, `SELECT result FROM harmony_test WHERE task_id=$1`, id).Scan(&result) + if err != nil { + return xerrors.Errorf("reading result from harmony_test: %w", err) + } + if result.Valid { + break + } + fmt.Print(".") + } + log.Infof("Result: %s", result.String) + return nil + }, +} + +// This command is intended to be used to verify PoSt compute performance. +// It will not send any messages to the chain. Since it can compute any deadline, output may be incorrectly timed for the chain. +// The entire processing happens in this process while you wait. It does not use the scheduler. +var wdPostHereCmd = &cli.Command{ + Name: "here", + Aliases: []string{"cli"}, + Usage: "Compute WindowPoSt for performance and configuration testing.", + Description: `Note: This command is intended to be used to verify PoSt compute performance. +It will not send any messages to the chain. Since it can compute any deadline, output may be incorrectly timed for the chain.`, + ArgsUsage: "[deadline index]", + Flags: []cli.Flag{ + &cli.Uint64Flag{ + Name: "deadline", + Usage: "deadline to compute WindowPoSt for ", + Value: 0, + }, + &cli.StringSliceFlag{ + Name: "layers", + Usage: "list of layers to be interpreted (atop defaults). 
Default: base", + Value: cli.NewStringSlice("base"), + }, + &cli.StringFlag{ + Name: "storage-json", + Usage: "path to json file containing storage config", + Value: "~/.lotus-provider/storage.json", + }, + &cli.Uint64Flag{ + Name: "partition", + Usage: "partition to compute WindowPoSt for", + Value: 0, + }, + }, + Action: func(cctx *cli.Context) error { + + ctx := context.Background() + deps, err := getDeps(ctx, cctx) + if err != nil { + return err + } + + wdPostTask, wdPoStSubmitTask, derlareRecoverTask, err := provider.WindowPostScheduler(ctx, deps.cfg.Fees, deps.cfg.Proving, deps.full, deps.verif, deps.lw, nil, + deps.as, deps.maddrs, deps.db, deps.stor, deps.si, deps.cfg.Subsystems.WindowPostMaxTasks) + if err != nil { + return err + } + _, _ = wdPoStSubmitTask, derlareRecoverTask + + if len(deps.maddrs) == 0 { + return errors.New("no miners to compute WindowPoSt for") + } + head, err := deps.full.ChainHead(ctx) + if err != nil { + return xerrors.Errorf("failed to get chain head: %w", err) + } + + di := dline.NewInfo(head.Height(), cctx.Uint64("deadline"), 0, 0, 0, 10 /*challenge window*/, 0, 0) + + for _, maddr := range deps.maddrs { + out, err := wdPostTask.DoPartition(ctx, head, address.Address(maddr), di, cctx.Uint64("partition")) + if err != nil { + fmt.Println("Error computing WindowPoSt for miner", maddr, err) + continue + } + fmt.Println("Computed WindowPoSt for miner", maddr, ":") + err = json.NewEncoder(os.Stdout).Encode(out) + if err != nil { + fmt.Println("Could not encode WindowPoSt output for miner", maddr, err) + continue + } + } + + return nil + }, +} diff --git a/cmd/lotus-provider/rpc/rpc.go b/cmd/lotus-provider/rpc/rpc.go new file mode 100644 index 000000000..3ae3e2a1f --- /dev/null +++ b/cmd/lotus-provider/rpc/rpc.go @@ -0,0 +1,51 @@ +package rpc + +import ( + "context" + "net/http" + + "github.com/gorilla/mux" + + // logging "github.com/ipfs/go-log/v2" + "github.com/filecoin-project/go-jsonrpc" + 
"github.com/filecoin-project/go-jsonrpc/auth" + + "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/lib/rpcenc" + "github.com/filecoin-project/lotus/metrics/proxy" +) + +//var log = logging.Logger("lp/rpc") + +func LotusProviderHandler( + authv func(ctx context.Context, token string) ([]auth.Permission, error), + remote http.HandlerFunc, + a api.LotusProvider, + permissioned bool) http.Handler { + mux := mux.NewRouter() + readerHandler, readerServerOpt := rpcenc.ReaderParamDecoder() + rpcServer := jsonrpc.NewServer(jsonrpc.WithServerErrors(api.RPCErrors), readerServerOpt) + + wapi := proxy.MetricedAPI[api.LotusProvider, api.LotusProviderStruct](a) + if permissioned { + wapi = api.PermissionedAPI[api.LotusProvider, api.LotusProviderStruct](wapi) + } + + rpcServer.Register("Filecoin", wapi) + rpcServer.AliasMethod("rpc.discover", "Filecoin.Discover") + + mux.Handle("/rpc/v0", rpcServer) + mux.Handle("/rpc/streams/v0/push/{uuid}", readerHandler) + mux.PathPrefix("/remote").HandlerFunc(remote) + mux.PathPrefix("/").Handler(http.DefaultServeMux) // pprof + + if !permissioned { + return mux + } + + ah := &auth.Handler{ + Verify: authv, + Next: mux.ServeHTTP, + } + return ah +} diff --git a/cmd/lotus-provider/run.go b/cmd/lotus-provider/run.go new file mode 100644 index 000000000..de97aa766 --- /dev/null +++ b/cmd/lotus-provider/run.go @@ -0,0 +1,467 @@ +package main + +import ( + "context" + "encoding/base64" + "encoding/json" + "fmt" + "net" + "net/http" + "os" + "strings" + "time" + + "github.com/gbrlsnchs/jwt/v3" + "github.com/gorilla/mux" + ds "github.com/ipfs/go-datastore" + dssync "github.com/ipfs/go-datastore/sync" + "github.com/pkg/errors" + "github.com/samber/lo" + "github.com/urfave/cli/v2" + "go.opencensus.io/stats" + "go.opencensus.io/tag" + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-jsonrpc/auth" + "github.com/filecoin-project/go-statestore" + + 
"github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/build" + lcli "github.com/filecoin-project/lotus/cli" + cliutil "github.com/filecoin-project/lotus/cli/util" + "github.com/filecoin-project/lotus/cmd/lotus-provider/rpc" + "github.com/filecoin-project/lotus/journal" + "github.com/filecoin-project/lotus/journal/alerting" + "github.com/filecoin-project/lotus/journal/fsjournal" + "github.com/filecoin-project/lotus/lib/harmony/harmonydb" + "github.com/filecoin-project/lotus/lib/harmony/harmonytask" + "github.com/filecoin-project/lotus/lib/ulimit" + "github.com/filecoin-project/lotus/metrics" + "github.com/filecoin-project/lotus/node" + "github.com/filecoin-project/lotus/node/config" + "github.com/filecoin-project/lotus/node/modules/dtypes" + "github.com/filecoin-project/lotus/node/repo" + "github.com/filecoin-project/lotus/provider" + "github.com/filecoin-project/lotus/provider/lpmessage" + "github.com/filecoin-project/lotus/provider/lpwinning" + "github.com/filecoin-project/lotus/storage/ctladdr" + "github.com/filecoin-project/lotus/storage/paths" + "github.com/filecoin-project/lotus/storage/sealer" + "github.com/filecoin-project/lotus/storage/sealer/ffiwrapper" + "github.com/filecoin-project/lotus/storage/sealer/storiface" +) + +type stackTracer interface { + StackTrace() errors.StackTrace +} + +var runCmd = &cli.Command{ + Name: "run", + Usage: "Start a lotus provider process", + Flags: []cli.Flag{ + &cli.StringFlag{ + Name: "listen", + Usage: "host address and port the worker api will listen on", + Value: "0.0.0.0:12300", + EnvVars: []string{"LOTUS_WORKER_LISTEN"}, + }, + &cli.BoolFlag{ + Name: "nosync", + Usage: "don't check full-node sync status", + }, + &cli.BoolFlag{ + Name: "halt-after-init", + Usage: "only run init, then return", + Hidden: true, + }, + &cli.BoolFlag{ + Name: "manage-fdlimit", + Usage: "manage open file limit", + Value: true, + }, + &cli.StringSliceFlag{ + Name: "layers", + Usage: "list of layers to be interpreted 
(atop defaults). Default: base", + Value: cli.NewStringSlice("base"), + }, + &cli.StringFlag{ + Name: "storage-json", + Usage: "path to json file containing storage config", + Value: "~/.lotus-provider/storage.json", + }, + &cli.StringFlag{ + Name: "journal", + Usage: "path to journal files", + Value: "~/.lotus-provider/", + }, + }, + Action: func(cctx *cli.Context) (err error) { + defer func() { + if err != nil { + if err, ok := err.(stackTracer); ok { + for _, f := range err.StackTrace() { + fmt.Printf("%+s:%d\n", f, f) + } + } + } + }() + if !cctx.Bool("enable-gpu-proving") { + err := os.Setenv("BELLMAN_NO_GPU", "true") + if err != nil { + return err + } + } + + ctx, _ := tag.New(lcli.DaemonContext(cctx), + tag.Insert(metrics.Version, build.BuildVersion), + tag.Insert(metrics.Commit, build.CurrentCommit), + tag.Insert(metrics.NodeType, "provider"), + ) + shutdownChan := make(chan struct{}) + { + var ctxclose func() + ctx, ctxclose = context.WithCancel(ctx) + go func() { + <-shutdownChan + ctxclose() + }() + } + // Register all metric views + /* + if err := view.Register( + metrics.MinerNodeViews..., + ); err != nil { + log.Fatalf("Cannot register the view: %v", err) + } + */ + // Set the metric to one so it is published to the exporter + stats.Record(ctx, metrics.LotusInfo.M(1)) + + if cctx.Bool("manage-fdlimit") { + if _, _, err := ulimit.ManageFdLimit(); err != nil { + log.Errorf("setting file descriptor limit: %s", err) + } + } + + deps, err := getDeps(ctx, cctx) + + if err != nil { + return err + } + cfg, db, full, verif, lw, as, maddrs, stor, si, localStore := deps.cfg, deps.db, deps.full, deps.verif, deps.lw, deps.as, deps.maddrs, deps.stor, deps.si, deps.localStore + + var activeTasks []harmonytask.TaskInterface + + sender, sendTask := lpmessage.NewSender(full, full, db) + activeTasks = append(activeTasks, sendTask) + + /////////////////////////////////////////////////////////////////////// + ///// Task Selection + 
/////////////////////////////////////////////////////////////////////// + { + + if cfg.Subsystems.EnableWindowPost { + wdPostTask, wdPoStSubmitTask, declareRecoverTask, err := provider.WindowPostScheduler(ctx, cfg.Fees, cfg.Proving, full, verif, lw, sender, + as, maddrs, db, stor, si, cfg.Subsystems.WindowPostMaxTasks) + if err != nil { + return err + } + activeTasks = append(activeTasks, wdPostTask, wdPoStSubmitTask, declareRecoverTask) + } + + if cfg.Subsystems.EnableWinningPost { + winPoStTask := lpwinning.NewWinPostTask(cfg.Subsystems.WinningPostMaxTasks, db, lw, verif, full, maddrs) + activeTasks = append(activeTasks, winPoStTask) + } + } + log.Infow("This lotus_provider instance handles", + "miner_addresses", minerAddressesToStrings(maddrs), + "tasks", lo.Map(activeTasks, func(t harmonytask.TaskInterface, _ int) string { return t.TypeDetails().Name })) + + taskEngine, err := harmonytask.New(db, activeTasks, deps.listenAddr) + if err != nil { + return err + } + + defer taskEngine.GracefullyTerminate(time.Hour) + + fh := &paths.FetchHandler{Local: localStore, PfHandler: &paths.DefaultPartialFileHandler{}} + remoteHandler := func(w http.ResponseWriter, r *http.Request) { + if !auth.HasPerm(r.Context(), nil, api.PermAdmin) { + w.WriteHeader(401) + _ = json.NewEncoder(w).Encode(struct{ Error string }{"unauthorized: missing admin permission"}) + return + } + + fh.ServeHTTP(w, r) + } + // local APIs + { + // debugging + mux := mux.NewRouter() + mux.PathPrefix("/").Handler(http.DefaultServeMux) // pprof + mux.PathPrefix("/remote").HandlerFunc(remoteHandler) + + /*ah := &auth.Handler{ + Verify: authv, + Next: mux.ServeHTTP, + }*/ // todo + + } + + var authVerify func(context.Context, string) ([]auth.Permission, error) + { + privateKey, err := base64.StdEncoding.DecodeString(deps.cfg.Apis.StorageRPCSecret) + if err != nil { + return xerrors.Errorf("decoding storage rpc secret: %w", err) + } + authVerify = func(ctx context.Context, token string) ([]auth.Permission, 
error) { + var payload jwtPayload + if _, err := jwt.Verify([]byte(token), jwt.NewHS256(privateKey), &payload); err != nil { + return nil, xerrors.Errorf("JWT Verification failed: %w", err) + } + + return payload.Allow, nil + } + } + // Serve the RPC. + srv := &http.Server{ + Handler: rpc.LotusProviderHandler( + authVerify, + remoteHandler, + &ProviderAPI{deps, shutdownChan}, + true), + ReadHeaderTimeout: time.Minute * 3, + BaseContext: func(listener net.Listener) context.Context { + ctx, _ := tag.New(context.Background(), tag.Upsert(metrics.APIInterface, "lotus-worker")) + return ctx + }, + } + + go func() { + <-ctx.Done() + log.Warn("Shutting down...") + if err := srv.Shutdown(context.TODO()); err != nil { + log.Errorf("shutting down RPC server failed: %s", err) + } + log.Warn("Graceful shutdown successful") + }() + + // Monitor for shutdown. + // TODO provide a graceful shutdown API on shutdownChan + finishCh := node.MonitorShutdown(shutdownChan) //node.ShutdownHandler{Component: "rpc server", StopFunc: rpcStopper}, + //node.ShutdownHandler{Component: "provider", StopFunc: stop}, + + <-finishCh + return nil + }, +} + +func makeDB(cctx *cli.Context) (*harmonydb.DB, error) { + dbConfig := config.HarmonyDB{ + Username: cctx.String("db-user"), + Password: cctx.String("db-password"), + Hosts: strings.Split(cctx.String("db-host"), ","), + Database: cctx.String("db-name"), + Port: cctx.String("db-port"), + } + return harmonydb.NewFromConfig(dbConfig) +} + +type jwtPayload struct { + Allow []auth.Permission +} + +func StorageAuth(apiKey string) (sealer.StorageAuth, error) { + if apiKey == "" { + return nil, xerrors.Errorf("no api key provided") + } + + rawKey, err := base64.StdEncoding.DecodeString(apiKey) + if err != nil { + return nil, xerrors.Errorf("decoding api key: %w", err) + } + + key := jwt.NewHS256(rawKey) + + p := jwtPayload{ + Allow: []auth.Permission{"admin"}, + } + + token, err := jwt.Sign(&p, key) + if err != nil { + return nil, err + } + + headers := 
http.Header{} + headers.Add("Authorization", "Bearer "+string(token)) + return sealer.StorageAuth(headers), nil +} + +type Deps struct { + cfg *config.LotusProviderConfig + db *harmonydb.DB + full api.FullNode + verif storiface.Verifier + lw *sealer.LocalWorker + as *ctladdr.AddressSelector + maddrs []dtypes.MinerAddress + stor *paths.Remote + si *paths.DBIndex + localStore *paths.Local + listenAddr string +} + +func getDeps(ctx context.Context, cctx *cli.Context) (*Deps, error) { + // Open repo + + repoPath := cctx.String(FlagRepoPath) + fmt.Println("repopath", repoPath) + r, err := repo.NewFS(repoPath) + if err != nil { + return nil, err + } + + ok, err := r.Exists() + if err != nil { + return nil, err + } + if !ok { + if err := r.Init(repo.Provider); err != nil { + return nil, err + } + } + + db, err := makeDB(cctx) + if err != nil { + return nil, err + } + + /////////////////////////////////////////////////////////////////////// + ///// Dependency Setup + /////////////////////////////////////////////////////////////////////// + + // The config feeds into task runners & their helpers + cfg, err := getConfig(cctx, db) + if err != nil { + return nil, err + } + + log.Debugw("config", "config", cfg) + + var verif storiface.Verifier = ffiwrapper.ProofVerifier + + as, err := provider.AddressSelector(&cfg.Addresses)() + if err != nil { + return nil, err + } + + de, err := journal.ParseDisabledEvents(cfg.Journal.DisabledEvents) + if err != nil { + return nil, err + } + j, err := fsjournal.OpenFSJournalPath(cctx.String("journal"), de) + if err != nil { + return nil, err + } + + full, fullCloser, err := cliutil.GetFullNodeAPIV1LotusProvider(cctx, cfg.Apis.ChainApiInfo) + if err != nil { + return nil, err + } + + go func() { + select { + case <-ctx.Done(): + fullCloser() + _ = j.Close() + } + }() + sa, err := StorageAuth(cfg.Apis.StorageRPCSecret) + if err != nil { + return nil, xerrors.Errorf(`'%w' while parsing the config toml's + [Apis] + StorageRPCSecret=%v +Get it 
with: jq .PrivateKey ~/.lotus-miner/keystore/MF2XI2BNNJ3XILLQOJUXMYLUMU`, err, cfg.Apis.StorageRPCSecret) + } + + al := alerting.NewAlertingSystem(j) + si := paths.NewDBIndex(al, db) + bls := &paths.BasicLocalStorage{ + PathToJSON: cctx.String("storage-json"), + } + + listenAddr := cctx.String("listen") + const unspecifiedAddress = "0.0.0.0" + addressSlice := strings.Split(listenAddr, ":") + if ip := net.ParseIP(addressSlice[0]); ip != nil { + if ip.String() == unspecifiedAddress { + rip, err := db.GetRoutableIP() + if err != nil { + return nil, err + } + listenAddr = rip + ":" + addressSlice[1] + } + } + localStore, err := paths.NewLocal(ctx, bls, si, []string{"http://" + listenAddr + "/remote"}) + if err != nil { + return nil, err + } + + stor := paths.NewRemote(localStore, si, http.Header(sa), 10, &paths.DefaultPartialFileHandler{}) + + wstates := statestore.New(dssync.MutexWrap(ds.NewMapDatastore())) + + // todo localWorker isn't the abstraction layer we want to use here, we probably want to go straight to ffiwrapper + // maybe with a lotus-provider specific abstraction. LocalWorker does persistent call tracking which we probably + // don't need (ehh.. 
maybe we do, the async callback system may actually work decently well with harmonytask) + lw := sealer.NewLocalWorker(sealer.WorkerConfig{}, stor, localStore, si, nil, wstates) + + var maddrs []dtypes.MinerAddress + for _, s := range cfg.Addresses.MinerAddresses { + addr, err := address.NewFromString(s) + if err != nil { + return nil, err + } + maddrs = append(maddrs, dtypes.MinerAddress(addr)) + } + + return &Deps{ // lint: intentionally not-named so it will fail if one is forgotten + cfg, + db, + full, + verif, + lw, + as, + maddrs, + stor, + si, + localStore, + listenAddr, + }, nil + +} + +type ProviderAPI struct { + *Deps + ShutdownChan chan struct{} +} + +func (p *ProviderAPI) Version(context.Context) (api.Version, error) { + return api.ProviderAPIVersion0, nil +} + +// Trigger shutdown +func (p *ProviderAPI) Shutdown(context.Context) error { + close(p.ShutdownChan) + return nil +} + +func minerAddressesToStrings(maddrs []dtypes.MinerAddress) []string { + strs := make([]string, len(maddrs)) + for i, addr := range maddrs { + strs[i] = address.Address(addr).String() + } + return strs +} diff --git a/cmd/lotus-provider/stop.go b/cmd/lotus-provider/stop.go new file mode 100644 index 000000000..3376d762a --- /dev/null +++ b/cmd/lotus-provider/stop.go @@ -0,0 +1,29 @@ +package main + +import ( + _ "net/http/pprof" + + "github.com/urfave/cli/v2" + + lcli "github.com/filecoin-project/lotus/cli" +) + +var stopCmd = &cli.Command{ + Name: "stop", + Usage: "Stop a running lotus provider", + Flags: []cli.Flag{}, + Action: func(cctx *cli.Context) error { + api, closer, err := lcli.GetAPI(cctx) + if err != nil { + return err + } + defer closer() + + err = api.Shutdown(lcli.ReqContext(cctx)) + if err != nil { + return err + } + + return nil + }, +} diff --git a/cmd/lotus-shed/genesis-verify.go b/cmd/lotus-shed/genesis-verify.go index 6795f1528..4ead8467e 100644 --- a/cmd/lotus-shed/genesis-verify.go +++ b/cmd/lotus-shed/genesis-verify.go @@ -62,7 +62,7 @@ var 
genesisVerifyCmd = &cli.Command{ return xerrors.Errorf("opening the car file: %w", err) } - ts, err := cs.Import(cctx.Context, f) + ts, _, err := cs.Import(cctx.Context, f) if err != nil { return err } diff --git a/cmd/lotus-shed/invariants.go b/cmd/lotus-shed/invariants.go index e74a0dd24..5c6fb2d4f 100644 --- a/cmd/lotus-shed/invariants.go +++ b/cmd/lotus-shed/invariants.go @@ -3,7 +3,8 @@ package main import ( "context" "fmt" - "io" + "os" + "path/filepath" "strconv" "time" @@ -21,6 +22,8 @@ import ( v9 "github.com/filecoin-project/go-state-types/builtin/v9" "github.com/filecoin-project/lotus/blockstore" + badgerbs "github.com/filecoin-project/lotus/blockstore/badger" + "github.com/filecoin-project/lotus/blockstore/splitstore" "github.com/filecoin-project/lotus/chain/actors" "github.com/filecoin-project/lotus/chain/consensus" "github.com/filecoin-project/lotus/chain/consensus/filcns" @@ -73,24 +76,52 @@ var invariantsCmd = &cli.Command{ defer lkrepo.Close() //nolint:errcheck - bs, err := lkrepo.Blockstore(ctx, repo.UniversalBlockstore) + cold, err := lkrepo.Blockstore(ctx, repo.UniversalBlockstore) if err != nil { - return fmt.Errorf("failed to open blockstore: %w", err) + return fmt.Errorf("failed to open universal blockstore %w", err) } - defer func() { - if c, ok := bs.(io.Closer); ok { - if err := c.Close(); err != nil { - log.Warnf("failed to close blockstore: %s", err) - } - } - }() + path, err := lkrepo.SplitstorePath() + if err != nil { + return err + } + + path = filepath.Join(path, "hot.badger") + if err := os.MkdirAll(path, 0755); err != nil { + return err + } + + opts, err := repo.BadgerBlockstoreOptions(repo.HotBlockstore, path, lkrepo.Readonly()) + if err != nil { + return err + } + + hot, err := badgerbs.Open(opts) + if err != nil { + return err + } mds, err := lkrepo.Datastore(context.Background(), "/metadata") if err != nil { return err } + cfg := &splitstore.Config{ + MarkSetType: "map", + DiscardColdBlocks: true, + } + ss, err := 
splitstore.Open(path, mds, hot, cold, cfg) + if err != nil { + return err + } + defer func() { + if err := ss.Close(); err != nil { + log.Warnf("failed to close blockstore: %s", err) + + } + }() + bs := ss + cs := store.NewChainStore(bs, bs, mds, filcns.Weight, nil) defer cs.Close() //nolint:errcheck diff --git a/cmd/lotus-shed/mpool.go b/cmd/lotus-shed/mpool.go index cfbff2abd..6b210bbc1 100644 --- a/cmd/lotus-shed/mpool.go +++ b/cmd/lotus-shed/mpool.go @@ -2,6 +2,7 @@ package main import ( "fmt" + "time" "github.com/urfave/cli/v2" @@ -43,10 +44,20 @@ var minerSelectMsgsCmd = &cli.Command{ return err } + // Get the size of the mempool + pendingMsgs, err := api.MpoolPending(ctx, types.EmptyTSK) + if err != nil { + return err + } + mpoolSize := len(pendingMsgs) + + // Measure the time taken by MpoolSelect + startTime := time.Now() msgs, err := api.MpoolSelect(ctx, head.Key(), cctx.Float64("ticket-quality")) if err != nil { return err } + duration := time.Since(startTime) var totalGas int64 for i, f := range msgs { @@ -64,6 +75,9 @@ var minerSelectMsgsCmd = &cli.Command{ totalGas += f.Message.GasLimit } + // Log the duration, size of the mempool, selected messages and total gas limit of selected messages + fmt.Printf("Message selection took %s\n", duration) + fmt.Printf("Size of the mempool: %d\n", mpoolSize) fmt.Println("selected messages: ", len(msgs)) fmt.Printf("total gas limit of selected messages: %d / %d (%0.2f%%)\n", totalGas, build.BlockGasLimit, 100*float64(totalGas)/float64(build.BlockGasLimit)) return nil diff --git a/cmd/lotus-shed/pruning.go b/cmd/lotus-shed/pruning.go index 275f3bc0a..c0bd453b1 100644 --- a/cmd/lotus-shed/pruning.go +++ b/cmd/lotus-shed/pruning.go @@ -12,7 +12,7 @@ import ( "github.com/filecoin-project/go-state-types/abi" - badgerbs "github.com/filecoin-project/lotus/blockstore/badger" + "github.com/filecoin-project/lotus/blockstore" "github.com/filecoin-project/lotus/chain/consensus/filcns" 
"github.com/filecoin-project/lotus/chain/store" "github.com/filecoin-project/lotus/node/repo" @@ -144,13 +144,6 @@ var stateTreePruneCmd = &cli.Command{ } }() - // After migrating to native blockstores, this has been made - // database-specific. - badgbs, ok := bs.(*badgerbs.Blockstore) - if !ok { - return fmt.Errorf("only badger blockstores are supported") - } - mds, err := lkrepo.Datastore(context.Background(), "/metadata") if err != nil { return err @@ -160,8 +153,12 @@ var stateTreePruneCmd = &cli.Command{ const DiscardRatio = 0.2 if cctx.Bool("only-ds-gc") { fmt.Println("running datastore gc....") + gbs, ok := bs.(blockstore.BlockstoreGCOnce) + if !ok { + return xerrors.Errorf("blockstore %T does not support GC", bs) + } for i := 0; i < cctx.Int("gc-count"); i++ { - if err := badgbs.DB().RunValueLogGC(DiscardRatio); err != nil { + if err := gbs.GCOnce(ctx, blockstore.WithThreshold(DiscardRatio)); err != nil { return xerrors.Errorf("datastore GC failed: %w", err) } } @@ -208,13 +205,6 @@ var stateTreePruneCmd = &cli.Command{ return nil } - b := badgbs.DB().NewWriteBatch() - defer b.Cancel() - - markForRemoval := func(c cid.Cid) error { - return b.Delete(badgbs.StorageKey(nil, c)) - } - keys, err := bs.AllKeysChan(context.Background()) if err != nil { return xerrors.Errorf("failed to query blockstore: %w", err) @@ -225,12 +215,12 @@ var stateTreePruneCmd = &cli.Command{ var deleteCount int var goodHits int for k := range keys { - if goodSet.HasRaw(k.Bytes()) { + if goodSet.Has(k) { goodHits++ continue } - if err := markForRemoval(k); err != nil { + if err := bs.DeleteBlock(ctx, k); err != nil { return fmt.Errorf("failed to remove cid %s: %w", k, err) } @@ -243,13 +233,15 @@ var stateTreePruneCmd = &cli.Command{ } } - if err := b.Flush(); err != nil { - return xerrors.Errorf("failed to flush final batch delete: %w", err) + fmt.Println("running datastore gc....") + gbs, ok := bs.(blockstore.BlockstoreGCOnce) + if !ok { + fmt.Println("gc not supported...") + return 
nil } - fmt.Println("running datastore gc....") for i := 0; i < cctx.Int("gc-count"); i++ { - if err := badgbs.DB().RunValueLogGC(DiscardRatio); err != nil { + if err := gbs.GCOnce(ctx, blockstore.WithThreshold(DiscardRatio)); err != nil { return xerrors.Errorf("datastore GC failed: %w", err) } } diff --git a/cmd/lotus-shed/terminations.go b/cmd/lotus-shed/terminations.go index c5f35995a..563c1ba3a 100644 --- a/cmd/lotus-shed/terminations.go +++ b/cmd/lotus-shed/terminations.go @@ -157,7 +157,8 @@ var terminationsCmd = &cli.Command{ } for _, t := range termParams.Terminations { - sectors, err := minerSt.LoadSectors(&t.Sectors) + tmp := t.Sectors + sectors, err := minerSt.LoadSectors(&tmp) if err != nil { return err } diff --git a/cmd/lotus-sim/simulation/stages/funding_stage.go b/cmd/lotus-sim/simulation/stages/funding_stage.go index f75a9910d..4ce4afae1 100644 --- a/cmd/lotus-sim/simulation/stages/funding_stage.go +++ b/cmd/lotus-sim/simulation/stages/funding_stage.go @@ -166,7 +166,8 @@ func (fs *FundingStage) PackMessages(ctx context.Context, bb *blockbuilder.Block ) }() - for _, actor := range targets { + for _, actorTmp := range targets { + actor := actorTmp switch { case builtin.IsAccountActor(actor.Code): if _, err := bb.PushMessage(&types.Message{ diff --git a/cmd/lotus-worker/main.go b/cmd/lotus-worker/main.go index 873dada47..41af11bdd 100644 --- a/cmd/lotus-worker/main.go +++ b/cmd/lotus-worker/main.go @@ -7,6 +7,7 @@ import ( "net" "net/http" "os" + "os/signal" "path/filepath" "reflect" "strings" @@ -38,6 +39,7 @@ import ( "github.com/filecoin-project/lotus/node/repo" "github.com/filecoin-project/lotus/storage/paths" "github.com/filecoin-project/lotus/storage/sealer" + "github.com/filecoin-project/lotus/storage/sealer/ffiwrapper" "github.com/filecoin-project/lotus/storage/sealer/sealtasks" "github.com/filecoin-project/lotus/storage/sealer/storiface" ) @@ -283,7 +285,36 @@ var runCmd = &cli.Command{ Value: true, DefaultText: "inherits --addpiece", }, + 
&cli.StringFlag{ + Name: "external-pc2", + Usage: "command for computing PC2 externally", + }, }, + Description: `Run lotus-worker. + +--external-pc2 can be used to compute the PreCommit2 inputs externally. +The flag behaves similarly to the related lotus-worker flag, using it in +lotus-bench may be useful for testing if the external PreCommit2 command is +invoked correctly. + +The command will be called with a number of environment variables set: +* EXTSEAL_PC2_SECTOR_NUM: the sector number +* EXTSEAL_PC2_SECTOR_MINER: the miner id +* EXTSEAL_PC2_PROOF_TYPE: the proof type +* EXTSEAL_PC2_SECTOR_SIZE: the sector size in bytes +* EXTSEAL_PC2_CACHE: the path to the cache directory +* EXTSEAL_PC2_SEALED: the path to the sealed sector file (initialized with unsealed data by the caller) +* EXTSEAL_PC2_PC1OUT: output from rust-fil-proofs precommit1 phase (base64 encoded json) + +The command is expected to: +* Create cache sc-02-data-tree-r* files +* Create cache sc-02-data-tree-c* files +* Create cache p_aux / t_aux files +* Transform the sealed file in place + +Example invocation of lotus-bench as external executor: +'./lotus-bench simple precommit2 --sector-size $EXTSEAL_PC2_SECTOR_SIZE $EXTSEAL_PC2_SEALED $EXTSEAL_PC2_CACHE $EXTSEAL_PC2_PC1OUT' +`, Before: func(cctx *cli.Context) error { if cctx.IsSet("address") { log.Warnf("The '--address' flag is deprecated, it has been replaced by '--listen'") @@ -348,6 +379,18 @@ var runCmd = &cli.Command{ // Connect to storage-miner ctx := lcli.ReqContext(cctx) + // Create a new context with cancel function + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + // Listen for interrupt signals + go func() { + c := make(chan os.Signal, 1) + signal.Notify(c, os.Interrupt) + <-c + cancel() + }() + var nodeApi api.StorageMiner var closer func() for { @@ -359,14 +402,13 @@ var runCmd = &cli.Command{ } } fmt.Printf("\r\x1b[0KConnecting to miner API... 
(%s)", err) - time.Sleep(time.Second) - continue + select { + case <-ctx.Done(): + return xerrors.New("Interrupted by user") + case <-time.After(time.Second): + } } - defer closer() - ctx, cancel := context.WithCancel(ctx) - defer cancel() - // Register all metric views if err := view.Register( metrics.DefaultViews..., @@ -611,18 +653,32 @@ var runCmd = &cli.Command{ fh.ServeHTTP(w, r) } + // Parse ffi executor flags + + var ffiOpts []ffiwrapper.FFIWrapperOpt + + if cctx.IsSet("external-pc2") { + extSeal := ffiwrapper.ExternalSealer{ + PreCommit2: ffiwrapper.MakeExternPrecommit2(cctx.String("external-pc2")), + } + + ffiOpts = append(ffiOpts, ffiwrapper.WithExternalSealCalls(extSeal)) + } + // Create / expose the worker wsts := statestore.New(namespace.Wrap(ds, modules.WorkerCallsPrefix)) workerApi := &sealworker.Worker{ - LocalWorker: sealer.NewLocalWorker(sealer.WorkerConfig{ - TaskTypes: taskTypes, - NoSwap: cctx.Bool("no-swap"), - MaxParallelChallengeReads: cctx.Int("post-parallel-reads"), - ChallengeReadTimeout: cctx.Duration("post-read-timeout"), - Name: cctx.String("name"), - }, remote, localStore, nodeApi, nodeApi, wsts), + LocalWorker: sealer.NewLocalWorkerWithExecutor( + sealer.FFIExec(ffiOpts...), + sealer.WorkerConfig{ + TaskTypes: taskTypes, + NoSwap: cctx.Bool("no-swap"), + MaxParallelChallengeReads: cctx.Int("post-parallel-reads"), + ChallengeReadTimeout: cctx.Duration("post-read-timeout"), + Name: cctx.String("name"), + }, os.LookupEnv, remote, localStore, nodeApi, nodeApi, wsts), LocalStore: localStore, Storage: lr, } diff --git a/cmd/lotus-worker/sealworker/rpc.go b/cmd/lotus-worker/sealworker/rpc.go index 97f78942e..4e720ef64 100644 --- a/cmd/lotus-worker/sealworker/rpc.go +++ b/cmd/lotus-worker/sealworker/rpc.go @@ -26,7 +26,11 @@ import ( var log = logging.Logger("sealworker") -func WorkerHandler(authv func(ctx context.Context, token string) ([]auth.Permission, error), remote http.HandlerFunc, a api.Worker, permissioned bool) http.Handler { 
+func WorkerHandler( + authv func(ctx context.Context, token string) ([]auth.Permission, error), + remote http.HandlerFunc, + a api.Worker, + permissioned bool) http.Handler { mux := mux.NewRouter() readerHandler, readerServerOpt := rpcenc.ReaderParamDecoder() rpcServer := jsonrpc.NewServer(jsonrpc.WithServerErrors(api.RPCErrors), readerServerOpt) diff --git a/cmd/lotus/daemon.go b/cmd/lotus/daemon.go index 7271a6e53..5d8096d1f 100644 --- a/cmd/lotus/daemon.go +++ b/cmd/lotus/daemon.go @@ -291,10 +291,55 @@ var DaemonCmd = &cli.Command{ chainfile := cctx.String("import-chain") snapshot := cctx.String("import-snapshot") + willImportChain := false if chainfile != "" || snapshot != "" { if chainfile != "" && snapshot != "" { return fmt.Errorf("cannot specify both 'import-snapshot' and 'import-chain'") } + willImportChain = true + } + + willRemoveChain := cctx.Bool("remove-existing-chain") + if willImportChain && !willRemoveChain { + // Confirm with the user about the intention to remove chain data. + reader := bufio.NewReader(os.Stdin) + fmt.Print("Importing chain or snapshot will by default delete existing local chain data. Do you want to proceed and delete? (yes/no): ") + userInput, err := reader.ReadString('\n') + if err != nil { + return xerrors.Errorf("reading user input: %w", err) + } + userInput = strings.ToLower(strings.TrimSpace(userInput)) + + if userInput == "yes" { + willRemoveChain = true + } else if userInput == "no" { + willRemoveChain = false + } else { + return fmt.Errorf("invalid input. 
please answer with 'yes' or 'no'") + } + } + + if willRemoveChain { + lr, err := repo.NewFS(cctx.String("repo")) + if err != nil { + return xerrors.Errorf("error opening fs repo: %w", err) + } + + exists, err := lr.Exists() + if err != nil { + return err + } + if !exists { + return xerrors.Errorf("lotus repo doesn't exist") + } + + err = removeExistingChain(cctx, lr) + if err != nil { + return err + } + } + + if willImportChain { var issnapshot bool if chainfile == "" { chainfile = snapshot @@ -540,7 +585,7 @@ func ImportChain(ctx context.Context, r repo.Repo, fname string, snapshot bool) } bar.Start() - ts, err := cst.Import(ctx, ir) + ts, gen, err := cst.Import(ctx, ir) bar.Finish() if err != nil { @@ -551,18 +596,14 @@ func ImportChain(ctx context.Context, r repo.Repo, fname string, snapshot bool) return xerrors.Errorf("flushing validation cache failed: %w", err) } - gb, err := cst.GetTipsetByHeight(ctx, 0, ts, true) - if err != nil { - return err - } - - err = cst.SetGenesis(ctx, gb.Blocks()[0]) + log.Infof("setting genesis") + err = cst.SetGenesis(ctx, gen) if err != nil { return err } if !snapshot { - shd, err := drand.BeaconScheduleFromDrandSchedule(build.DrandConfigSchedule(), gb.MinTimestamp(), nil) + shd, err := drand.BeaconScheduleFromDrandSchedule(build.DrandConfigSchedule(), gen.Timestamp, nil) if err != nil { return xerrors.Errorf("failed to construct beacon schedule: %w", err) } diff --git a/documentation/en/api-v0-methods-provider.md b/documentation/en/api-v0-methods-provider.md new file mode 100644 index 000000000..fc4a2daf7 --- /dev/null +++ b/documentation/en/api-v0-methods-provider.md @@ -0,0 +1,25 @@ +# Groups +* [](#) + * [Shutdown](#Shutdown) + * [Version](#Version) +## + + +### Shutdown + + +Perms: admin + +Inputs: `null` + +Response: `{}` + +### Version + + +Perms: admin + +Inputs: `null` + +Response: `131840` + diff --git a/documentation/en/api-v0-methods.md b/documentation/en/api-v0-methods.md index 742f3de8e..9110c1c3b 100644 --- 
a/documentation/en/api-v0-methods.md +++ b/documentation/en/api-v0-methods.md @@ -2289,7 +2289,8 @@ Inputs: }, { "MaxFee": "0", - "MsgUuid": "07070707-0707-0707-0707-070707070707" + "MsgUuid": "07070707-0707-0707-0707-070707070707", + "MaximizeFeeCap": true }, [ { @@ -2766,7 +2767,8 @@ Inputs: ], { "MaxFee": "0", - "MsgUuid": "07070707-0707-0707-0707-070707070707" + "MsgUuid": "07070707-0707-0707-0707-070707070707", + "MaximizeFeeCap": true } ] ``` @@ -3025,7 +3027,8 @@ Inputs: }, { "MaxFee": "0", - "MsgUuid": "07070707-0707-0707-0707-070707070707" + "MsgUuid": "07070707-0707-0707-0707-070707070707", + "MaximizeFeeCap": true } ] ``` diff --git a/documentation/en/api-v1-unstable-methods.md b/documentation/en/api-v1-unstable-methods.md index e2c249395..c2929f0f5 100644 --- a/documentation/en/api-v1-unstable-methods.md +++ b/documentation/en/api-v1-unstable-methods.md @@ -2402,14 +2402,7 @@ Perms: read Inputs: ```json [ - { - "from": "0x5cbeecf99d3fdb3f25e309cc264f240bb0664031", - "to": "0x5cbeecf99d3fdb3f25e309cc264f240bb0664031", - "gas": "0x5", - "gasPrice": "0x0", - "value": "0x0", - "data": "0x07" - } + "Bw==" ] ``` @@ -3357,7 +3350,8 @@ Inputs: }, { "MaxFee": "0", - "MsgUuid": "07070707-0707-0707-0707-070707070707" + "MsgUuid": "07070707-0707-0707-0707-070707070707", + "MaximizeFeeCap": true }, [ { @@ -3834,7 +3828,8 @@ Inputs: ], { "MaxFee": "0", - "MsgUuid": "07070707-0707-0707-0707-070707070707" + "MsgUuid": "07070707-0707-0707-0707-070707070707", + "MaximizeFeeCap": true } ] ``` @@ -4226,7 +4221,8 @@ Inputs: }, { "MaxFee": "0", - "MsgUuid": "07070707-0707-0707-0707-070707070707" + "MsgUuid": "07070707-0707-0707-0707-070707070707", + "MaximizeFeeCap": true } ] ``` diff --git a/documentation/en/block-validation.md b/documentation/en/block-validation.md index d178a0667..ac711c6bf 100644 --- a/documentation/en/block-validation.md +++ b/documentation/en/block-validation.md @@ -104,9 +104,9 @@ domain separation tag. 
### Winning PoSt proof Draw randomness for current epoch with `WinningPoSt` domain separation tag. -Get list of sectors challanged in this epoch for this miner, based on the randomness drawn. +Get list of sectors challenged in this epoch for this miner, based on the randomness drawn. -`V`: Use filecoin proofs system to verify that miner prooved access to sealed versions of these sectors. +`V`: Use filecoin proofs system to verify that miner proved access to sealed versions of these sectors. ## `(*StateManager).TipSetState()` diff --git a/documentation/en/cli-lotus-miner.md b/documentation/en/cli-lotus-miner.md index 6693d2a79..7f1c0a01a 100644 --- a/documentation/en/cli-lotus-miner.md +++ b/documentation/en/cli-lotus-miner.md @@ -7,7 +7,7 @@ USAGE: lotus-miner [global options] command [command options] [arguments...] VERSION: - 1.25.1 + 1.25.2 COMMANDS: init Initialize a lotus miner repo @@ -231,8 +231,19 @@ OPTIONS: --help, -h show help ``` -#### lotus-miner actor set-addresses, set-addrs +### lotus-miner actor set-addresses ``` +NAME: + lotus-miner actor set-addresses - set addresses that your miner can be publicly dialed on + +USAGE: + lotus-miner actor set-addresses [command options] + +OPTIONS: + --from value optionally specify the account to send the message from + --gas-limit value set gas limit (default: 0) + --unset unset address (default: false) + --help, -h show help ``` ### lotus-miner actor withdraw @@ -1161,8 +1172,20 @@ OPTIONS: --help, -h show help ``` -##### lotus-miner proving compute windowed-post, window-post +#### lotus-miner proving compute windowed-post ``` +NAME: + lotus-miner proving compute windowed-post - Compute WindowPoSt for a specific deadline + +USAGE: + lotus-miner proving compute windowed-post [command options] [deadline index] + +DESCRIPTION: + Note: This command is intended to be used to verify PoSt compute performance. + It will not send any messages to the chain. 
+ +OPTIONS: + --help, -h show help ``` ### lotus-miner proving recover-faults diff --git a/documentation/en/cli-lotus-provider.md b/documentation/en/cli-lotus-provider.md new file mode 100644 index 000000000..932350fe2 --- /dev/null +++ b/documentation/en/cli-lotus-provider.md @@ -0,0 +1,410 @@ +# lotus-provider +``` +NAME: + lotus-provider - Filecoin decentralized storage network provider + +USAGE: + lotus-provider [global options] command [command options] [arguments...] + +VERSION: + 1.25.2 + +COMMANDS: + run Start a lotus provider process + stop Stop a running lotus provider + config Manage node config by layers. The layer 'base' will always be applied. + test Utility functions for testing + version Print version + help, h Shows a list of commands or help for one command + DEVELOPER: + auth Manage RPC permissions + log Manage logging + wait-api Wait for lotus api to come online + fetch-params Fetch proving parameters + +GLOBAL OPTIONS: + --color use color in display output (default: depends on output being a TTY) + --db-host value Command separated list of hostnames for yugabyte cluster (default: "yugabyte") [$LOTUS_DB_HOST] + --db-name value (default: "yugabyte") [$LOTUS_DB_NAME, $LOTUS_HARMONYDB_HOSTS] + --db-user value (default: "yugabyte") [$LOTUS_DB_USER, $LOTUS_HARMONYDB_USERNAME] + --db-password value (default: "yugabyte") [$LOTUS_DB_PASSWORD, $LOTUS_HARMONYDB_PASSWORD] + --layers value (default: "base") [$LOTUS_LAYERS, $LOTUS_CONFIG_LAYERS] + --repo-path value (default: "~/.lotusprovider") [$LOTUS_REPO_PATH] + --vv enables very verbose mode, useful for debugging the CLI (default: false) + --help, -h show help + --version, -v print the version +``` + +## lotus-provider run +``` +NAME: + lotus-provider run - Start a lotus provider process + +USAGE: + lotus-provider run [command options] [arguments...] 
+ +OPTIONS: + --listen value host address and port the worker api will listen on (default: "0.0.0.0:12300") [$LOTUS_WORKER_LISTEN] + --nosync don't check full-node sync status (default: false) + --manage-fdlimit manage open file limit (default: true) + --layers value [ --layers value ] list of layers to be interpreted (atop defaults). Default: base (default: "base") + --storage-json value path to json file containing storage config (default: "~/.lotus-provider/storage.json") + --journal value path to journal files (default: "~/.lotus-provider/") + --help, -h show help +``` + +## lotus-provider stop +``` +NAME: + lotus-provider stop - Stop a running lotus provider + +USAGE: + lotus-provider stop [command options] [arguments...] + +OPTIONS: + --help, -h show help +``` + +## lotus-provider config +``` +NAME: + lotus-provider config - Manage node config by layers. The layer 'base' will always be applied. + +USAGE: + lotus-provider config command [command options] [arguments...] + +COMMANDS: + default, defaults Print default node config + set, add, update, create Set a config layer or the base by providing a filename or stdin. + get, cat, show Get a config layer by name. You may want to pipe the output to a file, or use 'less' + list, ls List config layers you can get. + interpret, view, stacked, stack Interpret stacked config layers by this version of lotus-provider, with system-generated comments. + remove, rm, del, delete Remove a named config layer. + from-miner Express a database config (for lotus-provider) from an existing miner. + help, h Shows a list of commands or help for one command + +OPTIONS: + --help, -h show help +``` + +### lotus-provider config default +``` +NAME: + lotus-provider config default - Print default node config + +USAGE: + lotus-provider config default [command options] [arguments...] 
+ +OPTIONS: + --no-comment don't comment default values (default: false) + --help, -h show help +``` + +### lotus-provider config set +``` +NAME: + lotus-provider config set - Set a config layer or the base by providing a filename or stdin. + +USAGE: + lotus-provider config set [command options] a layer's file name + +OPTIONS: + --title value title of the config layer (req'd for stdin) + --help, -h show help +``` + +### lotus-provider config get +``` +NAME: + lotus-provider config get - Get a config layer by name. You may want to pipe the output to a file, or use 'less' + +USAGE: + lotus-provider config get [command options] layer name + +OPTIONS: + --help, -h show help +``` + +### lotus-provider config list +``` +NAME: + lotus-provider config list - List config layers you can get. + +USAGE: + lotus-provider config list [command options] [arguments...] + +OPTIONS: + --help, -h show help +``` + +### lotus-provider config interpret +``` +NAME: + lotus-provider config interpret - Interpret stacked config layers by this version of lotus-provider, with system-generated comments. + +USAGE: + lotus-provider config interpret [command options] a list of layers to be interpreted as the final config + +OPTIONS: + --layers value [ --layers value ] comma or space separated list of layers to be interpreted (default: "base") + --help, -h show help +``` + +### lotus-provider config remove +``` +NAME: + lotus-provider config remove - Remove a named config layer. + +USAGE: + lotus-provider config remove [command options] [arguments...] + +OPTIONS: + --help, -h show help +``` + +### lotus-provider config from-miner +``` +NAME: + lotus-provider config from-miner - Express a database config (for lotus-provider) from an existing miner. + +USAGE: + lotus-provider config from-miner [command options] [arguments...] + +DESCRIPTION: + Express a database config (for lotus-provider) from an existing miner. + +OPTIONS: + --miner-repo value, --storagerepo value Specify miner repo path. 
flag(storagerepo) and env(LOTUS_STORAGE_PATH) are DEPRECATION, will REMOVE SOON (default: "~/.lotusminer") [$LOTUS_MINER_PATH, $LOTUS_STORAGE_PATH] + --to-layer value, -t value The layer name for this data push. 'base' is recommended for single-miner setup. + --overwrite, -o Use this with --to-layer to replace an existing layer (default: false) + --help, -h show help +``` + +## lotus-provider test +``` +NAME: + lotus-provider test - Utility functions for testing + +USAGE: + lotus-provider test command [command options] [arguments...] + +COMMANDS: + window-post, wd, windowpost, wdpost Compute a proof-of-spacetime for a sector (requires the sector to be pre-sealed). These will not send to the chain. + help, h Shows a list of commands or help for one command + +OPTIONS: + --help, -h show help +``` + +### lotus-provider test window-post +``` +NAME: + lotus-provider test window-post - Compute a proof-of-spacetime for a sector (requires the sector to be pre-sealed). These will not send to the chain. + +USAGE: + lotus-provider test window-post command [command options] [arguments...] + +COMMANDS: + here, cli Compute WindowPoSt for performance and configuration testing. + task, scheduled, schedule, async, asynchronous Test the windowpost scheduler by running it on the next available lotus-provider. + help, h Shows a list of commands or help for one command + +OPTIONS: + --help, -h show help +``` + +#### lotus-provider test window-post here +``` +NAME: + lotus-provider test window-post here - Compute WindowPoSt for performance and configuration testing. + +USAGE: + lotus-provider test window-post here [command options] [deadline index] + +DESCRIPTION: + Note: This command is intended to be used to verify PoSt compute performance. + It will not send any messages to the chain. Since it can compute any deadline, output may be incorrectly timed for the chain. 
+ +OPTIONS: + --deadline value deadline to compute WindowPoSt for (default: 0) + --layers value [ --layers value ] list of layers to be interpreted (atop defaults). Default: base (default: "base") + --storage-json value path to json file containing storage config (default: "~/.lotus-provider/storage.json") + --partition value partition to compute WindowPoSt for (default: 0) + --help, -h show help +``` + +#### lotus-provider test window-post task +``` +NAME: + lotus-provider test window-post task - Test the windowpost scheduler by running it on the next available lotus-provider. + +USAGE: + lotus-provider test window-post task [command options] [arguments...] + +OPTIONS: + --deadline value deadline to compute WindowPoSt for (default: 0) + --layers value [ --layers value ] list of layers to be interpreted (atop defaults). Default: base (default: "base") + --help, -h show help +``` + +## lotus-provider version +``` +NAME: + lotus-provider version - Print version + +USAGE: + lotus-provider version [command options] [arguments...] + +OPTIONS: + --help, -h show help +``` + +## lotus-provider auth +``` +NAME: + lotus-provider auth - Manage RPC permissions + +USAGE: + lotus-provider auth command [command options] [arguments...] + +COMMANDS: + create-token Create token + api-info Get token with API info required to connect to this node + help, h Shows a list of commands or help for one command + +OPTIONS: + --help, -h show help +``` + +### lotus-provider auth create-token +``` +NAME: + lotus-provider auth create-token - Create token + +USAGE: + lotus-provider auth create-token [command options] [arguments...] + +OPTIONS: + --perm value permission to assign to the token, one of: read, write, sign, admin + --help, -h show help +``` + +### lotus-provider auth api-info +``` +NAME: + lotus-provider auth api-info - Get token with API info required to connect to this node + +USAGE: + lotus-provider auth api-info [command options] [arguments...] 
+ +OPTIONS: + --perm value permission to assign to the token, one of: read, write, sign, admin + --help, -h show help +``` + +## lotus-provider log +``` +NAME: + lotus-provider log - Manage logging + +USAGE: + lotus-provider log command [command options] [arguments...] + +COMMANDS: + list List log systems + set-level Set log level + alerts Get alert states + help, h Shows a list of commands or help for one command + +OPTIONS: + --help, -h show help +``` + +### lotus-provider log list +``` +NAME: + lotus-provider log list - List log systems + +USAGE: + lotus-provider log list [command options] [arguments...] + +OPTIONS: + --help, -h show help +``` + +### lotus-provider log set-level +``` +NAME: + lotus-provider log set-level - Set log level + +USAGE: + lotus-provider log set-level [command options] [level] + +DESCRIPTION: + Set the log level for logging systems: + + The system flag can be specified multiple times. + + eg) log set-level --system chain --system chainxchg debug + + Available Levels: + debug + info + warn + error + + Environment Variables: + GOLOG_LOG_LEVEL - Default log level for all log systems + GOLOG_LOG_FMT - Change output log format (json, nocolor) + GOLOG_FILE - Write logs to file + GOLOG_OUTPUT - Specify whether to output to file, stderr, stdout or a combination, i.e. file+stderr + + +OPTIONS: + --system value [ --system value ] limit to log system + --help, -h show help +``` + +### lotus-provider log alerts +``` +NAME: + lotus-provider log alerts - Get alert states + +USAGE: + lotus-provider log alerts [command options] [arguments...] + +OPTIONS: + --all get all (active and inactive) alerts (default: false) + --help, -h show help +``` + +## lotus-provider wait-api +``` +NAME: + lotus-provider wait-api - Wait for lotus api to come online + +USAGE: + lotus-provider wait-api [command options] [arguments...] 
+ +CATEGORY: + DEVELOPER + +OPTIONS: + --timeout value duration to wait till fail (default: 30s) + --help, -h show help +``` + +## lotus-provider fetch-params +``` +NAME: + lotus-provider fetch-params - Fetch proving parameters + +USAGE: + lotus-provider fetch-params [command options] [sectorSize] + +CATEGORY: + DEVELOPER + +OPTIONS: + --help, -h show help +``` diff --git a/documentation/en/cli-lotus-worker.md b/documentation/en/cli-lotus-worker.md index 733723bad..1da1165cf 100644 --- a/documentation/en/cli-lotus-worker.md +++ b/documentation/en/cli-lotus-worker.md @@ -7,7 +7,7 @@ USAGE: lotus-worker [global options] command [command options] [arguments...] VERSION: - 1.25.1 + 1.25.2 COMMANDS: run Start lotus worker @@ -34,6 +34,33 @@ NAME: USAGE: lotus-worker run [command options] [arguments...] +DESCRIPTION: + Run lotus-worker. + + --external-pc2 can be used to compute the PreCommit2 inputs externally. + The flag behaves similarly to the related lotus-worker flag, using it in + lotus-bench may be useful for testing if the external PreCommit2 command is + invoked correctly. 
+ + The command will be called with a number of environment variables set: + * EXTSEAL_PC2_SECTOR_NUM: the sector number + * EXTSEAL_PC2_SECTOR_MINER: the miner id + * EXTSEAL_PC2_PROOF_TYPE: the proof type + * EXTSEAL_PC2_SECTOR_SIZE: the sector size in bytes + * EXTSEAL_PC2_CACHE: the path to the cache directory + * EXTSEAL_PC2_SEALED: the path to the sealed sector file (initialized with unsealed data by the caller) + * EXTSEAL_PC2_PC1OUT: output from rust-fil-proofs precommit1 phase (base64 encoded json) + + The command is expected to: + * Create cache sc-02-data-tree-r* files + * Create cache sc-02-data-tree-c* files + * Create cache p_aux / t_aux files + * Transform the sealed file in place + + Example invocation of lotus-bench as external executor: + './lotus-bench simple precommit2 --sector-size $EXTSEAL_PC2_SECTOR_SIZE $EXTSEAL_PC2_SEALED $EXTSEAL_PC2_CACHE $EXTSEAL_PC2_PC1OUT' + + OPTIONS: --listen value host address and port the worker api will listen on (default: "0.0.0.0:3456") [$LOTUS_WORKER_LISTEN] --no-local-storage don't use storageminer repo for sector storage (default: false) [$LOTUS_WORKER_NO_LOCAL_STORAGE] @@ -57,6 +84,7 @@ OPTIONS: --timeout value used when 'listen' is unspecified. must be a valid duration recognized by golang's time.ParseDuration function (default: "30m") [$LOTUS_WORKER_TIMEOUT] --http-server-timeout value (default: "30s") --data-cid Run the data-cid task. true|false (default: inherits --addpiece) + --external-pc2 value command for computing PC2 externally --help, -h show help ``` diff --git a/documentation/en/cli-lotus.md b/documentation/en/cli-lotus.md index c7a2cb09a..5e451dcad 100644 --- a/documentation/en/cli-lotus.md +++ b/documentation/en/cli-lotus.md @@ -7,7 +7,7 @@ USAGE: lotus [global options] command [command options] [arguments...] 
VERSION: - 1.25.1 + 1.25.2 COMMANDS: daemon Start a lotus daemon process @@ -1807,8 +1807,16 @@ OPTIONS: --help, -h show help ``` -#### lotus state sector, sector-info +### lotus state sector ``` +NAME: + lotus state sector - Get miner sector info + +USAGE: + lotus state sector [command options] [minerAddress] [sectorNumber] + +OPTIONS: + --help, -h show help ``` ### lotus state get-actor @@ -1937,12 +1945,29 @@ OPTIONS: --help, -h show help ``` -#### lotus state wait-msg, wait-message +### lotus state wait-msg ``` +NAME: + lotus state wait-msg - Wait for a message to appear on chain + +USAGE: + lotus state wait-msg [command options] [messageCid] + +OPTIONS: + --timeout value (default: "10m") + --help, -h show help ``` -#### lotus state search-msg, search-message +### lotus state search-msg ``` +NAME: + lotus state search-msg - Search to see whether a message has appeared on chain + +USAGE: + lotus state search-msg [command options] [messageCid] + +OPTIONS: + --help, -h show help ``` ### lotus state miner-info @@ -2080,8 +2105,17 @@ OPTIONS: --help, -h show help ``` -#### lotus chain get-block, getblock +### lotus chain get-block ``` +NAME: + lotus chain get-block - Get a block and print its details + +USAGE: + lotus chain get-block [command options] [blockCid] + +OPTIONS: + --raw print just the raw block header (default: false) + --help, -h show help ``` ### lotus chain read-obj @@ -2132,16 +2166,46 @@ OPTIONS: --help, -h show help ``` -##### lotus chain getmessage, get-message, get-msg +### lotus chain getmessage ``` +NAME: + lotus chain getmessage - Get and print a message by its cid + +USAGE: + lotus chain getmessage [command options] [messageCid] + +OPTIONS: + --help, -h show help ``` -#### lotus chain sethead, set-head +### lotus chain sethead ``` +NAME: + lotus chain sethead - manually set the local nodes head tipset (Caution: normally only used for recovery) + +USAGE: + lotus chain sethead [command options] [tipsetkey] + +OPTIONS: + --genesis reset head to 
genesis (default: false) + --epoch value reset head to given epoch (default: 0) + --help, -h show help ``` -#### lotus chain list, love +### lotus chain list ``` +NAME: + lotus chain list - View a segment of the chain + +USAGE: + lotus chain list [command options] [arguments...] + +OPTIONS: + --height value (default: current head) + --count value (default: 30) + --format value specify the format to print out tipsets (default: ": (