diff --git a/.circleci/config.yml b/.circleci/config.yml index c5f358a85..cb7756274 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -7,14 +7,19 @@ executors: golang: docker: # Must match GO_VERSION_MIN in project root - - image: cimg/go:1.18.8 + - image: cimg/go:1.19.7 + resource_class: medium+ + golang-2xl: + docker: + # Must match GO_VERSION_MIN in project root + - image: cimg/go:1.19.7 resource_class: 2xlarge ubuntu: docker: - image: ubuntu:20.04 commands: - prepare: + build-platform-specific: parameters: linux: default: true @@ -31,22 +36,13 @@ commands: steps: - checkout - git_fetch_all_tags + - run: git submodule sync + - run: git submodule update --init - when: condition: <> steps: - - run: - name: Check Go Version - command: | - v=`go version | { read _ _ v _; echo ${v#go}; }` - if [[ $v != `cat GO_VERSION_MIN` ]]; then - echo "GO_VERSION_MIN file does not match the go version being used." - echo "Please update image to cimg/go:`cat GO_VERSION_MIN` or update GO_VERSION_MIN to $v." - exit 1 - fi - - run: sudo apt-get update - - run: sudo apt-get install ocl-icd-opencl-dev libhwloc-dev - - run: sudo apt-get install python-is-python3 - + - install-ubuntu-deps + - check-go-version - when: condition: <> steps: @@ -67,8 +63,7 @@ commands: name: Install Rust command: | curl https://sh.rustup.rs -sSf | sh -s -- -y - - run: git submodule sync - - run: git submodule update --init + - run: make deps lotus download-params: steps: - restore_cache: @@ -77,7 +72,7 @@ commands: - 'v26-2k-lotus-params' paths: - /var/tmp/filecoin-proof-parameters/ - - run: ./lotus fetch-params 2048 + - run: ./lotus fetch-params 2048 - save_cache: name: Save parameters cache key: 'v26-2k-lotus-params' @@ -99,12 +94,43 @@ commands: name: fetch all tags command: | git fetch --all + install-ubuntu-deps: + steps: + - run: sudo apt-get update + - run: sudo apt-get install ocl-icd-opencl-dev libhwloc-dev + check-go-version: + steps: + - run: | + v=`go version | { read _ _ v _; echo ${v#go}; }` + if [[ $v != `cat GO_VERSION_MIN` ]]; then + echo "GO_VERSION_MIN file does not match the go version being used." + echo "Please update image to cimg/go:`cat GO_VERSION_MIN` or update GO_VERSION_MIN to $v." + exit 1 + fi jobs: + build: + executor: golang + working_directory: ~/lotus + steps: + - checkout + - git_fetch_all_tags + - run: git submodule sync + - run: git submodule update --init + - install-ubuntu-deps + - check-go-version + - run: make deps lotus + - persist_to_workspace: + root: ~/ + paths: + - "lotus" mod-tidy-check: executor: golang + working_directory: ~/lotus steps: - - prepare + - install-ubuntu-deps + - attach_workspace: + at: ~/ - run: go mod tidy -v - run: name: Check git diff @@ -115,13 +141,14 @@ jobs: test: description: | Run tests with gotestsum. + working_directory: ~/lotus parameters: &test-params executor: type: executor default: golang go-test-flags: type: string - default: "-timeout 30m" + default: "-timeout 20m" description: Flags passed to go test. target: type: string @@ -130,21 +157,22 @@ jobs: proofs-log-test: type: string default: "0" + get-params: + type: boolean + default: false suite: type: string default: unit description: Test suite name to report to CircleCI. - gotestsum-format: - type: string - default: standard-verbose - description: gotestsum format. 
https://github.com/gotestyourself/gotestsum#format executor: << parameters.executor >> steps: - - prepare - - run: - command: make deps lotus - no_output_timeout: 30m - - download-params + - install-ubuntu-deps + - attach_workspace: + at: ~/ + - when: + condition: << parameters.get-params >> + steps: + - download-params - run: name: go test environment: @@ -155,12 +183,11 @@ jobs: mkdir -p /tmp/test-reports/<< parameters.suite >> mkdir -p /tmp/test-artifacts gotestsum \ - --format << parameters.gotestsum-format >> \ + --format standard-verbose \ --junitfile /tmp/test-reports/<< parameters.suite >>/junit.xml \ --jsonfile /tmp/test-artifacts/<< parameters.suite >>.json \ - -- \ - << parameters.go-test-flags >> \ - << parameters.target >> + --packages="<< parameters.target >>" \ + -- << parameters.go-test-flags >> no_output_timeout: 30m - store_test_results: path: /tmp/test-reports @@ -168,6 +195,7 @@ jobs: path: /tmp/test-artifacts/<< parameters.suite >>.json test-conformance: + working_directory: ~/lotus description: | Run tests using a corpus of interoperable test vectors for Filecoin implementations to test their correctness and compliance with the Filecoin @@ -183,10 +211,9 @@ jobs: submodule is used. executor: << parameters.executor >> steps: - - prepare - - run: - command: make deps lotus - no_output_timeout: 30m + - install-ubuntu-deps + - attach_workspace: + at: ~/ - download-params - when: condition: @@ -229,7 +256,7 @@ jobs: build-linux-amd64: executor: golang steps: - - prepare + - build-platform-specific - run: make lotus lotus-miner lotus-worker - run: name: check tag and version output match @@ -248,7 +275,7 @@ jobs: macos: xcode: "13.4.1" steps: - - prepare: + - build-platform-specific: linux: false darwin: true darwin-architecture: amd64 @@ -272,7 +299,7 @@ jobs: resource_class: filecoin-project/self-hosted-m1 steps: - run: echo 'export PATH=/opt/homebrew/bin:"$PATH"' >> "$BASH_ENV" - - prepare: + - build-platform-specific: linux: false darwin: true darwin-architecture: arm64 @@ -330,16 +357,18 @@ jobs: gofmt: executor: golang + working_directory: ~/lotus steps: - - prepare - run: command: "! go fmt ./... 2>&1 | read" gen-check: executor: golang + working_directory: ~/lotus steps: - - prepare - - run: make deps + - install-ubuntu-deps + - attach_workspace: + at: ~/ - run: go install golang.org/x/tools/cmd/goimports - run: go install github.com/hannahhoward/cbor-gen-for - run: make gen @@ -349,32 +378,29 @@ jobs: docs-check: executor: golang + working_directory: ~/lotus steps: - - prepare + - install-ubuntu-deps + - attach_workspace: + at: ~/ - run: go install golang.org/x/tools/cmd/goimports - run: zcat build/openrpc/full.json.gz | jq > ../pre-openrpc-full - run: zcat build/openrpc/miner.json.gz | jq > ../pre-openrpc-miner - run: zcat build/openrpc/worker.json.gz | jq > ../pre-openrpc-worker - - run: make deps - run: make docsgen - run: zcat build/openrpc/full.json.gz | jq > ../post-openrpc-full - run: zcat build/openrpc/miner.json.gz | jq > ../post-openrpc-miner - run: zcat build/openrpc/worker.json.gz | jq > ../post-openrpc-worker - run: diff ../pre-openrpc-full ../post-openrpc-full && diff ../pre-openrpc-miner ../post-openrpc-miner && diff ../pre-openrpc-worker ../post-openrpc-worker && git --no-pager diff && git --no-pager diff --quiet - lint: &lint + lint-all: description: | Run golangci-lint. + working_directory: ~/lotus parameters: executor: type: executor default: golang - concurrency: - type: string - default: '2' - description: | - Concurrency used to run linters. 
Defaults to 2 because NumCPU is not - aware of container CPU limits. args: type: string default: '' @@ -382,17 +408,14 @@ jobs: Arguments to pass to golangci-lint executor: << parameters.executor >> steps: - - prepare - - run: - command: make deps - no_output_timeout: 30m + - install-ubuntu-deps + - attach_workspace: + at: ~/ - run: name: Lint command: | - golangci-lint run -v --timeout 2m \ - --concurrency << parameters.concurrency >> << parameters.args >> - lint-all: - <<: *lint + golangci-lint run -v --timeout 10m \ + --concurrency 4 << parameters.args >> build-docker: description: > @@ -494,432 +517,541 @@ jobs: extra_build_args: --target <> --build-arg GOFLAGS=-tags=<> workflows: - version: 2.1 ci: jobs: + - build - lint-all: - concurrency: "16" # expend all docker 2xlarge CPUs. - - mod-tidy-check - - gofmt - - gen-check - - docs-check + requires: + - build + - mod-tidy-check: + requires: + - build + - gofmt: + requires: + - build + - gen-check: + requires: + - build + - docs-check: + requires: + - build - test: name: test-itest-api + requires: + - build suite: itest-api target: "./itests/api_test.go" - - test: name: test-itest-batch_deal + requires: + - build suite: itest-batch_deal target: "./itests/batch_deal_test.go" - - test: name: test-itest-ccupgrade + requires: + - build suite: itest-ccupgrade target: "./itests/ccupgrade_test.go" - - test: name: test-itest-cli + requires: + - build suite: itest-cli target: "./itests/cli_test.go" - - test: name: test-itest-deadlines + requires: + - build suite: itest-deadlines target: "./itests/deadlines_test.go" - - test: name: test-itest-deals_512mb + requires: + - build suite: itest-deals_512mb target: "./itests/deals_512mb_test.go" - - test: name: test-itest-deals_anycid + requires: + - build suite: itest-deals_anycid target: "./itests/deals_anycid_test.go" - - test: name: test-itest-deals_concurrent + requires: + - build suite: itest-deals_concurrent target: "./itests/deals_concurrent_test.go" - + executor: golang-2xl - test: name: test-itest-deals_invalid_utf8_label + requires: + - build suite: itest-deals_invalid_utf8_label target: "./itests/deals_invalid_utf8_label_test.go" - - test: name: test-itest-deals_max_staging_deals + requires: + - build suite: itest-deals_max_staging_deals target: "./itests/deals_max_staging_deals_test.go" - - test: name: test-itest-deals_offline + requires: + - build suite: itest-deals_offline target: "./itests/deals_offline_test.go" - - test: name: test-itest-deals_padding + requires: + - build suite: itest-deals_padding target: "./itests/deals_padding_test.go" - - test: name: test-itest-deals_partial_retrieval_dm-level + requires: + - build suite: itest-deals_partial_retrieval_dm-level target: "./itests/deals_partial_retrieval_dm-level_test.go" - - test: name: test-itest-deals_partial_retrieval + requires: + - build suite: itest-deals_partial_retrieval target: "./itests/deals_partial_retrieval_test.go" - - test: name: test-itest-deals_power + requires: + - build suite: itest-deals_power target: "./itests/deals_power_test.go" - - test: name: test-itest-deals_pricing + requires: + - build suite: itest-deals_pricing target: "./itests/deals_pricing_test.go" - - test: name: test-itest-deals_publish + requires: + - build suite: itest-deals_publish target: "./itests/deals_publish_test.go" - - test: name: test-itest-deals_remote_retrieval + requires: + - build suite: itest-deals_remote_retrieval target: "./itests/deals_remote_retrieval_test.go" - - test: name: test-itest-deals_retry_deal_no_funds + requires: + - 
build suite: itest-deals_retry_deal_no_funds target: "./itests/deals_retry_deal_no_funds_test.go" - - test: name: test-itest-deals + requires: + - build suite: itest-deals target: "./itests/deals_test.go" - - test: name: test-itest-decode_params + requires: + - build suite: itest-decode_params target: "./itests/decode_params_test.go" - - test: name: test-itest-dup_mpool_messages + requires: + - build suite: itest-dup_mpool_messages target: "./itests/dup_mpool_messages_test.go" - - test: name: test-itest-eth_account_abstraction + requires: + - build suite: itest-eth_account_abstraction target: "./itests/eth_account_abstraction_test.go" - - test: name: test-itest-eth_api + requires: + - build suite: itest-eth_api target: "./itests/eth_api_test.go" - - test: name: test-itest-eth_balance + requires: + - build suite: itest-eth_balance target: "./itests/eth_balance_test.go" - - test: name: test-itest-eth_block_hash + requires: + - build suite: itest-eth_block_hash target: "./itests/eth_block_hash_test.go" - - test: name: test-itest-eth_bytecode + requires: + - build suite: itest-eth_bytecode target: "./itests/eth_bytecode_test.go" - - test: name: test-itest-eth_config + requires: + - build suite: itest-eth_config target: "./itests/eth_config_test.go" - - test: name: test-itest-eth_conformance + requires: + - build suite: itest-eth_conformance target: "./itests/eth_conformance_test.go" - - test: name: test-itest-eth_deploy + requires: + - build suite: itest-eth_deploy target: "./itests/eth_deploy_test.go" - - test: name: test-itest-eth_fee_history + requires: + - build suite: itest-eth_fee_history target: "./itests/eth_fee_history_test.go" - - test: name: test-itest-eth_filter + requires: + - build suite: itest-eth_filter target: "./itests/eth_filter_test.go" - - test: name: test-itest-eth_hash_lookup + requires: + - build suite: itest-eth_hash_lookup target: "./itests/eth_hash_lookup_test.go" - - test: name: test-itest-eth_transactions + requires: + - build suite: itest-eth_transactions target: "./itests/eth_transactions_test.go" - - test: name: test-itest-fevm_address + requires: + - build suite: itest-fevm_address target: "./itests/fevm_address_test.go" - - test: name: test-itest-fevm_events + requires: + - build suite: itest-fevm_events target: "./itests/fevm_events_test.go" - - test: name: test-itest-fevm + requires: + - build suite: itest-fevm target: "./itests/fevm_test.go" - - test: name: test-itest-gas_estimation + requires: + - build suite: itest-gas_estimation target: "./itests/gas_estimation_test.go" - - test: name: test-itest-gateway + requires: + - build suite: itest-gateway target: "./itests/gateway_test.go" - - test: name: test-itest-get_messages_in_ts + requires: + - build suite: itest-get_messages_in_ts target: "./itests/get_messages_in_ts_test.go" - - test: name: test-itest-lite_migration + requires: + - build suite: itest-lite_migration target: "./itests/lite_migration_test.go" - - test: name: test-itest-lookup_robust_address + requires: + - build suite: itest-lookup_robust_address target: "./itests/lookup_robust_address_test.go" - - test: name: test-itest-mempool + requires: + - build suite: itest-mempool target: "./itests/mempool_test.go" - - test: name: test-itest-migration + requires: + - build suite: itest-migration target: "./itests/migration_test.go" - - test: name: test-itest-mpool_msg_uuid + requires: + - build suite: itest-mpool_msg_uuid target: "./itests/mpool_msg_uuid_test.go" - - test: name: test-itest-mpool_push_with_uuid + requires: + - build suite: 
itest-mpool_push_with_uuid target: "./itests/mpool_push_with_uuid_test.go" - - test: name: test-itest-multisig + requires: + - build suite: itest-multisig target: "./itests/multisig_test.go" - - test: name: test-itest-net + requires: + - build suite: itest-net target: "./itests/net_test.go" - - test: name: test-itest-nonce + requires: + - build suite: itest-nonce target: "./itests/nonce_test.go" - - test: name: test-itest-path_detach_redeclare + requires: + - build suite: itest-path_detach_redeclare target: "./itests/path_detach_redeclare_test.go" - - test: name: test-itest-path_type_filters + requires: + - build suite: itest-path_type_filters target: "./itests/path_type_filters_test.go" - - test: name: test-itest-paych_api + requires: + - build suite: itest-paych_api target: "./itests/paych_api_test.go" - - test: name: test-itest-paych_cli + requires: + - build suite: itest-paych_cli target: "./itests/paych_cli_test.go" - - test: name: test-itest-pending_deal_allocation + requires: + - build suite: itest-pending_deal_allocation target: "./itests/pending_deal_allocation_test.go" - - test: name: test-itest-raft_messagesigner + requires: + - build suite: itest-raft_messagesigner target: "./itests/raft_messagesigner_test.go" - - test: name: test-itest-remove_verifreg_datacap + requires: + - build suite: itest-remove_verifreg_datacap target: "./itests/remove_verifreg_datacap_test.go" - - test: name: test-itest-sdr_upgrade + requires: + - build suite: itest-sdr_upgrade target: "./itests/sdr_upgrade_test.go" - - test: name: test-itest-sector_finalize_early + requires: + - build suite: itest-sector_finalize_early target: "./itests/sector_finalize_early_test.go" - - test: name: test-itest-sector_import_full + requires: + - build suite: itest-sector_import_full target: "./itests/sector_import_full_test.go" - - test: name: test-itest-sector_import_simple + requires: + - build suite: itest-sector_import_simple target: "./itests/sector_import_simple_test.go" - - test: name: test-itest-sector_make_cc_avail + requires: + - build suite: itest-sector_make_cc_avail target: "./itests/sector_make_cc_avail_test.go" - - test: name: test-itest-sector_miner_collateral + requires: + - build suite: itest-sector_miner_collateral target: "./itests/sector_miner_collateral_test.go" - - test: name: test-itest-sector_numassign + requires: + - build suite: itest-sector_numassign target: "./itests/sector_numassign_test.go" - - test: name: test-itest-sector_pledge + requires: + - build suite: itest-sector_pledge target: "./itests/sector_pledge_test.go" - - test: name: test-itest-sector_prefer_no_upgrade + requires: + - build suite: itest-sector_prefer_no_upgrade target: "./itests/sector_prefer_no_upgrade_test.go" - - test: name: test-itest-sector_revert_available + requires: + - build suite: itest-sector_revert_available target: "./itests/sector_revert_available_test.go" - - test: name: test-itest-sector_terminate + requires: + - build suite: itest-sector_terminate target: "./itests/sector_terminate_test.go" - - test: name: test-itest-sector_unseal + requires: + - build suite: itest-sector_unseal target: "./itests/sector_unseal_test.go" - - test: name: test-itest-self_sent_txn + requires: + - build suite: itest-self_sent_txn target: "./itests/self_sent_txn_test.go" - - test: name: test-itest-splitstore + requires: + - build suite: itest-splitstore target: "./itests/splitstore_test.go" - - test: name: test-itest-tape + requires: + - build suite: itest-tape target: "./itests/tape_test.go" - - test: name: test-itest-verifreg 
+ requires: + - build suite: itest-verifreg target: "./itests/verifreg_test.go" - - test: name: test-itest-wdpost_config + requires: + - build suite: itest-wdpost_config target: "./itests/wdpost_config_test.go" - - test: name: test-itest-wdpost_dispute + requires: + - build suite: itest-wdpost_dispute target: "./itests/wdpost_dispute_test.go" - - test: name: test-itest-wdpost_no_miner_storage + requires: + - build suite: itest-wdpost_no_miner_storage target: "./itests/wdpost_no_miner_storage_test.go" - - test: name: test-itest-wdpost + requires: + - build suite: itest-wdpost target: "./itests/wdpost_test.go" - + get-params: true + - test: name: test-itest-wdpost_worker_config + requires: + - build suite: itest-wdpost_worker_config target: "./itests/wdpost_worker_config_test.go" - + executor: golang-2xl - test: name: test-itest-worker + requires: + - build suite: itest-worker target: "./itests/worker_test.go" - + executor: golang-2xl - test: name: test-itest-worker_upgrade + requires: + - build suite: itest-worker_upgrade target: "./itests/worker_upgrade_test.go" - - test: name: test-unit-cli + requires: + - build suite: utest-unit-cli target: "./cli/... ./cmd/... ./api/..." + get-params: true - test: name: test-unit-node + requires: + - build suite: utest-unit-node target: "./node/..." + - test: name: test-unit-rest + requires: + - build suite: utest-unit-rest target: "./api/... ./blockstore/... ./build/... ./chain/... ./cli/... ./cmd/... ./conformance/... ./extern/... ./gateway/... ./journal/... ./lib/... ./markets/... ./node/... ./paychmgr/... ./storage/... ./tools/..." + executor: golang-2xl - test: name: test-unit-storage + requires: + - build suite: utest-unit-storage target: "./storage/... ./extern/..." + - test: go-test-flags: "-run=TestMulticoreSDR" + requires: + - build suite: multicore-sdr-check target: "./storage/sealer/ffiwrapper" proofs-log-test: "1" - test-conformance: + requires: + - build suite: conformance target: "./conformance" diff --git a/.circleci/template.yml b/.circleci/template.yml index 724571ac2..382965615 100644 --- a/.circleci/template.yml +++ b/.circleci/template.yml @@ -7,14 +7,19 @@ executors: golang: docker: # Must match GO_VERSION_MIN in project root - - image: cimg/go:1.18.8 + - image: cimg/go:1.19.7 + resource_class: medium+ + golang-2xl: + docker: + # Must match GO_VERSION_MIN in project root + - image: cimg/go:1.19.7 resource_class: 2xlarge ubuntu: docker: - image: ubuntu:20.04 commands: - prepare: + build-platform-specific: parameters: linux: default: true @@ -31,22 +36,13 @@ commands: steps: - checkout - git_fetch_all_tags + - run: git submodule sync + - run: git submodule update --init - when: condition: <> steps: - - run: - name: Check Go Version - command: | - v=`go version | { read _ _ v _; echo ${v#go}; }` - if [["[[ $v != `cat GO_VERSION_MIN` ]]"]]; then - echo "GO_VERSION_MIN file does not match the go version being used." - echo "Please update image to cimg/go:`cat GO_VERSION_MIN` or update GO_VERSION_MIN to $v." 
- exit 1 - fi - - run: sudo apt-get update - - run: sudo apt-get install ocl-icd-opencl-dev libhwloc-dev - - run: sudo apt-get install python-is-python3 - + - install-ubuntu-deps + - check-go-version - when: condition: <> steps: @@ -67,8 +63,7 @@ commands: name: Install Rust command: | curl https://sh.rustup.rs -sSf | sh -s -- -y - - run: git submodule sync - - run: git submodule update --init + - run: make deps lotus download-params: steps: - restore_cache: @@ -77,7 +72,7 @@ commands: - 'v26-2k-lotus-params' paths: - /var/tmp/filecoin-proof-parameters/ - - run: ./lotus fetch-params 2048 + - run: ./lotus fetch-params 2048 - save_cache: name: Save parameters cache key: 'v26-2k-lotus-params' @@ -99,12 +94,43 @@ commands: name: fetch all tags command: | git fetch --all + install-ubuntu-deps: + steps: + - run: sudo apt-get update + - run: sudo apt-get install ocl-icd-opencl-dev libhwloc-dev + check-go-version: + steps: + - run: | + v=`go version | { read _ _ v _; echo ${v#go}; }` + if [["[[ $v != `cat GO_VERSION_MIN` ]]"]]; then + echo "GO_VERSION_MIN file does not match the go version being used." + echo "Please update image to cimg/go:`cat GO_VERSION_MIN` or update GO_VERSION_MIN to $v." + exit 1 + fi jobs: + build: + executor: golang + working_directory: ~/lotus + steps: + - checkout + - git_fetch_all_tags + - run: git submodule sync + - run: git submodule update --init + - install-ubuntu-deps + - check-go-version + - run: make deps lotus + - persist_to_workspace: + root: ~/ + paths: + - "lotus" mod-tidy-check: executor: golang + working_directory: ~/lotus steps: - - prepare + - install-ubuntu-deps + - attach_workspace: + at: ~/ - run: go mod tidy -v - run: name: Check git diff @@ -115,13 +141,14 @@ jobs: test: description: | Run tests with gotestsum. + working_directory: ~/lotus parameters: &test-params executor: type: executor default: golang go-test-flags: type: string - default: "-timeout 30m" + default: "-timeout 20m" description: Flags passed to go test. target: type: string @@ -130,21 +157,22 @@ jobs: proofs-log-test: type: string default: "0" + get-params: + type: boolean + default: false suite: type: string default: unit description: Test suite name to report to CircleCI. - gotestsum-format: - type: string - default: standard-verbose - description: gotestsum format. https://github.com/gotestyourself/gotestsum#format executor: << parameters.executor >> steps: - - prepare - - run: - command: make deps lotus - no_output_timeout: 30m - - download-params + - install-ubuntu-deps + - attach_workspace: + at: ~/ + - when: + condition: << parameters.get-params >> + steps: + - download-params - run: name: go test environment: @@ -155,12 +183,11 @@ jobs: mkdir -p /tmp/test-reports/<< parameters.suite >> mkdir -p /tmp/test-artifacts gotestsum \ - --format << parameters.gotestsum-format >> \ + --format standard-verbose \ --junitfile /tmp/test-reports/<< parameters.suite >>/junit.xml \ --jsonfile /tmp/test-artifacts/<< parameters.suite >>.json \ - -- \ - << parameters.go-test-flags >> \ - << parameters.target >> + --packages="<< parameters.target >>" \ + -- << parameters.go-test-flags >> no_output_timeout: 30m - store_test_results: path: /tmp/test-reports @@ -168,6 +195,7 @@ jobs: path: /tmp/test-artifacts/<< parameters.suite >>.json test-conformance: + working_directory: ~/lotus description: | Run tests using a corpus of interoperable test vectors for Filecoin implementations to test their correctness and compliance with the Filecoin @@ -183,10 +211,9 @@ jobs: submodule is used. 
executor: << parameters.executor >> steps: - - prepare - - run: - command: make deps lotus - no_output_timeout: 30m + - install-ubuntu-deps + - attach_workspace: + at: ~/ - download-params - when: condition: @@ -229,7 +256,7 @@ jobs: build-linux-amd64: executor: golang steps: - - prepare + - build-platform-specific - run: make lotus lotus-miner lotus-worker - run: name: check tag and version output match @@ -248,7 +275,7 @@ jobs: macos: xcode: "13.4.1" steps: - - prepare: + - build-platform-specific: linux: false darwin: true darwin-architecture: amd64 @@ -272,7 +299,7 @@ jobs: resource_class: filecoin-project/self-hosted-m1 steps: - run: echo 'export PATH=/opt/homebrew/bin:"$PATH"' >> "$BASH_ENV" - - prepare: + - build-platform-specific: linux: false darwin: true darwin-architecture: arm64 @@ -330,16 +357,18 @@ jobs: gofmt: executor: golang + working_directory: ~/lotus steps: - - prepare - run: command: "! go fmt ./... 2>&1 | read" gen-check: executor: golang + working_directory: ~/lotus steps: - - prepare - - run: make deps + - install-ubuntu-deps + - attach_workspace: + at: ~/ - run: go install golang.org/x/tools/cmd/goimports - run: go install github.com/hannahhoward/cbor-gen-for - run: make gen @@ -349,32 +378,29 @@ jobs: docs-check: executor: golang + working_directory: ~/lotus steps: - - prepare + - install-ubuntu-deps + - attach_workspace: + at: ~/ - run: go install golang.org/x/tools/cmd/goimports - run: zcat build/openrpc/full.json.gz | jq > ../pre-openrpc-full - run: zcat build/openrpc/miner.json.gz | jq > ../pre-openrpc-miner - run: zcat build/openrpc/worker.json.gz | jq > ../pre-openrpc-worker - - run: make deps - run: make docsgen - run: zcat build/openrpc/full.json.gz | jq > ../post-openrpc-full - run: zcat build/openrpc/miner.json.gz | jq > ../post-openrpc-miner - run: zcat build/openrpc/worker.json.gz | jq > ../post-openrpc-worker - run: diff ../pre-openrpc-full ../post-openrpc-full && diff ../pre-openrpc-miner ../post-openrpc-miner && diff ../pre-openrpc-worker ../post-openrpc-worker && git --no-pager diff && git --no-pager diff --quiet - lint: &lint + lint-all: description: | Run golangci-lint. + working_directory: ~/lotus parameters: executor: type: executor default: golang - concurrency: - type: string - default: '2' - description: | - Concurrency used to run linters. Defaults to 2 because NumCPU is not - aware of container CPU limits. args: type: string default: '' @@ -382,17 +408,14 @@ jobs: Arguments to pass to golangci-lint executor: << parameters.executor >> steps: - - prepare - - run: - command: make deps - no_output_timeout: 30m + - install-ubuntu-deps + - attach_workspace: + at: ~/ - run: name: Lint command: | - golangci-lint run -v --timeout 2m \ - --concurrency << parameters.concurrency >> << parameters.args >> - lint-all: - <<: *lint + golangci-lint run -v --timeout 10m \ + --concurrency 4 << parameters.args >> build-docker: description: > @@ -494,37 +517,61 @@ jobs: extra_build_args: --target <> --build-arg GOFLAGS=-tags=<> workflows: - version: 2.1 ci: jobs: + - build - lint-all: - concurrency: "16" # expend all docker 2xlarge CPUs. 
- - mod-tidy-check - - gofmt - - gen-check - - docs-check + requires: + - build + - mod-tidy-check: + requires: + - build + - gofmt: + requires: + - build + - gen-check: + requires: + - build + - docs-check: + requires: + - build [[- range $file := .ItestFiles -]] [[ with $name := $file | stripSuffix ]] - test: name: test-itest-[[ $name ]] + requires: + - build suite: itest-[[ $name ]] target: "./itests/[[ $file ]]" - [[ end ]] - [[- end -]] + [[- if or (eq $name "worker") (eq $name "deals_concurrent") (eq $name "wdpost_worker_config")]] + executor: golang-2xl + [[- end]] + [[- if (eq $name "wdpost")]] + get-params: true + [[end]] + [[- end ]][[- end]] - [[range $suite, $pkgs := .UnitSuites]] + [[- range $suite, $pkgs := .UnitSuites]] - test: name: test-[[ $suite ]] + requires: + - build suite: utest-[[ $suite ]] target: "[[ $pkgs ]]" + [[if eq $suite "unit-cli"]]get-params: true[[end]] + [[- if eq $suite "unit-rest"]]executor: golang-2xl[[end]] [[- end]] - test: go-test-flags: "-run=TestMulticoreSDR" + requires: + - build suite: multicore-sdr-check target: "./storage/sealer/ffiwrapper" proofs-log-test: "1" - test-conformance: + requires: + - build suite: conformance target: "./conformance" diff --git a/.github/ISSUE_TEMPLATE/bug_report.yml b/.github/ISSUE_TEMPLATE/bug_report.yml index 981a80256..e4c0c7f26 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.yml +++ b/.github/ISSUE_TEMPLATE/bug_report.yml @@ -9,15 +9,9 @@ body: options: - label: This is **not** a security-related bug/issue. If it is, please follow please follow the [security policy](https://github.com/filecoin-project/lotus/security/policy). required: true - - label: This is **not** a question or a support request. If you have any lotus related questions, please ask in the [lotus forum](https://github.com/filecoin-project/lotus/discussions). - required: true - - label: This is **not** a new feature request. If it is, please file a [feature request](https://github.com/filecoin-project/lotus/issues/new?assignees=&labels=need%2Ftriage%2Ckind%2Ffeature&template=feature_request.yml) instead. - required: true - - label: This is **not** an enhancement request. If it is, please file a [improvement suggestion](https://github.com/filecoin-project/lotus/issues/new?assignees=&labels=need%2Ftriage%2Ckind%2Fenhancement&template=enhancement.yml) instead. - required: true - label: I **have** searched on the [issue tracker](https://github.com/filecoin-project/lotus/issues) and the [lotus forum](https://github.com/filecoin-project/lotus/discussions), and there is no existing related issue or discussion. required: true - - label: I am running the [`Latest release`](https://github.com/filecoin-project/lotus/releases), or the most recent RC(release canadiate) for the upcoming release or the dev branch(master), or have an issue updating to any of these. + - label: I am running the [`Latest release`](https://github.com/filecoin-project/lotus/releases), the most recent RC(release canadiate) for the upcoming release or the dev branch(master), or have an issue updating to any of these. required: true - label: I did not make any code changes to lotus. 
required: false @@ -28,19 +22,11 @@ body: options: - label: lotus daemon - chain sync required: false - - label: lotus miner - mining and block production + - label: lotus fvm/fevm - Lotus FVM and FEVM interactions required: false - label: lotus miner/worker - sealing required: false - - label: lotus miner - proving(WindowPoSt) - required: false - - label: lotus miner/market - storage deal - required: false - - label: lotus miner/market - retrieval deal - required: false - - label: lotus miner/market - data transfer - required: false - - label: lotus client + - label: lotus miner - proving(WindowPoSt/WinningPoSt) required: false - label: lotus JSON-RPC API required: false @@ -56,22 +42,33 @@ body: description: Enter the output of `lotus version` and `lotus-miner version` if applicable. placeholder: | e.g. - Daemon:1.11.0-rc2+debug+git.0519cd371.dirty+api1.3.0 - Local: lotus version 1.11.0-rc2+debug+git.0519cd371.dirty + Daemon: 1.19.0+mainnet+git.64059ca87+api1.5.0 + Local: lotus-miner version 1.19.0+mainnet+git.64059ca87 validations: required: true +- type: textarea + id: ReproSteps + attributes: + label: Repro Steps + description: "Steps to reproduce the behavior" + value: | + 1. Run '...' + 2. Do '...' + 3. See error '...' + ... + validations: + required: false - type: textarea id: Description attributes: label: Describe the Bug description: | This is where you get to tell us what went wrong, when doing so, please try to provide a clear and concise description of the bug with all related information: - * What you were doding when you experienced the bug? + * What you were doing when you experienced the bug? * Any *error* messages you saw, *where* you saw them, and what you believe may have caused them (if you have any ideas). * What is the expected behaviour? * For sealing issues, include the output of `lotus-miner sectors status --log ` for the failed sector(s). * For proving issues, include the output of `lotus-miner proving` info. - * For deal making issues, include the output of `lotus client list-deals -v` and/or `lotus-miner storage-deals|retrieval-deals|data-transfers list [-v]` commands for the deal(s) in question. validations: required: true - type: textarea @@ -83,18 +80,6 @@ body: Please provide debug logs of the problem, remember you can get set log level control for: * lotus: use `lotus log list` to get all log systems available and set level by `lotus log set-level`. An example can be found [here](https://lotus.filecoin.io/lotus/configure/defaults/#log-level-control). * lotus-miner:`lotus-miner log list` to get all log systems available and set level by `lotus-miner log set-level - If you don't provide detailed logs when you raise the issue it will almost certainly be the first request I make before furthur diagnosing the problem. + If you don't provide detailed logs when you raise the issue it will almost certainly be the first request we make before furthur diagnosing the problem. validations: - required: true -- type: textarea - id: RepoSteps - attributes: - label: Repo Steps - description: "Steps to reproduce the behavior" - value: | - 1. Run '...' - 2. Do '...' - 3. See error '...' - ... 
- validations: - required: false + required: true \ No newline at end of file diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml new file mode 100644 index 000000000..e5ae608b3 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/config.yml @@ -0,0 +1,8 @@ +blank_issues_enabled: true +contact_links: + - name: Ask a question about Lotus or get support + url: https://github.com/filecoin-project/lotus/discussions/new/choose + about: Ask a question or request support for using Lotus + - name: Filecoin protocol feature or enhancement + url: https://github.com/filecoin-project/FIPs/discussions/new/choose + about: Write a discussion in the Filecoin Improvement Proposal repo diff --git a/.github/ISSUE_TEMPLATE/enhancement.yml b/.github/ISSUE_TEMPLATE/enhancement.yml index da662688d..d367feeac 100644 --- a/.github/ISSUE_TEMPLATE/enhancement.yml +++ b/.github/ISSUE_TEMPLATE/enhancement.yml @@ -7,13 +7,7 @@ body: label: Checklist description: Please check off the following boxes before continuing to create an improvement suggestion! options: - - label: This is **not** a new feature or an enhancement to the Filecoin protocol. If it is, please open an [FIP issue](https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0001.md). - required: true - - label: This is **not** a new feature request. If it is, please file a [feature request](https://github.com/filecoin-project/lotus/issues/new?assignees=&labels=need%2Ftriage%2Ckind%2Ffeature&template=feature_request.yml) instead. - required: true - - label: This is **not** brainstorming ideas. If you have an idea you'd like to discuss, please open a new discussion on [the lotus forum](https://github.com/filecoin-project/lotus/discussions/categories/ideas) and select the category as `Ideas`. - required: true - - label: I **have** a specific, actionable, and well motivated improvement to propose. + - label: I **have** a specific, actionable, and well motivated improvement to an existing lotus feature. required: true - type: checkboxes attributes: @@ -22,19 +16,11 @@ body: options: - label: lotus daemon - chain sync required: false - - label: lotus miner - mining and block production + - label: lotus fvm/fevm - Lotus FVM and FEVM interactions required: false - label: lotus miner/worker - sealing required: false - - label: lotus miner - proving(WindowPoSt) - required: false - - label: lotus miner/market - storage deal - required: false - - label: lotus miner/market - retrieval deal - required: false - - label: lotus miner/market - data transfer - required: false - - label: lotus client + - label: lotus miner - proving(WindowPoSt/WinningPoSt) required: false - label: lotus JSON-RPC API required: false @@ -45,9 +31,17 @@ body: - type: textarea id: request attributes: - label: Improvement Suggestion - description: A clear and concise description of what the motivation or the current problem is and what is the suggested improvement? - placeholder: Ex. Currently lotus... However, as a storage provider, I'd like... + label: Enhancement Suggestion + description: A clear and concise description of the suggested enhancement? + placeholder: Ex. Currently lotus... However it would be great if [enhancement] was implemented... With the ability to... + validations: + required: true +- type: textarea + id: request + attributes: + label: Use-Case + description: How would this enhancement help you? + placeholder: Ex. With the [enhancement] node operators would be able to... For Storage Providers it would enable... 
validations: required: true diff --git a/.github/ISSUE_TEMPLATE/feature_request.yml b/.github/ISSUE_TEMPLATE/feature_request.yml index 210549095..76493e9c1 100644 --- a/.github/ISSUE_TEMPLATE/feature_request.yml +++ b/.github/ISSUE_TEMPLATE/feature_request.yml @@ -7,8 +7,6 @@ body: label: Checklist description: Please check off the following boxes before continuing to create a new feature request! options: - - label: This is **not** a new feature or an enhancement to the Filecoin protocol. If it is, please open an [FIP issue](https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0001.md). - required: true - label: This is **not** brainstorming ideas. If you have an idea you'd like to discuss, please open a new discussion on [the lotus forum](https://github.com/filecoin-project/lotus/discussions/categories/ideas) and select the category as `Ideas`. required: true - label: I **have** a specific, actionable, and well motivated feature request to propose. @@ -20,19 +18,11 @@ body: options: - label: lotus daemon - chain sync required: false - - label: lotus miner - mining and block production + - label: lotus fvm/fevm - Lotus FVM and FEVM interactions required: false - label: lotus miner/worker - sealing required: false - - label: lotus miner - proving(WindowPoSt) - required: false - - label: lotus miner/market - storage deal - required: false - - label: lotus miner/market - retrieval deal - required: false - - label: lotus miner/market - data transfer - required: false - - label: lotus client + - label: lotus miner - proving(WindowPoSt/WinningPoSt) required: false - label: lotus JSON-RPC API required: false @@ -56,7 +46,7 @@ body: validations: required: true - type: textarea - id: alternates + id: alternatives attributes: label: Describe alternatives you've considered description: A clear and concise description of any alternative solutions or features you've considered. @@ -69,4 +59,3 @@ body: description: Add any other context, design docs or screenshots about the feature request here. validations: required: false - diff --git a/.github/ISSUE_TEMPLATE/service_developer_bug_report.yml b/.github/ISSUE_TEMPLATE/service_developer_bug_report.yml new file mode 100644 index 000000000..8174d13f5 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/service_developer_bug_report.yml @@ -0,0 +1,83 @@ +name: "Bug Report - developer/service provider" +description: "Bug report template about FEVM/FVM for developers/service providers" +labels: [need/triage, kind/bug, area/fevm] +body: +- type: checkboxes + attributes: + label: Checklist + description: Please check off the following boxes before continuing to file a bug report! + options: + - label: This is **not** a security-related bug/issue. If it is, please follow please follow the [security policy](https://github.com/filecoin-project/lotus/security/policy). + required: true + - label: I **have** searched on the [issue tracker](https://github.com/filecoin-project/lotus/issues) and the [lotus forum](https://github.com/filecoin-project/lotus/discussions), and there is no existing related issue or discussion. + required: true + - label: I did not make any code changes to lotus. 
+ required: false +- type: checkboxes + attributes: + label: Lotus component + description: Please select the lotus component you are filing a bug for + options: + - label: lotus Ethereum RPC + required: false + - label: lotus FVM - Lotus FVM interactions + required: false + - label: FEVM tooling + required: false + - label: Other + required: false +- type: textarea + id: version + attributes: + label: Lotus Version + render: text + description: Enter the output of `lotus version` if applicable. + placeholder: | + e.g. + Daemon: 1.19.0+mainnet+git.64059ca87+api1.5.0 + Local: lotus-miner version 1.19.0+mainnet+git.64059ca87 + validations: + required: true +- type: textarea + id: repro + attributes: + label: Repro Steps + description: "Steps to reproduce the behavior" + value: | + 1. Run '...' + 2. Do '...' + 3. See error '...' + ... + validations: + required: false +- type: textarea + id: Description + attributes: + label: Describe the Bug + description: | + This is where you get to tell us what went wrong, when doing so, please try to provide a clear and concise description of the bug with all related information: + * What you were doing when you experienced the bug? What are you trying to build? + * Any *error* messages and logs you saw, *where* you saw them, and what you believe may have caused them (if you have any ideas). + * What is the expected behaviour? Links to the actual code? + validations: + required: true +- type: textarea + id: toolingInfo + attributes: + label: Tooling + render: text + description: | + What kind of tooling are you using: + * Are you using ethers.js, Alchemy, Hardhat, etc. + validations: + required: true +- type: textarea + id: extraInfo + attributes: + label: Configuration Options + render: text + description: | + Please provide your updated FEVM-related configuration options, or custom environment variables related to Lotus FEVM + * lotus: use `lotus config updated` to get your configuration options, and copy the [FEVM] section + validations: + required: true diff --git a/CHANGELOG.md b/CHANGELOG.md index 6cca59561..650becb25 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,445 @@ # Lotus changelog +# v1.23.0 / 2023-04-21 + +This is the stable feature release for the upcoming MANDATORY network upgrade at `2023-04-27T13:00:00Z`, epoch `2809800`. This feature release delivers the nv19 Lightning and nv20 Thunder network upgrades for mainnet, and includes numerous improvements and enhancements for node operators, ETH RPC-providers and storage providers. + +## ☢️ Upgrade Warnings ☢️ + +Please read carefully through the **upgrade warnings** section if you are upgrading from a v1.20.X release, or the v1.22.0 release. If you are upgrading from a v1.21.0-rcX these warnings should be familiar to you. + +- Starting from this release, the SplitStore feature is automatically activated on new nodes. However, for existing Lotus users, you need to explicitly configure SplitStore by uncommenting the `EnableSplitstore` option in your `config.toml` file. To enable SplitStore, set `EnableSplitstore=true`, and to disable it, set `EnableSplitstore=false`. **It's important to note that your Lotus node will not start unless this configuration is properly set. Set it to false if you are running a full archival node!** +- This feature release requires a **minimum Go version of v1.19.7 or higher to successfully build Lotus**. Additionally, Go version v1.20 and higher is now also supported.
+- **Storage Providers:** The proofs libraries now have CUDA enabled by default, which requires you to install [CUDA](https://lotus.filecoin.io/tutorials/lotus-miner/cuda/) if you haven't already done so. If you prefer to use OpenCL on your GPUs instead, you can use the `FFI_USE_OPENCL=1` flag when building from source. On the other hand, if you want to disable GPUs altogether, you can use the `FFI_NO_GPU=1` environment variable when building from source. +- **Storage Providers:** The `lotus-miner sectors extend` command has been refactored to include the functionality of `lotus-miner sectors renew`. +- **Exchanges/Node operators/RPC-providers:** Execution traces (returned from `lotus state exec-trace`, `lotus state replay`, etc.) have changed to account for changes introduced by the FVM. **Please make sure to read the `Execution trace format change` section carefully, as these are interface-breaking changes.** +- **Syncing issues:** If you have been struggling with syncing issues in normal operations you can try to adjust the number of threads used for concurrent FVM execution via the `LOTUS_FVM_CONCURRENCY` environment variable. It is set to 4 threads by default. The recommended formula is concurrency == YOUR_RAM/4, but the maximum during a network upgrade is 24. If you are a Storage Provider and are pushing many messages within a short period of time, exporting `LOTUS_SKIP_APPLY_TS_MESSAGE_CALL_WITH_GAS=1` will also help with keeping in sync. +- **Catching up from a Snapshot:** Users have noticed that catching up sync from a snapshot is taking a lot longer these days. This is largely related to the built-in market actor consuming a lot of computational resources during block validation. A FIP for a short-term mitigation for this is currently in Last Call and will be included in the network version 19 upgrade if accepted. You [can read the FIP here.](https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0060.md) + +## Highlights + +### Execution Trace Format Changes + +Execution traces (returned from `lotus state exec-trace`, `lotus state replay`, etc.) have changed to account for changes introduced by the FVM. Specifically: + +- The `Msg` field no longer matches the Filecoin message format as many of the message fields didn't make sense in on-chain sub-calls. Instead, it now has the fields `To`, `From`, `Value`, `Method`, `Params`, and `ParamsCodec` where `ParamsCodec` is a new field indicating the IPLD codec of the parameters. + - Importantly, the `Msg.CID` field has been removed. This field is still present in top-level invocation results, just not inside the execution trace itself. +- The `MsgRct` field no longer includes a `GasUsed` field and now has a `ReturnCodec` field indicating the IPLD codec of the return value. +- The `Error` and `Duration` fields have been removed as these are not set by the FVM. The top-level message "invocation result" retains the `Error` and `Duration` fields; they've only been removed from the trace itself. +- Gas Charges no longer include "virtual" gas fields (those starting with `v...`) or source location information (`loc`) as neither field is set by the FVM. + +A note on "codecs": FVM parameters and return values are IPLD blocks where the "codec" specifies the data encoding. The codec will generally be one of: + +- `0x51`, `0x71` - CBOR or DagCBOR. You should generally treat these as equivalent. +- `0x55` - Raw bytes. +- `0x00` - Nothing.
If the codec is `0x00`, the parameter and/or return value should be empty and should be treated as "void" (not specified). + +
+ +Old ExecutionTrace: + + +```json +{ + "Msg": { + "Version": 0, + "To": "f01234", + "From": "f04321", + "Nonce": 1, + "Value": "0", + "GasLimit": 0, + "GasFeeCap": "1234", + "GasPremium": "1234", + "Method": 42, + "Params": "", + "CID": { + "/": "bafyxyz....." + }, + }, + "MsgRct": { + "ExitCode": 0, + "Return": "", + "GasUsed": 12345, + }, + "Error": "", + "Duration": 568191845, + "GasCharges": [ + { + "Name": "OnMethodInvocation", + "loc": null, + "tg": 23856, + "cg": 23856, + "sg": 0, + "vtg": 0, + "vcg": 0, + "vsg": 0, + "tt": 0 + }, + { + "Name": "wasm_exec", + "loc": null, + "tg": 1764, + "cg": 1764, + "sg": 0, + "vtg": 0, + "vcg": 0, + "vsg": 0, + "tt": 0 + }, + { + "Name": "OnSyscall", + "loc": null, + "tg": 14000, + "cg": 14000, + "sg": 0, + "vtg": 0, + "vcg": 0, + "vsg": 0, + "tt": 0 + }, + ], + "Subcalls": [ + { + "Msg": { }, + "MsgRct": { }, + "Error": "", + "Duration": 1235, + "GasCharges": [], + "Subcalls": [], + }, + ] +} +``` +
+ +
+ +New ExecutionTrace: + + +```json +{ + "Msg": { + "To": "f01234", + "From": "f04321", + "Value": "0", + "Method": 42, + "Params": "", + "ParamsCodec": 81 + }, + "MsgRct": { + "ExitCode": 0, + "Return": "", + "ReturnCodec": 81 + }, + "GasCharges": [ + { + "Name": "OnMethodInvocation", + "loc": null, + "tg": 23856, + "cg": 23856, + "tt": 0 + }, + { + "Name": "wasm_exec", + "loc": null, + "tg": 1764, + "cg": 1764, + "sg": 0, + "tt": 0 + }, + { + "Name": "OnSyscall", + "loc": null, + "tg": 14000, + "cg": 14000, + "sg": 0, + "tt": 0 + }, + ], + "Subcalls": [ + { + "Msg": { }, + "MsgRct": { }, + "GasCharges": [], + "Subcalls": [], + }, + ] +} +``` + +
+ +</details> + +**SplitStore** + +This feature release introduces numerous improvements and fixes to tackle SplitStore-related issues that have been reported. With this feature release, SplitStore is automatically activated by default on new nodes. However, for existing Lotus users, you need to explicitly configure SplitStore by uncommenting the `EnableSplitstore` option in your `config.toml` file. To enable SplitStore, set `EnableSplitstore=true`, and to disable it, set `EnableSplitstore=false`. **It's important to note that your Lotus node will not start unless this configuration is properly set. Set it to false if you are running a full archival node!** + +SplitStore also has some new configuration settings that you can set in your `config.toml` file: +- `HotstoreMaxSpaceTarget` suggests the max allowed space (in bytes) the hotstore can take. +- `HotstoreMaxSpaceThreshold` a moving GC will be triggered when total moving size exceeds this threshold (in bytes). +- `HotstoreMaxSpaceSafetyBuffer` a safety buffer to prevent moving GC from an overflowing disk. + +The SplitStore also has two new commands: + +- `lotus chain prune hot` is a much less resource-intensive GC and is best suited for situations where you don't have the spare disk space for a full GC. +- `lotus chain prune hot-moving` will run a full moving garbage collection of the hotstore. This command creates a new hotstore before deleting the old one, so you need working room in the hotstore directory. The current size of a fully GC'd hotstore is around 295 GiB, so you need to make sure you have at least that available. + +You can read more about the new SplitStore commands in [the documentation](https://lotus.filecoin.io/lotus/configure/splitstore/#manual-chain-store-garbage-collection). + +**RPC API improvements** + +This feature release includes all the RPC API improvements made in the Lotus v1.20.x patch releases. It includes an updated FFI that sets the FVM parallelism to 4 by default. + +Node operators with higher memory specs can experiment with setting LOTUS_FVM_CONCURRENCY to higher values, up to 48, to allow for more concurrent FVM execution. + +**Experimental scheduler assigners** + +In this release there are four new experimental scheduler assigners: + +- The `experiment-spread-qcount` - similar to the spread assigner but also takes into account task counts which are in running/preparing/queued states. +- The `experiment-spread-tasks` - similar to the spread assigner, but counts running tasks on a per-task-type basis. +- The `experiment-spread-tasks-qcount` - similar to the spread assigner, but also takes into account task counts which are in running/preparing/queued states, as well as counting running tasks on a per-task-type basis. Check the results for this assigner on [storage-only lotus-workers here](https://github.com/filecoin-project/lotus/issues/8566#issuecomment-1446978856). +- The `experiment-random` - In each schedule loop the assigner figures out the set of all workers which can handle the task and then picks a random one. Check the results for this assigner on [storage-only lotus-workers here](https://github.com/filecoin-project/lotus/issues/8566#issuecomment-1447064218). + +**Graceful shutdown of lotus-workers** +We have cleaned up some commands in the `lotus-worker` to make it less confusing how to gracefully shut down a `lotus-worker` while there are incoming sealing tasks in the pipeline. To shut down a `lotus-worker` gracefully: + +1.
`lotus-worker tasks disable --all` and wait for the worker to finish processing its current tasks. +2. `lotus-worker stop` to detach it and do maintenance/upgrades. + +**CLI speedups** + +The `lotus-miner sector list` command now runs in parallel, which should speed up the process anywhere between 2x-10x+. You can tune it additionally with the `check-parallelism` option in the command. The `lotus-miner info` command also has a large speed improvement, as calls to the lotus legacy market have been removed. + +## New features +- feat: splitstore: Pause compaction when out of sync ([filecoin-project/lotus/#10641](https://github.com/filecoin-project/lotus/pull/10641)) + - Pauses the SplitStore compaction if the node is out of sync. Resumes the compaction when it's back in sync. +- feat: splitstore: limit moving gc threads (#10621) ([filecoin-project/lotus/#10621](https://github.com/filecoin-project/lotus/pull/10621)) + - Makes moving GC less likely to cause the node to fall out of sync. +- feat: splitstore: Update config default value (#10605) ([filecoin-project/lotus/#10605](https://github.com/filecoin-project/lotus/pull/10605)) + - Sets the Splitstore HotStoreMaxSpaceTarget config to 650GB by default. +- feat: splitstore: Splitstore enabled by default (#10429) ([filecoin-project/lotus#10429](https://github.com/filecoin-project/lotus/pull/10429)) + - Enables SplitStore by default on new Lotus nodes. Existing Lotus users need to explicitly configure it. +- feat: splitstore: Configure max space used by hotstore and GC makes best effort to respect ([filecoin-project/lotus#10391](https://github.com/filecoin-project/lotus/pull/10391)) + - Adds three new configs for setting the maximum allowed space the hotstore can take. +- feat: splitstore: Badger GC of hotstore command ([filecoin-project/lotus#10387](https://github.com/filecoin-project/lotus/pull/10387)) + - Adds a `lotus chain prune hot` command to run the garbage collection of the hotstore in a user-driven way. +- feat: sched: Assigner experiments ([filecoin-project/lotus#10356](https://github.com/filecoin-project/lotus/pull/10356)) + - Introduces experimental scheduler assigners that work better for setups that use storage-only lotus-workers. +- fix: wdpost: disabled post worker handling ([filecoin-project/lotus#10394](https://github.com/filecoin-project/lotus/pull/10394)) + - Improves scheduling logic for Proof-of-SpaceTime workers. +- feat: cli: list claims and remove expired claims ([filecoin-project/lotus#9875](https://github.com/filecoin-project/lotus/pull/9875)) + - Adds a `lotus filplus list-claims` command to list claims made by a provider, and a `lotus filplus remove-expired-claims` command to remove expired claims. +- feat: cli: make sectors list much faster ([filecoin-project/lotus#10202](https://github.com/filecoin-project/lotus/pull/10202)) + - Makes `lotus-miner sector list` checks run in parallel. +- feat: cli: Add an EVM command to fetch a contract's bytecode ([filecoin-project/lotus#10443](https://github.com/filecoin-project/lotus/pull/10443)) + - Adds a `lotus evm bytecode` command to fetch a contract's bytecode. +- feat: mempool: Reduce minimum replace fee from 1.25x to 1.1x (#10416) ([filecoin-project/lotus#10416](https://github.com/filecoin-project/lotus/pull/10416)) + - Reduces the minimum message replacement fee to help include message replacements from developers using Ethereum tools like MetaMask.
+- feat: update renew-sectors with FIP-0045 logic ([filecoin-project/lotus#10328](https://github.com/filecoin-project/lotus/pull/10328)) + - Updates the `lotus-miner sectors extend` command with FIP-0045 logic to include the ability to drop claims and set the maximum number of messages contained in a message. +- feat: IPC: Abstract common consensus functions and consensus interface ([filecoin-project/lotus#9481](https://github.com/filecoin-project/lotus/pull/9481)) + - Adds eudico's consensus interface to Lotus and implements EC behind that interface. This abstraction is the stepping-stone for Mir's integration. +- fix: worker: add all tasks flag ([filecoin-project/lotus#10232](https://github.com/filecoin-project/lotus/pull/10232)) + - Adds an `all` flag for the `lotus-worker tasks enable/disable` commands. +- feat:shed:add cid to cbor serialization command ([filecoin-project/lotus#10032](https://github.com/filecoin-project/lotus/pull/10032)) + - Adds two `lotus-shed` commands, `lotus-shed cid bytes` and `lotus-shed cid cbor`, to serialize cid to cbor and cid to bytes. +- feat: add toolshed commands to inspect statetree size ([filecoin-project/lotus#9982](https://github.com/filecoin-project/lotus/pull/9982)) + - Adds two commands, `lotus-shed stat-actor` and `lotus-shed stat-obj`, that work with an offline lotus repo to report dag size stats. +- feat: shed: encode address to bytes ([filecoin-project/lotus#10105](https://github.com/filecoin-project/lotus/pull/10105)) + - Adds a `lotus-shed address encode` command for encoding a filecoin address to hex bytes. +- feat: chain: export-range ([filecoin-project/lotus#10145](https://github.com/filecoin-project/lotus/pull/10145)) + - Adds a `lotus chain export-range` command that can create archival-grade ranged exports of the chain as quickly as possible. +- feat: stmgr: cache migrated stateroots ([filecoin-project/lotus#10282](https://github.com/filecoin-project/lotus/pull/10282)) + - Caches network migration results to avoid running migrations twice. +- feat: shed: Add a tool to read data from sectors ([filecoin-project/lotus#10169](https://github.com/filecoin-project/lotus/pull/10169)) + - Adds a `lotus-shed sectors read` command that extracts data from sectors in a running lotus-miner deployment. +- feat: cli: Refactor renew and remove extend ([filecoin-project/lotus#9920](https://github.com/filecoin-project/lotus/pull/9920)) + - Refactors the `lotus-miner sectors extend` command to have the functionality of `lotus-miner sectors renew`. The `lotus-miner sectors renew` command has been deprecated. +- feat: shed: Add beneficiary commands ([filecoin-project/lotus#10037](https://github.com/filecoin-project/lotus/pull/10037)) + - Adds the beneficiary address command to `lotus-shed`. You can now use `lotus-shed actor propose-change-beneficiary` and `lotus-shed actor confirm-change-beneficiary` to change beneficiary addresses. + +## Improvements + +- backport: fix: miner: correctly count sector extensions (10555) ([filecoin-project/lotus#10555](https://github.com/filecoin-project/lotus/pull/10555)) + - Fixes the issue with sector extensions. +- fix: proving: Initialize slice with with same length as partition (#10574) ([filecoin-project/lotus#10574](https://github.com/filecoin-project/lotus/pull/10574)) + - Fixes an issue where `lotus-miner proving compute window-post` panicked when trying to make skipped sectors human readable.
+- feat: stmgr: speed up calculation of genesis circ supply (#10553) ([filecoin-project/lotus#10553](https://github.com/filecoin-project/lotus/pull/10553)) +- perf: eth: gas estimate set applyTsMessages false (#10546) ([filecoin-project/lotus#10546](https://github.com/filecoin-project/lotus/pull/10546)) +- feat: config: Force existing users to opt into new defaults (#10488) ([filecoin-project/lotus#10488](https://github.com/filecoin-project/lotus/pull/10488)) + - Forces existing users to opt into the new SplitStore defaults. +- fix: splitstore: Demote now common logs (#10516) ([filecoin-project/lotus#10516](https://github.com/filecoin-project/lotus/pull/10516)) +- fix: splitstore: Don't enforce walking receipt tree during compaction ([filecoin-project/lotus#10502](https://github.com/filecoin-project/lotus/pull/10502)) +- fix: splitstore: Fix the overzealous fix (#10366) ([filecoin-project/lotus#10366](https://github.com/filecoin-project/lotus/pull/10366)) +- fix: splitstore: Two fixes, better logging and comments (#10332) ([filecoin-project/lotus#10332](https://github.com/filecoin-project/lotus/pull/10332)) +- fix: fsm: shutdown removed sectors FSMs ([filecoin-project/lotus#10363](https://github.com/filecoin-project/lotus/pull/10363)) + - Fixes an issue where removed sectors still got state machine events. +- fix: rpcenc: Don't hang when source dies ([filecoin-project/lotus#10116](https://github.com/filecoin-project/lotus/pull/10116)) + - Fixes an issue where AddPiece tasks could get stuck if the Boost process was abruptly lost. +- fix: make debugging windowPoSt-failures human readable ([filecoin-project/lotus#10390](https://github.com/filecoin-project/lotus/pull/10390)) + - Makes the skipped sector list in `lotus-miner proving compute window-post` human readable. +- fix: cli: Hide `lotus-worker set` command ([filecoin-project/lotus#10384](https://github.com/filecoin-project/lotus/pull/10384)) + - Hides the `lotus-worker set` command. This command will be deprecated later. +- fix: worker: Hide `wait-quiet` cmd ([filecoin-project/lotus#10331](https://github.com/filecoin-project/lotus/pull/10331)) + - Hides the `lotus-worker wait-quiet` command. This command will be deprecated later. +- fix: post: Tune down default post-parallel-reads ([filecoin-project/lotus#10365](https://github.com/filecoin-project/lotus/pull/10365)) + - Tunes down the default post-parallel-reads to a more conservative number to prevent sectors from being skipped due to network timeouts. +- fix: cli: error if backup file already exists ([filecoin-project/lotus#10209](https://github.com/filecoin-project/lotus/pull/10209)) + - Errors out if a backup file with the same name already exists when using the `lotus-miner backup` or `lotus backup` command. +- fix: cli: option to set-seal-delay in seconds ([filecoin-project/lotus#10208](https://github.com/filecoin-project/lotus/pull/10208)) + - Adds the option to specify `lotus-miner sectors set-seal-delay` in seconds. +- fix: cli: extend cmd to get the right sector number ([filecoin-project/lotus#10182](https://github.com/filecoin-project/lotus/pull/10182)) + - Makes sure the `lotus-miner sectors extend` command gets the correct sector number. +- feat: wdpost: Emit more detailed errors ([filecoin-project/lotus#10121](https://github.com/filecoin-project/lotus/pull/10121)) + - Emits more detailed windowPoSt error messages, making it easier to debug PoSt issues.
+- fix: Lotus Gateway: Add missing methods - master ([filecoin-project/lotus#10420](https://github.com/filecoin-project/lotus/pull/10420)) + - Adds `StateNetworkName`, `MpoolGetNonce`, `StateCall` and `StateDecodeParams` methods to Lotus Gateway. +- fix: stmgr: don't attempt to lookup genesis state (#10472) ([filecoin-project/lotus#10472](https://github.com/filecoin-project/lotus/pull/10472)) +- feat: gateway: export StateVerifierStatus ([filecoin-project/lotus#10477](https://github.com/filecoin-project/lotus/pull/10477)) +- fix: gateway: correctly apply the fee history lookback max ([filecoin-project/lotus#10464](https://github.com/filecoin-project/lotus/pull/10464)) +- fix: gateway: drop overzealous guard on MsigGetVested ([filecoin-project/lotus#10451](https://github.com/filecoin-project/lotus/pull/10451)) +- feat: apply gateway lookback limit to eth API lookback ([filecoin-project/lotus#10467](https://github.com/filecoin-project/lotus/pull/10467)) +- fix: revert "Eth API: drop support for 'pending' block parameter." ([filecoin-project/lotus#10474](https://github.com/filecoin-project/lotus/pull/10474)) +- fix: Eth API: make net_version return the chain ID ([filecoin-project/lotus#10456](https://github.com/filecoin-project/lotus/pull/10456)) +- fix: eth: handle a potential divide by zero in receipt handling ([filecoin-project/lotus#10495](https://github.com/filecoin-project/lotus/pull/10495)) +- fix: ethrpc: Don't lock up when eth subscriber goes away ([filecoin-project/lotus#10485](https://github.com/filecoin-project/lotus/pull/10485)) +- feat: eth: Avoid StateCompute in EthTxnReceipt lookup (#10460) ([filecoin-project/lotus#10460](https://github.com/filecoin-project/lotus/pull/10460)) +- feat: eth: optimize eth block loading + eth_feeHistory ([filecoin-project/lotus#10446](https://github.com/filecoin-project/lotus/pull/10446)) +- feat: state: skip tipset execution when possible ([filecoin-project/lotus#10445](https://github.com/filecoin-project/lotus/pull/10445)) +- feat: eth API: reject masked ID addresses embedded in f410f payloads ([filecoin-project/lotus#10440](https://github.com/filecoin-project/lotus/pull/10440)) +- fix: Eth API: make block parameter parsing sounder. 
([filecoin-project/lotus#10427](https://github.com/filecoin-project/lotus/pull/10427)) +- fix: eth API: return correct txIdx around null blocks (#10419) ([filecoin-project/lotus#10419](https://github.com/filecoin-project/lotus/pull/10419)) +- fix: EthAPI: use StateCompute for feeHistory; apply minimum gas premium (#10413) ([filecoin-project/lotus#10413](https://github.com/filecoin-project/lotus/pull/10413)) +- refactor: EthAPI: Drop unnecessary param from newEthTxReceipt ([filecoin-project/lotus#10411](https://github.com/filecoin-project/lotus/pull/10411)) +- fix: eth API: correct gateway restrictions, drop unimplemented methods ([filecoin-project/lotus#10409](https://github.com/filecoin-project/lotus/pull/10409)) +- fix: EthAPI: Correctly get parent hash ([filecoin-project/lotus#10389](https://github.com/filecoin-project/lotus/pull/10389)) +- fix: EthAPI: Make newEthBlockFromFilecoinTipSet faster and correct ([filecoin-project/lotus#10380](https://github.com/filecoin-project/lotus/pull/10380)) +- fix: eth: incorrect struct tags (#10309) ([filecoin-project/lotus#10309](https://github.com/filecoin-project/lotus/pull/10309)) +- refactor: update cache to the new generic version (#10463) ([filecoin-project/lotus#10463](https://github.com/filecoin-project/lotus/pull/10463)) +- feat: consensus: log ApplyBlock timing/gas stats ([filecoin-project/lotus#10470](https://github.com/filecoin-project/lotus/pull/10470)) +- feat: chain: make chain tipset fetching 1000x faster ([filecoin-project/lotus#10423](https://github.com/filecoin-project/lotus/pull/10423)) +- chain: explicitly check that gasLimit is above zero ([filecoin-project/lotus#10198](https://github.com/filecoin-project/lotus/pull/10198)) +- feat: blockstore: Envvar can adjust badger compaction worker poolsize ([filecoin-project/lotus#9973](https://github.com/filecoin-project/lotus/pull/9973)) +- feat: stmgr: add env to disable premigrations ([filecoin-project/lotus#10283](https://github.com/filecoin-project/lotus/pull/10283)) +- chore: Remove legacy market info from lotus-miner info ([filecoin-project/lotus#10364](https://github.com/filecoin-project/lotus/pull/10364)) + - Removes the legacy market info from the `lotus-miner info` output, which speeds up the command significantly.
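Several of the Eth and gateway fixes above deal with lookback limits (for example the fee history lookback max and the gateway lookback limit applied to the Eth API). The API diff later in this patch also adds `EthGetTransactionByHashLimited` and `EthGetTransactionReceiptLimited`, which take an explicit lookback limit in epochs. Below is a minimal sketch of calling them from Go, assuming a locally reachable node and the `client.NewFullNodeRPCV1` helper; the endpoint, the zero transaction hash, and the 2880-epoch limit (roughly one day at 30-second epochs) are placeholders. The Improvements list continues after the sketch.

```go
package main

import (
	"context"
	"fmt"
	"net/http"

	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/lotus/api/client"
	"github.com/filecoin-project/lotus/chain/types/ethtypes"
)

func main() {
	ctx := context.Background()

	// Illustrative local endpoint; add an auth header if your node requires one.
	full, closer, err := client.NewFullNodeRPCV1(ctx, "http://127.0.0.1:1234/rpc/v1", http.Header{})
	if err != nil {
		fmt.Println("connect:", err)
		return
	}
	defer closer()

	// Placeholder transaction hash; substitute a real one.
	var txHash ethtypes.EthHash

	// Cap the lookback at roughly one day of epochs (2880 epochs at 30s each); illustrative only.
	limit := abi.ChainEpoch(2880)

	tx, err := full.EthGetTransactionByHashLimited(ctx, &txHash, limit)
	fmt.Println("tx:", tx, "err:", err)

	receipt, err := full.EthGetTransactionReceiptLimited(ctx, txHash, limit)
	fmt.Println("receipt:", receipt, "err:", err)
}
```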
+- chore: blockstore: Plumb through a proper Flush() method on all blockstores ([filecoin-project/lotus#10465](https://github.com/filecoin-project/lotus/pull/10465)) +- fix: extend LOTUS_CHAIN_BADGERSTORE_DISABLE_FSYNC to the markset ([filecoin-project/lotus#10172](https://github.com/filecoin-project/lotus/pull/10172)) +- feat: vm: switch to the new exec trace format (#10372) ([filecoin-project/lotus#10372](https://github.com/filecoin-project/lotus/pull/10372)) +- fix: Remove workaround that is no longer needed ([filecoin-project/lotus#9995](https://github.com/filecoin-project/lotus/pull/9995)) +- feat: Check for allocation expiry when waiting to seal sectors ([filecoin-project/lotus#9878](https://github.com/filecoin-project/lotus/pull/9878)) +- feat: Allow libp2p user agent to be overridden ([filecoin-project/lotus#10149](https://github.com/filecoin-project/lotus/pull/10149)) +- feat: cli: Add global color flag ([filecoin-project/lotus#10022](https://github.com/filecoin-project/lotus/pull/10022)) +- fix: should not serve non v0 api in v0 ([filecoin-project/lotus#10066](https://github.com/filecoin-project/lotus/pull/10066)) +- fix: build: drop drand incentinet servers ([filecoin-project/lotus#10476](https://github.com/filecoin-project/lotus/pull/10476)) +- fix: sealing: stub out the FileSize function on Windows ([filecoin-project/lotus#10035](https://github.com/filecoin-project/lotus/pull/10035)) + +## Dependencies +- github.com/filecoin-project/go-dagaggregator-unixfs (v0.2.0 -> v0.3.0): +- github.com/filecoin-project/go-fil-markets (v1.25.2 -> v1.27.0-rc1): +- github.com/filecoin-project/go-jsonrpc (v0.2.1 -> v0.2.3): +- github.com/filecoin-project/go-statemachine (v1.0.2 -> v1.0.3): +- github.com/filecoin-project/go-state-types (v0.10.0 -> v0.11.0-alpha-3): +- github.com/ipfs/go-cid (v0.3.2 -> v0.4.0): +- github.com/ipfs/go-libipfs (v0.5.0 -> v0.7.0): +- github.com/ipfs/go-path (v0.3.0 -> v0.3.1): +- chore: deps: update to go-state-types v0.11.0-alpha-3 ([filecoin-project/lotus#10606](https://github.com/filecoin-project/lotus/pull/10606)) +- deps: update go-libp2p-pubsub to v0.9.3 ([filecoin-project/lotus#10483](https://github.com/filecoin-project/lotus/pull/10483)) +- deps: Update go-jsonrpc to v0.2.2 ([filecoin-project/lotus#10395](https://github.com/filecoin-project/lotus/pull/10395)) +- Update to go-data-transfer v2 and libp2p, still wip ([filecoin-project/lotus#10382](https://github.com/filecoin-project/lotus/pull/10382)) +- dep: ipld: update ipld prime to v0.20.0 ([filecoin-project/lotus#10247](https://github.com/filecoin-project/lotus/pull/10247)) +- chore: node: migrate go-bitswap to go-libipfs/bitswap ([filecoin-project/lotus#10138](https://github.com/filecoin-project/lotus/pull/10138)) +- chore: all: bump go-libipfs to replace go-block-format ([filecoin-project/lotus#10126](https://github.com/filecoin-project/lotus/pull/10126)) +- chore: market: Upgrade to index-provider 0.10.0 ([filecoin-project/lotus#9981](https://github.com/filecoin-project/lotus/pull/9981)) +- chore: all: bump go-libipfs ([filecoin-project/lotus#10563](https://github.com/filecoin-project/lotus/pull/10563)) + +## Others +- Update service_developer_bug_report.yml ([filecoin-project/lotus#10321](https://github.com/filecoin-project/lotus/pull/10321)) +- chore: github: Service-provider/dev bug template 
([filecoin-project/lotus#10321](https://github.com/filecoin-project/lotus/pull/10321)) +- chore: github: update enhancement and feature templates ([filecoin-project/lotus#10291](https://github.com/filecoin-project/lotus/pull/10291)) +- chore: github: Update bug_report template ([filecoin-project/lotus#10289](https://github.com/filecoin-project/lotus/pull/10289)) +- fix: itest: avoid failing the test when we race the miner ([filecoin-project/lotus#10461](https://github.com/filecoin-project/lotus/pull/10461)) +- fix: github: Discussion and FIP links in `New Issue` ([filecoin-project/lotus#10268](https://github.com/filecoin-project/lotus/pull/10268)) +- fix: state: short-circuit genesis state computation ([filecoin-project/lotus#10397](https://github.com/filecoin-project/lotus/pull/10397)) +- fix: rpcenc: deflake TestReaderRedirectDrop ([filecoin-project/lotus#10406](https://github.com/filecoin-project/lotus/pull/10406)) +- fix: tests: Fix TestMinerAllInfo test ([filecoin-project/lotus#10319](https://github.com/filecoin-project/lotus/pull/10319)) +- fix: tests: Make TestWorkerKeyChange not flaky ([filecoin-project/lotus#10320](https://github.com/filecoin-project/lotus/pull/10320)) +- test: eth: make sure we can deploy a new placeholder on transfer (#10281) ([filecoin-project/lotus#10281](https://github.com/filecoin-project/lotus/pull/10281)) +- fix: itests: Fix flaky paych test ([filecoin-project/lotus#10100](https://github.com/filecoin-project/lotus/pull/10100)) +- fix: cli: add ArgsUsage ([filecoin-project/lotus#10147](https://github.com/filecoin-project/lotus/pull/10147)) +- chore: cli: cleanup cli ([filecoin-project/lotus#10114](https://github.com/filecoin-project/lotus/pull/10114)) +- chore: cli: Remove unneeded individual color flags ([filecoin-project/lotus#10028](https://github.com/filecoin-project/lotus/pull/10028)) +- fix: cli: remove requirements in helptext ([filecoin-project/lotus#9969](https://github.com/filecoin-project/lotus/pull/9969)) +- chore: build: release v1.21.0-rc1 prep ([filecoin-project/lotus#10524](https://github.com/filecoin-project/lotus/pull/10524)) +- chore: merge release/v1.20.0 into master ([filecoin-project/lotus#10308](https://github.com/filecoin-project/lotus/pull/10308)) +- chore: merge release branch into master ([filecoin-project/lotus#10272](https://github.com/filecoin-project/lotus/pull/10272)) +- chore: merge release/v1.20.0 into master ([filecoin-project/lotus#10238](https://github.com/filecoin-project/lotus/pull/10238)) +- chore: releases to master ([filecoin-project/lotus#10490](https://github.com/filecoin-project/lotus/pull/10490)) +- chore: merge releases into master ([filecoin-project/lotus#10377](https://github.com/filecoin-project/lotus/pull/10377)) +- chore: merge release/v1.20.0 into master ([filecoin-project/lotus#10030](https://github.com/filecoin-project/lotus/pull/10030)) +- chore: update ffi to increase execution parallelism (#10480) ([filecoin-project/lotus#10480](https://github.com/filecoin-project/lotus/pull/10480)) +- chore: update the FFI for release (#10435) ([filecoin-project/lotus#10444](https://github.com/filecoin-project/lotus/pull/10444)) +- build: bump version to v1.21.0-dev ([filecoin-project/lotus#10249](https://github.com/filecoin-project/lotus/pull/10249)) +- build: docker: Update GO-version (#10591) ([filecoin-project/lotus#10591](https://github.com/filecoin-project/lotus/pull/10591)) +- chore: merge release/v1.20.0 into master ([filecoin-project/lotus#10184](https://github.com/filecoin-project/lotus/pull/10184)) +- 
docs: API Gateway: patch documentation note about make gen command ([filecoin-project/lotus#10422](https://github.com/filecoin-project/lotus/pull/10422)) +- chore: docs: fix docs typos ([filecoin-project/lotus#10155](https://github.com/filecoin-project/lotus/pull/10155)) +- chore: docker: Add back <> parameter for docker push ([filecoin-project/lotus#10096](https://github.com/filecoin-project/lotus/pull/10096)) +- chore: docker: Properly balance <> in circleci docker config ([filecoin-project/lotus#10088](https://github.com/filecoin-project/lotus/pull/10088)) +- chore: ci: Fix dirty git state when building docker images ([filecoin-project/lotus#10125](https://github.com/filecoin-project/lotus/pull/10125)) +- chore: build: Remove AppImage and Snapcraft build automation ([filecoin-project/lotus#10003](https://github.com/filecoin-project/lotus/pull/10003)) +- chore: ci: Update codeql to v2 ([filecoin-project/lotus#10120](https://github.com/filecoin-project/lotus/pull/10120)) +- feat: ci: make ci more efficient ([filecoin-project/lotus#9910](https://github.com/filecoin-project/lotus/pull/9910)) +- feat: scripts: go.mod dep diff script ([filecoin-project/lotus#9711](https://github.com/filecoin-project/lotus/pull/9711)) + +## Contributors + +| Contributor | Commits | Lines ± | Files Changed | +|-------------|---------|---------|---------------| +| Hannah Howard | 2 | +2909/-6026 | 84 | +| Łukasz Magiera | 42 | +2967/-1848 | 95 | +| Steven Allen | 20 | +1703/-1345 | 88 | +| Alfonso de la Rocha | 17 | +823/-1808 | 86 | +| Peter Rabbitson | 9 | +1957/-219 | 34 | +| Geoff Stuart | 12 | +818/-848 | 29 | +| hannahhoward | 5 | +507/-718 | 36 | +| Hector Sanjuan | 6 | +443/-726 | 35 | +| Kevin Li | 1 | +1124/-14 | 22 | +| zenground0 | 30 | +791/-269 | 88 | +| frrist | 1 | +992/-16 | 13 | +| Travis Person | 4 | +837/-53 | 24 | +| Phi | 20 | +622/-254 | 34 | +| Ian Davis | 7 | +35/-729 | 20 | +| Aayush | 10 | +378/-177 | 40 | +| Raúl Kripalani | 15 | +207/-138 | 19 | +| Arsenii Petrovich | 7 | +248/-94 | 30 | +| ZenGround0 | 5 | +238/-39 | 15 | +| Neel Virdy | 1 | +109/-107 | 58 | +| ychiao | 1 | +135/-39 | 3 | +| Jorropo | 2 | +87/-82 | 67 | +| Marten Seemann | 8 | +69/-64 | 17 | +| Rod Vagg | 1 | +55/-16 | 3 | +| Masih H. Derkani | 3 | +39/-27 | 12 | +| raulk | 2 | +30/-29 | 5 | +| dependabot[bot] | 4 | +37/-17 | 8 | +| beck | 2 | +38/-2 | 2 | +| Jennifer Wang | 4 | +20/-19 | 19 | +| Richard Guan | 3 | +28/-8 | 5 | +| omahs | 7 | +14/-14 | 7 | +| dirkmc | 2 | +19/-7 | 6 | +| David Choi | 2 | +16/-5 | 2 | +| Mike Greenberg | 1 | +18/-1 | 1 | +| Adin Schmahmann | 1 | +19/-0 | 2 | +| Phi-rjan | 5 | +12/-4 | 5 | +| Dirk McCormick | 2 | +6/-6 | 3 | +| Aayush Rajasekaran | 2 | +9/-3 | 2 | +| Jiaying Wang | 5 | +6/-4 | 5 | +| Anjor Kanekar | 1 | +5/-5 | 1 | +| vyzo | 1 | +3/-3 | 2 | +| 0x5459 | 1 | +1/-1 | 1 | + # v1.22.1 / 2023-04-23 ## Important Notice @@ -166,7 +606,10 @@ verifiedregistry bafk2bzacedej3dnr62g2je2abmyjg3xqv4otvh6e26du5fcrhvw7zgcaaez3a ### Dependencies github.com/filecoin-project/go-state-types (v0.11.0-rc1 -> v0.11.1): +<<<<<<< HEAD +======= +>>>>>>> releases # v1.20.4 / 2023-03-17 This is a patch release intended to alleviate performance issues reported by some users since the nv18 upgrade. @@ -186,7 +629,22 @@ Users with higher memory specs can experiment with setting `LOTUS_FVM_CONCURRENC # v1.20.3 / 2023-03-09 -A 🐈 stepped on the ⌨️ and made a mistake while resolving conflicts 😨. This releases only includes #10439 to fix that mistake. 
v1.20.2 is retracted - Please skip v1.20.2 and only update to v1.20.3!!! +A 🐈 stepped on the ⌨️ and made a mistake while resolving conflicts 😨. This release only includes #10439 to fix that mistake. v1.20.2 is retracted - Please skip v1.20.2 and **only** update to v1.20.3!!! + +## Changelog +> compared to v1.20.1 + +This is a HIGHLY RECOMMENDED patch release for node operators/API service providers that run an ETH RPC service, and an optional release for Storage Providers. + +## Bug fixes +- fix: EthAPI: use StateCompute for feeHistory; apply minimum gas premium #10413 +- fix: eth API: return correct txIdx around null blocks #10419 +- fix: Eth API: make block parameter parsing sounder. #10427 + +## Improvements +- feat: Lotus Gateway: Add missing methods - master #10420 +- feat: mempool: Reduce minimum replace fee from 1.25x to 1.1x #10416 +- We recommend storage providers update their nodes to this patch; it will help improve the experience of developers who use Ethereum tooling. # v1.20.2 / 2023-03-09 diff --git a/Dockerfile b/Dockerfile index 81089517b..dfdfedce3 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,5 +1,5 @@ ##################################### -FROM golang:1.18.8-buster AS lotus-builder +FROM golang:1.19.7-buster AS lotus-builder MAINTAINER Lotus Development Team RUN apt-get update && apt-get install -y ca-certificates build-essential clang ocl-icd-opencl-dev ocl-icd-libopencl1 jq libhwloc-dev diff --git a/Dockerfile.lotus b/Dockerfile.lotus index 2278e8511..91373b62f 100644 --- a/Dockerfile.lotus +++ b/Dockerfile.lotus @@ -1,6 +1,6 @@ ##### DEPRECATED -FROM golang:1.18.8-buster AS builder-deps +FROM golang:1.19.7-buster AS builder-deps MAINTAINER Lotus Development Team RUN apt-get update && apt-get install -y ca-certificates build-essential clang ocl-icd-opencl-dev ocl-icd-libopencl1 jq libhwloc-dev diff --git a/GO_VERSION_MIN b/GO_VERSION_MIN index 1a31d398c..98adfe8e1 100644 --- a/GO_VERSION_MIN +++ b/GO_VERSION_MIN @@ -1 +1 @@ -1.18.8 +1.19.7 diff --git a/README.md b/README.md index 76cac2c7e..b67cb952f 100644 --- a/README.md +++ b/README.md @@ -71,10 +71,10 @@ For other distributions you can find the required dependencies [here.](https://l #### Go -To build Lotus, you need a working installation of [Go 1.18.8 or higher](https://golang.org/dl/): +To build Lotus, you need a working installation of [Go 1.19.7 or higher](https://golang.org/dl/): ```bash -wget -c https://golang.org/dl/go1.18.8.linux-amd64.tar.gz -O - | sudo tar -xz -C /usr/local +wget -c https://golang.org/dl/go1.19.7.linux-amd64.tar.gz -O - | sudo tar -xz -C /usr/local ``` **TIP:** diff --git a/api/api_full.go b/api/api_full.go index 1f7c38edf..6ce061865 100644 --- a/api/api_full.go +++ b/api/api_full.go @@ -13,7 +13,7 @@ import ( "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-bitfield" - datatransfer "github.com/filecoin-project/go-data-transfer" + datatransfer "github.com/filecoin-project/go-data-transfer/v2" "github.com/filecoin-project/go-fil-markets/retrievalmarket" "github.com/filecoin-project/go-fil-markets/storagemarket" "github.com/filecoin-project/go-jsonrpc" @@ -173,10 +173,24 @@ type FullNode interface { // If oldmsgskip is set, messages from before the requested roots are also not included. 
ChainExport(ctx context.Context, nroots abi.ChainEpoch, oldmsgskip bool, tsk types.TipSetKey) (<-chan []byte, error) //perm:read - // ChainPrune prunes the stored chain state and garbage collects; only supported if you + // ChainExportRangeInternal triggers the export of a chain + // CAR-snapshot directly to disk. It is similar to ChainExport, + // except, depending on options, the snapshot can include receipts, + // messages and stateroots for the length between the specified head + // and tail, thus producing "archival-grade" snapshots that include + // all the on-chain data. The header chain is included back to + // genesis and these snapshots can be used to initialize Filecoin + // nodes. + ChainExportRangeInternal(ctx context.Context, head, tail types.TipSetKey, cfg ChainExportConfig) error //perm:admin + + // ChainPrune forces compaction on cold store and garbage collects; only supported if you // are using the splitstore ChainPrune(ctx context.Context, opts PruneOpts) error //perm:admin + // ChainHotGC does online (badger) GC on the hot store; only supported if you are using + // the splitstore + ChainHotGC(ctx context.Context, opts HotGCOpts) error //perm:admin + // ChainCheckBlockstore performs an (asynchronous) health check on the chain/state blockstore // if supported by the underlying implementation. ChainCheckBlockstore(context.Context) error //perm:admin @@ -783,10 +797,12 @@ type FullNode interface { EthGetBlockByHash(ctx context.Context, blkHash ethtypes.EthHash, fullTxInfo bool) (ethtypes.EthBlock, error) //perm:read EthGetBlockByNumber(ctx context.Context, blkNum string, fullTxInfo bool) (ethtypes.EthBlock, error) //perm:read EthGetTransactionByHash(ctx context.Context, txHash *ethtypes.EthHash) (*ethtypes.EthTx, error) //perm:read + EthGetTransactionByHashLimited(ctx context.Context, txHash *ethtypes.EthHash, limit abi.ChainEpoch) (*ethtypes.EthTx, error) //perm:read EthGetTransactionHashByCid(ctx context.Context, cid cid.Cid) (*ethtypes.EthHash, error) //perm:read EthGetMessageCidByTransactionHash(ctx context.Context, txHash *ethtypes.EthHash) (*cid.Cid, error) //perm:read EthGetTransactionCount(ctx context.Context, sender ethtypes.EthAddress, blkOpt string) (ethtypes.EthUint64, error) //perm:read EthGetTransactionReceipt(ctx context.Context, txHash ethtypes.EthHash) (*EthTxReceipt, error) //perm:read + EthGetTransactionReceiptLimited(ctx context.Context, txHash ethtypes.EthHash, limit abi.ChainEpoch) (*EthTxReceipt, error) //perm:read EthGetTransactionByBlockHashAndIndex(ctx context.Context, blkHash ethtypes.EthHash, txIndex ethtypes.EthUint64) (ethtypes.EthTx, error) //perm:read EthGetTransactionByBlockNumberAndIndex(ctx context.Context, blkNum ethtypes.EthUint64, txIndex ethtypes.EthUint64) (ethtypes.EthTx, error) //perm:read @@ -1344,6 +1360,12 @@ type PruneOpts struct { RetainState int64 } +type HotGCOpts struct { + Threshold float64 + Periodic bool + Moving bool +} + type EthTxReceipt struct { TransactionHash ethtypes.EthHash `json:"transactionHash"` TransactionIndex ethtypes.EthUint64 `json:"transactionIndex"` diff --git a/api/api_gateway.go b/api/api_gateway.go index 9bc69cc0f..4389fc34b 100644 --- a/api/api_gateway.go +++ b/api/api_gateway.go @@ -26,7 +26,7 @@ import ( // When adding / changing methods in this file: // * Do the change here // * Adjust implementation in `node/impl/` -// * Run `make gen` - this will: +// * Run `make clean && make deps && make gen` - this will: // * Generate proxy structs // * Generate mocks // * Generate markdown docs @@ -70,6 
+70,7 @@ type Gateway interface { StateNetworkName(context.Context) (dtypes.NetworkName, error) StateNetworkVersion(context.Context, types.TipSetKey) (apitypes.NetworkVersion, error) StateSectorGetInfo(ctx context.Context, maddr address.Address, n abi.SectorNumber, tsk types.TipSetKey) (*miner.SectorOnChainInfo, error) + StateVerifierStatus(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*abi.StoragePower, error) StateVerifiedClientStatus(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*abi.StoragePower, error) StateSearchMsg(ctx context.Context, from types.TipSetKey, msg cid.Cid, limit abi.ChainEpoch, allowReplaced bool) (*MsgLookup, error) StateWaitMsg(ctx context.Context, cid cid.Cid, confidence uint64, limit abi.ChainEpoch, allowReplaced bool) (*MsgLookup, error) @@ -84,12 +85,12 @@ type Gateway interface { EthGetBlockByHash(ctx context.Context, blkHash ethtypes.EthHash, fullTxInfo bool) (ethtypes.EthBlock, error) EthGetBlockByNumber(ctx context.Context, blkNum string, fullTxInfo bool) (ethtypes.EthBlock, error) EthGetTransactionByHash(ctx context.Context, txHash *ethtypes.EthHash) (*ethtypes.EthTx, error) + EthGetTransactionByHashLimited(ctx context.Context, txHash *ethtypes.EthHash, limit abi.ChainEpoch) (*ethtypes.EthTx, error) EthGetTransactionHashByCid(ctx context.Context, cid cid.Cid) (*ethtypes.EthHash, error) EthGetMessageCidByTransactionHash(ctx context.Context, txHash *ethtypes.EthHash) (*cid.Cid, error) EthGetTransactionCount(ctx context.Context, sender ethtypes.EthAddress, blkOpt string) (ethtypes.EthUint64, error) EthGetTransactionReceipt(ctx context.Context, txHash ethtypes.EthHash) (*EthTxReceipt, error) - EthGetTransactionByBlockHashAndIndex(ctx context.Context, blkHash ethtypes.EthHash, txIndex ethtypes.EthUint64) (ethtypes.EthTx, error) - EthGetTransactionByBlockNumberAndIndex(ctx context.Context, blkNum ethtypes.EthUint64, txIndex ethtypes.EthUint64) (ethtypes.EthTx, error) + EthGetTransactionReceiptLimited(ctx context.Context, txHash ethtypes.EthHash, limit abi.ChainEpoch) (*EthTxReceipt, error) EthGetCode(ctx context.Context, address ethtypes.EthAddress, blkOpt string) (ethtypes.EthBytes, error) EthGetStorageAt(ctx context.Context, address ethtypes.EthAddress, position ethtypes.EthBytes, blkParam string) (ethtypes.EthBytes, error) EthGetBalance(ctx context.Context, address ethtypes.EthAddress, blkParam string) (ethtypes.EthBigInt, error) diff --git a/api/api_storage.go b/api/api_storage.go index 0c00b9b93..9e65c1ced 100644 --- a/api/api_storage.go +++ b/api/api_storage.go @@ -11,7 +11,7 @@ import ( "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-bitfield" - datatransfer "github.com/filecoin-project/go-data-transfer" + datatransfer "github.com/filecoin-project/go-data-transfer/v2" "github.com/filecoin-project/go-fil-markets/piecestore" "github.com/filecoin-project/go-fil-markets/retrievalmarket" "github.com/filecoin-project/go-fil-markets/storagemarket" @@ -212,9 +212,11 @@ type StorageMiner interface { StorageDetachLocal(ctx context.Context, path string) error //perm:admin StorageRedeclareLocal(ctx context.Context, id *storiface.ID, dropMissing bool) error //perm:admin - MarketImportDealData(ctx context.Context, propcid cid.Cid, path string) error //perm:write - MarketListDeals(ctx context.Context) ([]*MarketDeal, error) //perm:read - MarketListRetrievalDeals(ctx context.Context) ([]retrievalmarket.ProviderDealState, error) //perm:read + MarketImportDealData(ctx context.Context, propcid cid.Cid, path 
string) error //perm:write + MarketListDeals(ctx context.Context) ([]*MarketDeal, error) //perm:read + + // MarketListRetrievalDeals is deprecated, returns empty list + MarketListRetrievalDeals(ctx context.Context) ([]struct{}, error) //perm:read MarketGetDealUpdates(ctx context.Context) (<-chan storagemarket.MinerDeal, error) //perm:read MarketListIncompleteDeals(ctx context.Context) ([]storagemarket.MinerDeal, error) //perm:read MarketSetAsk(ctx context.Context, price types.BigInt, verifiedPrice types.BigInt, duration abi.ChainEpoch, minPieceSize abi.PaddedPieceSize, maxPieceSize abi.PaddedPieceSize) error //perm:admin diff --git a/api/cbor_gen.go b/api/cbor_gen.go index efa1cd1a1..80392b212 100644 --- a/api/cbor_gen.go +++ b/api/cbor_gen.go @@ -50,22 +50,6 @@ func (t *PaymentInfo) MarshalCBOR(w io.Writer) error { return err } - // t.WaitSentinel (cid.Cid) (struct) - if len("WaitSentinel") > cbg.MaxLength { - return xerrors.Errorf("Value in field \"WaitSentinel\" was too long") - } - - if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("WaitSentinel"))); err != nil { - return err - } - if _, err := io.WriteString(w, string("WaitSentinel")); err != nil { - return err - } - - if err := cbg.WriteCid(cw, t.WaitSentinel); err != nil { - return xerrors.Errorf("failed to write cid field t.WaitSentinel: %w", err) - } - // t.Vouchers ([]*paych.SignedVoucher) (slice) if len("Vouchers") > cbg.MaxLength { return xerrors.Errorf("Value in field \"Vouchers\" was too long") @@ -90,6 +74,23 @@ func (t *PaymentInfo) MarshalCBOR(w io.Writer) error { return err } } + + // t.WaitSentinel (cid.Cid) (struct) + if len("WaitSentinel") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"WaitSentinel\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("WaitSentinel"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("WaitSentinel")); err != nil { + return err + } + + if err := cbg.WriteCid(cw, t.WaitSentinel); err != nil { + return xerrors.Errorf("failed to write cid field t.WaitSentinel: %w", err) + } + return nil } @@ -140,19 +141,6 @@ func (t *PaymentInfo) UnmarshalCBOR(r io.Reader) (err error) { return xerrors.Errorf("unmarshaling t.Channel: %w", err) } - } - // t.WaitSentinel (cid.Cid) (struct) - case "WaitSentinel": - - { - - c, err := cbg.ReadCid(cr) - if err != nil { - return xerrors.Errorf("failed to read cid field t.WaitSentinel: %w", err) - } - - t.WaitSentinel = c - } // t.Vouchers ([]*paych.SignedVoucher) (slice) case "Vouchers": @@ -184,6 +172,20 @@ func (t *PaymentInfo) UnmarshalCBOR(r io.Reader) (err error) { t.Vouchers[i] = &v } + // t.WaitSentinel (cid.Cid) (struct) + case "WaitSentinel": + + { + + c, err := cbg.ReadCid(cr) + if err != nil { + return xerrors.Errorf("failed to read cid field t.WaitSentinel: %w", err) + } + + t.WaitSentinel = c + + } + default: // Field doesn't exist on this type, so ignore it cbg.ScanForLinks(r, func(cid.Cid) {}) @@ -204,19 +206,19 @@ func (t *SealedRef) MarshalCBOR(w io.Writer) error { return err } - // t.SectorID (abi.SectorNumber) (uint64) - if len("SectorID") > cbg.MaxLength { - return xerrors.Errorf("Value in field \"SectorID\" was too long") + // t.Size (abi.UnpaddedPieceSize) (uint64) + if len("Size") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Size\" was too long") } - if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("SectorID"))); err != nil { + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Size"))); err != nil { return err } 
- if _, err := io.WriteString(w, string("SectorID")); err != nil { + if _, err := io.WriteString(w, string("Size")); err != nil { return err } - if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.SectorID)); err != nil { + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.Size)); err != nil { return err } @@ -236,19 +238,19 @@ func (t *SealedRef) MarshalCBOR(w io.Writer) error { return err } - // t.Size (abi.UnpaddedPieceSize) (uint64) - if len("Size") > cbg.MaxLength { - return xerrors.Errorf("Value in field \"Size\" was too long") + // t.SectorID (abi.SectorNumber) (uint64) + if len("SectorID") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"SectorID\" was too long") } - if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Size"))); err != nil { + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("SectorID"))); err != nil { return err } - if _, err := io.WriteString(w, string("Size")); err != nil { + if _, err := io.WriteString(w, string("SectorID")); err != nil { return err } - if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.Size)); err != nil { + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.SectorID)); err != nil { return err } @@ -293,8 +295,8 @@ func (t *SealedRef) UnmarshalCBOR(r io.Reader) (err error) { } switch name { - // t.SectorID (abi.SectorNumber) (uint64) - case "SectorID": + // t.Size (abi.UnpaddedPieceSize) (uint64) + case "Size": { @@ -305,7 +307,7 @@ func (t *SealedRef) UnmarshalCBOR(r io.Reader) (err error) { if maj != cbg.MajUnsignedInt { return fmt.Errorf("wrong type for uint64 field") } - t.SectorID = abi.SectorNumber(extra) + t.Size = abi.UnpaddedPieceSize(extra) } // t.Offset (abi.PaddedPieceSize) (uint64) @@ -323,8 +325,8 @@ func (t *SealedRef) UnmarshalCBOR(r io.Reader) (err error) { t.Offset = abi.PaddedPieceSize(extra) } - // t.Size (abi.UnpaddedPieceSize) (uint64) - case "Size": + // t.SectorID (abi.SectorNumber) (uint64) + case "SectorID": { @@ -335,7 +337,7 @@ func (t *SealedRef) UnmarshalCBOR(r io.Reader) (err error) { if maj != cbg.MajUnsignedInt { return fmt.Errorf("wrong type for uint64 field") } - t.Size = abi.UnpaddedPieceSize(extra) + t.SectorID = abi.SectorNumber(extra) } @@ -474,6 +476,28 @@ func (t *SealTicket) MarshalCBOR(w io.Writer) error { return err } + // t.Epoch (abi.ChainEpoch) (int64) + if len("Epoch") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Epoch\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Epoch"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("Epoch")); err != nil { + return err + } + + if t.Epoch >= 0 { + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.Epoch)); err != nil { + return err + } + } else { + if err := cw.WriteMajorTypeHeader(cbg.MajNegativeInt, uint64(-t.Epoch-1)); err != nil { + return err + } + } + // t.Value (abi.SealRandomness) (slice) if len("Value") > cbg.MaxLength { return xerrors.Errorf("Value in field \"Value\" was too long") @@ -497,28 +521,6 @@ func (t *SealTicket) MarshalCBOR(w io.Writer) error { if _, err := cw.Write(t.Value[:]); err != nil { return err } - - // t.Epoch (abi.ChainEpoch) (int64) - if len("Epoch") > cbg.MaxLength { - return xerrors.Errorf("Value in field \"Epoch\" was too long") - } - - if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Epoch"))); err != nil { - return err - } - if _, err := io.WriteString(w, string("Epoch")); err != nil { - return err - } - - if t.Epoch >= 0 { - if 
err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.Epoch)); err != nil { - return err - } - } else { - if err := cw.WriteMajorTypeHeader(cbg.MajNegativeInt, uint64(-t.Epoch-1)); err != nil { - return err - } - } return nil } @@ -560,7 +562,33 @@ func (t *SealTicket) UnmarshalCBOR(r io.Reader) (err error) { } switch name { - // t.Value (abi.SealRandomness) (slice) + // t.Epoch (abi.ChainEpoch) (int64) + case "Epoch": + { + maj, extra, err := cr.ReadHeader() + var extraI int64 + if err != nil { + return err + } + switch maj { + case cbg.MajUnsignedInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 positive overflow") + } + case cbg.MajNegativeInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 negative overflow") + } + extraI = -1 - extraI + default: + return fmt.Errorf("wrong type for int64 field: %d", maj) + } + + t.Epoch = abi.ChainEpoch(extraI) + } + // t.Value (abi.SealRandomness) (slice) case "Value": maj, extra, err = cr.ReadHeader() @@ -582,32 +610,6 @@ func (t *SealTicket) UnmarshalCBOR(r io.Reader) (err error) { if _, err := io.ReadFull(cr, t.Value[:]); err != nil { return err } - // t.Epoch (abi.ChainEpoch) (int64) - case "Epoch": - { - maj, extra, err := cr.ReadHeader() - var extraI int64 - if err != nil { - return err - } - switch maj { - case cbg.MajUnsignedInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 positive overflow") - } - case cbg.MajNegativeInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 negative oveflow") - } - extraI = -1 - extraI - default: - return fmt.Errorf("wrong type for int64 field: %d", maj) - } - - t.Epoch = abi.ChainEpoch(extraI) - } default: // Field doesn't exist on this type, so ignore it @@ -629,6 +631,28 @@ func (t *SealSeed) MarshalCBOR(w io.Writer) error { return err } + // t.Epoch (abi.ChainEpoch) (int64) + if len("Epoch") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Epoch\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Epoch"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("Epoch")); err != nil { + return err + } + + if t.Epoch >= 0 { + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.Epoch)); err != nil { + return err + } + } else { + if err := cw.WriteMajorTypeHeader(cbg.MajNegativeInt, uint64(-t.Epoch-1)); err != nil { + return err + } + } + // t.Value (abi.InteractiveSealRandomness) (slice) if len("Value") > cbg.MaxLength { return xerrors.Errorf("Value in field \"Value\" was too long") @@ -652,28 +676,6 @@ func (t *SealSeed) MarshalCBOR(w io.Writer) error { if _, err := cw.Write(t.Value[:]); err != nil { return err } - - // t.Epoch (abi.ChainEpoch) (int64) - if len("Epoch") > cbg.MaxLength { - return xerrors.Errorf("Value in field \"Epoch\" was too long") - } - - if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Epoch"))); err != nil { - return err - } - if _, err := io.WriteString(w, string("Epoch")); err != nil { - return err - } - - if t.Epoch >= 0 { - if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.Epoch)); err != nil { - return err - } - } else { - if err := cw.WriteMajorTypeHeader(cbg.MajNegativeInt, uint64(-t.Epoch-1)); err != nil { - return err - } - } return nil } @@ -715,7 +717,33 @@ func (t *SealSeed) UnmarshalCBOR(r io.Reader) (err error) { } switch name { - // t.Value (abi.InteractiveSealRandomness) (slice) + // t.Epoch (abi.ChainEpoch) (int64) + case "Epoch": + { + maj, extra, err := cr.ReadHeader() + 
var extraI int64 + if err != nil { + return err + } + switch maj { + case cbg.MajUnsignedInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 positive overflow") + } + case cbg.MajNegativeInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 negative overflow") + } + extraI = -1 - extraI + default: + return fmt.Errorf("wrong type for int64 field: %d", maj) + } + + t.Epoch = abi.ChainEpoch(extraI) + } + // t.Value (abi.InteractiveSealRandomness) (slice) case "Value": maj, extra, err = cr.ReadHeader() @@ -737,32 +765,6 @@ func (t *SealSeed) UnmarshalCBOR(r io.Reader) (err error) { if _, err := io.ReadFull(cr, t.Value[:]); err != nil { return err } - // t.Epoch (abi.ChainEpoch) (int64) - case "Epoch": - { - maj, extra, err := cr.ReadHeader() - var extraI int64 - if err != nil { - return err - } - switch maj { - case cbg.MajUnsignedInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 positive overflow") - } - case cbg.MajNegativeInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 negative oveflow") - } - extraI = -1 - extraI - default: - return fmt.Errorf("wrong type for int64 field: %d", maj) - } - - t.Epoch = abi.ChainEpoch(extraI) - } default: // Field doesn't exist on this type, so ignore it @@ -784,6 +786,22 @@ func (t *PieceDealInfo) MarshalCBOR(w io.Writer) error { return err } + // t.DealID (abi.DealID) (uint64) + if len("DealID") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"DealID\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("DealID"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("DealID")); err != nil { + return err + } + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.DealID)); err != nil { + return err + } + // t.PublishCid (cid.Cid) (struct) if len("PublishCid") > cbg.MaxLength { return xerrors.Errorf("Value in field \"PublishCid\" was too long") @@ -806,22 +824,6 @@ func (t *PieceDealInfo) MarshalCBOR(w io.Writer) error { } } - // t.DealID (abi.DealID) (uint64) - if len("DealID") > cbg.MaxLength { - return xerrors.Errorf("Value in field \"DealID\" was too long") - } - - if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("DealID"))); err != nil { - return err - } - if _, err := io.WriteString(w, string("DealID")); err != nil { - return err - } - - if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.DealID)); err != nil { - return err - } - // t.DealProposal (market.DealProposal) (struct) if len("DealProposal") > cbg.MaxLength { return xerrors.Errorf("Value in field \"DealProposal\" was too long") @@ -910,7 +912,22 @@ func (t *PieceDealInfo) UnmarshalCBOR(r io.Reader) (err error) { } switch name { - // t.PublishCid (cid.Cid) (struct) + // t.DealID (abi.DealID) (uint64) + case "DealID": + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.DealID = abi.DealID(extra) + + } + // t.PublishCid (cid.Cid) (struct) case "PublishCid": { @@ -932,21 +949,6 @@ func (t *PieceDealInfo) UnmarshalCBOR(r io.Reader) (err error) { t.PublishCid = &c } - } - // t.DealID (abi.DealID) (uint64) - case "DealID": - - { - - maj, extra, err = cr.ReadHeader() - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.DealID = abi.DealID(extra) - } // t.DealProposal (market.DealProposal) (struct) case "DealProposal": @@ 
-1140,28 +1142,6 @@ func (t *DealSchedule) MarshalCBOR(w io.Writer) error { return err } - // t.StartEpoch (abi.ChainEpoch) (int64) - if len("StartEpoch") > cbg.MaxLength { - return xerrors.Errorf("Value in field \"StartEpoch\" was too long") - } - - if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("StartEpoch"))); err != nil { - return err - } - if _, err := io.WriteString(w, string("StartEpoch")); err != nil { - return err - } - - if t.StartEpoch >= 0 { - if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.StartEpoch)); err != nil { - return err - } - } else { - if err := cw.WriteMajorTypeHeader(cbg.MajNegativeInt, uint64(-t.StartEpoch-1)); err != nil { - return err - } - } - // t.EndEpoch (abi.ChainEpoch) (int64) if len("EndEpoch") > cbg.MaxLength { return xerrors.Errorf("Value in field \"EndEpoch\" was too long") @@ -1183,6 +1163,28 @@ func (t *DealSchedule) MarshalCBOR(w io.Writer) error { return err } } + + // t.StartEpoch (abi.ChainEpoch) (int64) + if len("StartEpoch") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"StartEpoch\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("StartEpoch"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("StartEpoch")); err != nil { + return err + } + + if t.StartEpoch >= 0 { + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.StartEpoch)); err != nil { + return err + } + } else { + if err := cw.WriteMajorTypeHeader(cbg.MajNegativeInt, uint64(-t.StartEpoch-1)); err != nil { + return err + } + } return nil } @@ -1224,33 +1226,7 @@ func (t *DealSchedule) UnmarshalCBOR(r io.Reader) (err error) { } switch name { - // t.StartEpoch (abi.ChainEpoch) (int64) - case "StartEpoch": - { - maj, extra, err := cr.ReadHeader() - var extraI int64 - if err != nil { - return err - } - switch maj { - case cbg.MajUnsignedInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 positive overflow") - } - case cbg.MajNegativeInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 negative oveflow") - } - extraI = -1 - extraI - default: - return fmt.Errorf("wrong type for int64 field: %d", maj) - } - - t.StartEpoch = abi.ChainEpoch(extraI) - } - // t.EndEpoch (abi.ChainEpoch) (int64) + // t.EndEpoch (abi.ChainEpoch) (int64) case "EndEpoch": { maj, extra, err := cr.ReadHeader() @@ -1267,7 +1243,7 @@ func (t *DealSchedule) UnmarshalCBOR(r io.Reader) (err error) { case cbg.MajNegativeInt: extraI = int64(extra) if extraI < 0 { - return fmt.Errorf("int64 negative oveflow") + return fmt.Errorf("int64 negative overflow") } extraI = -1 - extraI default: @@ -1276,6 +1252,32 @@ func (t *DealSchedule) UnmarshalCBOR(r io.Reader) (err error) { t.EndEpoch = abi.ChainEpoch(extraI) } + // t.StartEpoch (abi.ChainEpoch) (int64) + case "StartEpoch": + { + maj, extra, err := cr.ReadHeader() + var extraI int64 + if err != nil { + return err + } + switch maj { + case cbg.MajUnsignedInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 positive overflow") + } + case cbg.MajNegativeInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 negative overflow") + } + extraI = -1 - extraI + default: + return fmt.Errorf("wrong type for int64 field: %d", maj) + } + + t.StartEpoch = abi.ChainEpoch(extraI) + } default: // Field doesn't exist on this type, so ignore it diff --git a/api/docgen/docgen.go b/api/docgen/docgen.go index 0e97997a6..0a0470446 100644 --- a/api/docgen/docgen.go +++ b/api/docgen/docgen.go @@ -27,7 
+27,7 @@ import ( "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-bitfield" - datatransfer "github.com/filecoin-project/go-data-transfer" + datatransfer "github.com/filecoin-project/go-data-transfer/v2" "github.com/filecoin-project/go-fil-markets/filestore" "github.com/filecoin-project/go-fil-markets/retrievalmarket" "github.com/filecoin-project/go-jsonrpc/auth" @@ -152,8 +152,8 @@ func init() { addExample(map[string]int{"name": 42}) addExample(map[string]time.Time{"name": time.Unix(1615243938, 0).UTC()}) addExample(&types.ExecutionTrace{ - Msg: ExampleValue("init", reflect.TypeOf(&types.Message{}), nil).(*types.Message), - MsgRct: ExampleValue("init", reflect.TypeOf(&types.MessageReceipt{}), nil).(*types.MessageReceipt), + Msg: ExampleValue("init", reflect.TypeOf(types.MessageTrace{}), nil).(types.MessageTrace), + MsgRct: ExampleValue("init", reflect.TypeOf(types.ReturnTrace{}), nil).(types.ReturnTrace), }) addExample(map[string]types.Actor{ "t01236": ExampleValue("init", reflect.TypeOf(types.Actor{}), nil).(types.Actor), diff --git a/api/mocks/mock_full.go b/api/mocks/mock_full.go index 370a213a0..83efbffdb 100644 --- a/api/mocks/mock_full.go +++ b/api/mocks/mock_full.go @@ -21,7 +21,7 @@ import ( address "github.com/filecoin-project/go-address" bitfield "github.com/filecoin-project/go-bitfield" - datatransfer "github.com/filecoin-project/go-data-transfer" + datatransfer "github.com/filecoin-project/go-data-transfer/v2" retrievalmarket "github.com/filecoin-project/go-fil-markets/retrievalmarket" jsonrpc "github.com/filecoin-project/go-jsonrpc" auth "github.com/filecoin-project/go-jsonrpc/auth" @@ -155,6 +155,20 @@ func (mr *MockFullNodeMockRecorder) ChainExport(arg0, arg1, arg2, arg3 interface return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainExport", reflect.TypeOf((*MockFullNode)(nil).ChainExport), arg0, arg1, arg2, arg3) } +// ChainExportRangeInternal mocks base method. +func (m *MockFullNode) ChainExportRangeInternal(arg0 context.Context, arg1, arg2 types.TipSetKey, arg3 api.ChainExportConfig) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ChainExportRangeInternal", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(error) + return ret0 +} + +// ChainExportRangeInternal indicates an expected call of ChainExportRangeInternal. +func (mr *MockFullNodeMockRecorder) ChainExportRangeInternal(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainExportRangeInternal", reflect.TypeOf((*MockFullNode)(nil).ChainExportRangeInternal), arg0, arg1, arg2, arg3) +} + // ChainGetBlock mocks base method. func (m *MockFullNode) ChainGetBlock(arg0 context.Context, arg1 cid.Cid) (*types.BlockHeader, error) { m.ctrl.T.Helper() @@ -380,6 +394,20 @@ func (mr *MockFullNodeMockRecorder) ChainHead(arg0 interface{}) *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainHead", reflect.TypeOf((*MockFullNode)(nil).ChainHead), arg0) } +// ChainHotGC mocks base method. +func (m *MockFullNode) ChainHotGC(arg0 context.Context, arg1 api.HotGCOpts) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ChainHotGC", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// ChainHotGC indicates an expected call of ChainHotGC. 
+func (mr *MockFullNodeMockRecorder) ChainHotGC(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainHotGC", reflect.TypeOf((*MockFullNode)(nil).ChainHotGC), arg0, arg1) +} + // ChainNotify mocks base method. func (m *MockFullNode) ChainNotify(arg0 context.Context) (<-chan []*api.HeadChange, error) { m.ctrl.T.Helper() @@ -1268,6 +1296,21 @@ func (mr *MockFullNodeMockRecorder) EthGetTransactionByHash(arg0, arg1 interface return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EthGetTransactionByHash", reflect.TypeOf((*MockFullNode)(nil).EthGetTransactionByHash), arg0, arg1) } +// EthGetTransactionByHashLimited mocks base method. +func (m *MockFullNode) EthGetTransactionByHashLimited(arg0 context.Context, arg1 *ethtypes.EthHash, arg2 abi.ChainEpoch) (*ethtypes.EthTx, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "EthGetTransactionByHashLimited", arg0, arg1, arg2) + ret0, _ := ret[0].(*ethtypes.EthTx) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// EthGetTransactionByHashLimited indicates an expected call of EthGetTransactionByHashLimited. +func (mr *MockFullNodeMockRecorder) EthGetTransactionByHashLimited(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EthGetTransactionByHashLimited", reflect.TypeOf((*MockFullNode)(nil).EthGetTransactionByHashLimited), arg0, arg1, arg2) +} + // EthGetTransactionCount mocks base method. func (m *MockFullNode) EthGetTransactionCount(arg0 context.Context, arg1 ethtypes.EthAddress, arg2 string) (ethtypes.EthUint64, error) { m.ctrl.T.Helper() @@ -1313,6 +1356,21 @@ func (mr *MockFullNodeMockRecorder) EthGetTransactionReceipt(arg0, arg1 interfac return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EthGetTransactionReceipt", reflect.TypeOf((*MockFullNode)(nil).EthGetTransactionReceipt), arg0, arg1) } +// EthGetTransactionReceiptLimited mocks base method. +func (m *MockFullNode) EthGetTransactionReceiptLimited(arg0 context.Context, arg1 ethtypes.EthHash, arg2 abi.ChainEpoch) (*api.EthTxReceipt, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "EthGetTransactionReceiptLimited", arg0, arg1, arg2) + ret0, _ := ret[0].(*api.EthTxReceipt) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// EthGetTransactionReceiptLimited indicates an expected call of EthGetTransactionReceiptLimited. +func (mr *MockFullNodeMockRecorder) EthGetTransactionReceiptLimited(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EthGetTransactionReceiptLimited", reflect.TypeOf((*MockFullNode)(nil).EthGetTransactionReceiptLimited), arg0, arg1, arg2) +} + // EthMaxPriorityFeePerGas mocks base method. 
func (m *MockFullNode) EthMaxPriorityFeePerGas(arg0 context.Context) (ethtypes.EthBigInt, error) { m.ctrl.T.Helper() diff --git a/api/proxy_gen.go b/api/proxy_gen.go index 70579af1a..843471381 100644 --- a/api/proxy_gen.go +++ b/api/proxy_gen.go @@ -18,7 +18,7 @@ import ( "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-bitfield" - datatransfer "github.com/filecoin-project/go-data-transfer" + datatransfer "github.com/filecoin-project/go-data-transfer/v2" "github.com/filecoin-project/go-fil-markets/piecestore" "github.com/filecoin-project/go-fil-markets/retrievalmarket" "github.com/filecoin-project/go-fil-markets/storagemarket" @@ -117,7 +117,7 @@ type EthSubscriberStruct struct { } type EthSubscriberMethods struct { - EthSubscription func(p0 context.Context, p1 jsonrpc.RawParams) error `notify:"true"rpc_method:"eth_subscription"` + EthSubscription func(p0 context.Context, p1 jsonrpc.RawParams) error `notify:"true" rpc_method:"eth_subscription"` } type EthSubscriberStub struct { @@ -140,6 +140,8 @@ type FullNodeMethods struct { ChainExport func(p0 context.Context, p1 abi.ChainEpoch, p2 bool, p3 types.TipSetKey) (<-chan []byte, error) `perm:"read"` + ChainExportRangeInternal func(p0 context.Context, p1 types.TipSetKey, p2 types.TipSetKey, p3 ChainExportConfig) error `perm:"admin"` + ChainGetBlock func(p0 context.Context, p1 cid.Cid) (*types.BlockHeader, error) `perm:"read"` ChainGetBlockMessages func(p0 context.Context, p1 cid.Cid) (*BlockMessages, error) `perm:"read"` @@ -170,6 +172,8 @@ type FullNodeMethods struct { ChainHead func(p0 context.Context) (*types.TipSet, error) `perm:"read"` + ChainHotGC func(p0 context.Context, p1 HotGCOpts) error `perm:"admin"` + ChainNotify func(p0 context.Context) (<-chan []*HeadChange, error) `perm:"read"` ChainPrune func(p0 context.Context, p1 PruneOpts) error `perm:"admin"` @@ -286,12 +290,16 @@ type FullNodeMethods struct { EthGetTransactionByHash func(p0 context.Context, p1 *ethtypes.EthHash) (*ethtypes.EthTx, error) `perm:"read"` + EthGetTransactionByHashLimited func(p0 context.Context, p1 *ethtypes.EthHash, p2 abi.ChainEpoch) (*ethtypes.EthTx, error) `perm:"read"` + EthGetTransactionCount func(p0 context.Context, p1 ethtypes.EthAddress, p2 string) (ethtypes.EthUint64, error) `perm:"read"` EthGetTransactionHashByCid func(p0 context.Context, p1 cid.Cid) (*ethtypes.EthHash, error) `perm:"read"` EthGetTransactionReceipt func(p0 context.Context, p1 ethtypes.EthHash) (*EthTxReceipt, error) `perm:"read"` + EthGetTransactionReceiptLimited func(p0 context.Context, p1 ethtypes.EthHash, p2 abi.ChainEpoch) (*EthTxReceipt, error) `perm:"read"` + EthMaxPriorityFeePerGas func(p0 context.Context) (ethtypes.EthBigInt, error) `perm:"read"` EthNewBlockFilter func(p0 context.Context) (ethtypes.EthFilterID, error) `perm:"write"` @@ -688,18 +696,18 @@ type GatewayMethods struct { EthGetStorageAt func(p0 context.Context, p1 ethtypes.EthAddress, p2 ethtypes.EthBytes, p3 string) (ethtypes.EthBytes, error) `` - EthGetTransactionByBlockHashAndIndex func(p0 context.Context, p1 ethtypes.EthHash, p2 ethtypes.EthUint64) (ethtypes.EthTx, error) `` - - EthGetTransactionByBlockNumberAndIndex func(p0 context.Context, p1 ethtypes.EthUint64, p2 ethtypes.EthUint64) (ethtypes.EthTx, error) `` - EthGetTransactionByHash func(p0 context.Context, p1 *ethtypes.EthHash) (*ethtypes.EthTx, error) `` + EthGetTransactionByHashLimited func(p0 context.Context, p1 *ethtypes.EthHash, p2 abi.ChainEpoch) (*ethtypes.EthTx, error) `` + EthGetTransactionCount func(p0 context.Context, 
p1 ethtypes.EthAddress, p2 string) (ethtypes.EthUint64, error) `` EthGetTransactionHashByCid func(p0 context.Context, p1 cid.Cid) (*ethtypes.EthHash, error) `` EthGetTransactionReceipt func(p0 context.Context, p1 ethtypes.EthHash) (*EthTxReceipt, error) `` + EthGetTransactionReceiptLimited func(p0 context.Context, p1 ethtypes.EthHash, p2 abi.ChainEpoch) (*EthTxReceipt, error) `` + EthMaxPriorityFeePerGas func(p0 context.Context) (ethtypes.EthBigInt, error) `` EthNewBlockFilter func(p0 context.Context) (ethtypes.EthFilterID, error) `` @@ -772,6 +780,8 @@ type GatewayMethods struct { StateVerifiedClientStatus func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*abi.StoragePower, error) `` + StateVerifierStatus func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*abi.StoragePower, error) `` + StateWaitMsg func(p0 context.Context, p1 cid.Cid, p2 uint64, p3 abi.ChainEpoch, p4 bool) (*MsgLookup, error) `` Version func(p0 context.Context) (APIVersion, error) `` @@ -951,7 +961,7 @@ type StorageMinerMethods struct { MarketListIncompleteDeals func(p0 context.Context) ([]storagemarket.MinerDeal, error) `perm:"read"` - MarketListRetrievalDeals func(p0 context.Context) ([]retrievalmarket.ProviderDealState, error) `perm:"read"` + MarketListRetrievalDeals func(p0 context.Context) ([]struct{}, error) `perm:"read"` MarketPendingDeals func(p0 context.Context) (PendingDealInfo, error) `perm:"write"` @@ -1447,6 +1457,17 @@ func (s *FullNodeStub) ChainExport(p0 context.Context, p1 abi.ChainEpoch, p2 boo return nil, ErrNotSupported } +func (s *FullNodeStruct) ChainExportRangeInternal(p0 context.Context, p1 types.TipSetKey, p2 types.TipSetKey, p3 ChainExportConfig) error { + if s.Internal.ChainExportRangeInternal == nil { + return ErrNotSupported + } + return s.Internal.ChainExportRangeInternal(p0, p1, p2, p3) +} + +func (s *FullNodeStub) ChainExportRangeInternal(p0 context.Context, p1 types.TipSetKey, p2 types.TipSetKey, p3 ChainExportConfig) error { + return ErrNotSupported +} + func (s *FullNodeStruct) ChainGetBlock(p0 context.Context, p1 cid.Cid) (*types.BlockHeader, error) { if s.Internal.ChainGetBlock == nil { return nil, ErrNotSupported @@ -1612,6 +1633,17 @@ func (s *FullNodeStub) ChainHead(p0 context.Context) (*types.TipSet, error) { return nil, ErrNotSupported } +func (s *FullNodeStruct) ChainHotGC(p0 context.Context, p1 HotGCOpts) error { + if s.Internal.ChainHotGC == nil { + return ErrNotSupported + } + return s.Internal.ChainHotGC(p0, p1) +} + +func (s *FullNodeStub) ChainHotGC(p0 context.Context, p1 HotGCOpts) error { + return ErrNotSupported +} + func (s *FullNodeStruct) ChainNotify(p0 context.Context) (<-chan []*HeadChange, error) { if s.Internal.ChainNotify == nil { return nil, ErrNotSupported @@ -2250,6 +2282,17 @@ func (s *FullNodeStub) EthGetTransactionByHash(p0 context.Context, p1 *ethtypes. 
return nil, ErrNotSupported } +func (s *FullNodeStruct) EthGetTransactionByHashLimited(p0 context.Context, p1 *ethtypes.EthHash, p2 abi.ChainEpoch) (*ethtypes.EthTx, error) { + if s.Internal.EthGetTransactionByHashLimited == nil { + return nil, ErrNotSupported + } + return s.Internal.EthGetTransactionByHashLimited(p0, p1, p2) +} + +func (s *FullNodeStub) EthGetTransactionByHashLimited(p0 context.Context, p1 *ethtypes.EthHash, p2 abi.ChainEpoch) (*ethtypes.EthTx, error) { + return nil, ErrNotSupported +} + func (s *FullNodeStruct) EthGetTransactionCount(p0 context.Context, p1 ethtypes.EthAddress, p2 string) (ethtypes.EthUint64, error) { if s.Internal.EthGetTransactionCount == nil { return *new(ethtypes.EthUint64), ErrNotSupported @@ -2283,6 +2326,17 @@ func (s *FullNodeStub) EthGetTransactionReceipt(p0 context.Context, p1 ethtypes. return nil, ErrNotSupported } +func (s *FullNodeStruct) EthGetTransactionReceiptLimited(p0 context.Context, p1 ethtypes.EthHash, p2 abi.ChainEpoch) (*EthTxReceipt, error) { + if s.Internal.EthGetTransactionReceiptLimited == nil { + return nil, ErrNotSupported + } + return s.Internal.EthGetTransactionReceiptLimited(p0, p1, p2) +} + +func (s *FullNodeStub) EthGetTransactionReceiptLimited(p0 context.Context, p1 ethtypes.EthHash, p2 abi.ChainEpoch) (*EthTxReceipt, error) { + return nil, ErrNotSupported +} + func (s *FullNodeStruct) EthMaxPriorityFeePerGas(p0 context.Context) (ethtypes.EthBigInt, error) { if s.Internal.EthMaxPriorityFeePerGas == nil { return *new(ethtypes.EthBigInt), ErrNotSupported @@ -4395,28 +4449,6 @@ func (s *GatewayStub) EthGetStorageAt(p0 context.Context, p1 ethtypes.EthAddress return *new(ethtypes.EthBytes), ErrNotSupported } -func (s *GatewayStruct) EthGetTransactionByBlockHashAndIndex(p0 context.Context, p1 ethtypes.EthHash, p2 ethtypes.EthUint64) (ethtypes.EthTx, error) { - if s.Internal.EthGetTransactionByBlockHashAndIndex == nil { - return *new(ethtypes.EthTx), ErrNotSupported - } - return s.Internal.EthGetTransactionByBlockHashAndIndex(p0, p1, p2) -} - -func (s *GatewayStub) EthGetTransactionByBlockHashAndIndex(p0 context.Context, p1 ethtypes.EthHash, p2 ethtypes.EthUint64) (ethtypes.EthTx, error) { - return *new(ethtypes.EthTx), ErrNotSupported -} - -func (s *GatewayStruct) EthGetTransactionByBlockNumberAndIndex(p0 context.Context, p1 ethtypes.EthUint64, p2 ethtypes.EthUint64) (ethtypes.EthTx, error) { - if s.Internal.EthGetTransactionByBlockNumberAndIndex == nil { - return *new(ethtypes.EthTx), ErrNotSupported - } - return s.Internal.EthGetTransactionByBlockNumberAndIndex(p0, p1, p2) -} - -func (s *GatewayStub) EthGetTransactionByBlockNumberAndIndex(p0 context.Context, p1 ethtypes.EthUint64, p2 ethtypes.EthUint64) (ethtypes.EthTx, error) { - return *new(ethtypes.EthTx), ErrNotSupported -} - func (s *GatewayStruct) EthGetTransactionByHash(p0 context.Context, p1 *ethtypes.EthHash) (*ethtypes.EthTx, error) { if s.Internal.EthGetTransactionByHash == nil { return nil, ErrNotSupported @@ -4428,6 +4460,17 @@ func (s *GatewayStub) EthGetTransactionByHash(p0 context.Context, p1 *ethtypes.E return nil, ErrNotSupported } +func (s *GatewayStruct) EthGetTransactionByHashLimited(p0 context.Context, p1 *ethtypes.EthHash, p2 abi.ChainEpoch) (*ethtypes.EthTx, error) { + if s.Internal.EthGetTransactionByHashLimited == nil { + return nil, ErrNotSupported + } + return s.Internal.EthGetTransactionByHashLimited(p0, p1, p2) +} + +func (s *GatewayStub) EthGetTransactionByHashLimited(p0 context.Context, p1 *ethtypes.EthHash, p2 abi.ChainEpoch) 
(*ethtypes.EthTx, error) { + return nil, ErrNotSupported +} + func (s *GatewayStruct) EthGetTransactionCount(p0 context.Context, p1 ethtypes.EthAddress, p2 string) (ethtypes.EthUint64, error) { if s.Internal.EthGetTransactionCount == nil { return *new(ethtypes.EthUint64), ErrNotSupported @@ -4461,6 +4504,17 @@ func (s *GatewayStub) EthGetTransactionReceipt(p0 context.Context, p1 ethtypes.E return nil, ErrNotSupported } +func (s *GatewayStruct) EthGetTransactionReceiptLimited(p0 context.Context, p1 ethtypes.EthHash, p2 abi.ChainEpoch) (*EthTxReceipt, error) { + if s.Internal.EthGetTransactionReceiptLimited == nil { + return nil, ErrNotSupported + } + return s.Internal.EthGetTransactionReceiptLimited(p0, p1, p2) +} + +func (s *GatewayStub) EthGetTransactionReceiptLimited(p0 context.Context, p1 ethtypes.EthHash, p2 abi.ChainEpoch) (*EthTxReceipt, error) { + return nil, ErrNotSupported +} + func (s *GatewayStruct) EthMaxPriorityFeePerGas(p0 context.Context) (ethtypes.EthBigInt, error) { if s.Internal.EthMaxPriorityFeePerGas == nil { return *new(ethtypes.EthBigInt), ErrNotSupported @@ -4857,6 +4911,17 @@ func (s *GatewayStub) StateVerifiedClientStatus(p0 context.Context, p1 address.A return nil, ErrNotSupported } +func (s *GatewayStruct) StateVerifierStatus(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*abi.StoragePower, error) { + if s.Internal.StateVerifierStatus == nil { + return nil, ErrNotSupported + } + return s.Internal.StateVerifierStatus(p0, p1, p2) +} + +func (s *GatewayStub) StateVerifierStatus(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*abi.StoragePower, error) { + return nil, ErrNotSupported +} + func (s *GatewayStruct) StateWaitMsg(p0 context.Context, p1 cid.Cid, p2 uint64, p3 abi.ChainEpoch, p4 bool) (*MsgLookup, error) { if s.Internal.StateWaitMsg == nil { return nil, ErrNotSupported @@ -5671,15 +5736,15 @@ func (s *StorageMinerStub) MarketListIncompleteDeals(p0 context.Context) ([]stor return *new([]storagemarket.MinerDeal), ErrNotSupported } -func (s *StorageMinerStruct) MarketListRetrievalDeals(p0 context.Context) ([]retrievalmarket.ProviderDealState, error) { +func (s *StorageMinerStruct) MarketListRetrievalDeals(p0 context.Context) ([]struct{}, error) { if s.Internal.MarketListRetrievalDeals == nil { - return *new([]retrievalmarket.ProviderDealState), ErrNotSupported + return *new([]struct{}), ErrNotSupported } return s.Internal.MarketListRetrievalDeals(p0) } -func (s *StorageMinerStub) MarketListRetrievalDeals(p0 context.Context) ([]retrievalmarket.ProviderDealState, error) { - return *new([]retrievalmarket.ProviderDealState), ErrNotSupported +func (s *StorageMinerStub) MarketListRetrievalDeals(p0 context.Context) ([]struct{}, error) { + return *new([]struct{}), ErrNotSupported } func (s *StorageMinerStruct) MarketPendingDeals(p0 context.Context) (PendingDealInfo, error) { diff --git a/api/types.go b/api/types.go index e5e0c72c4..7ff413867 100644 --- a/api/types.go +++ b/api/types.go @@ -8,13 +8,15 @@ import ( "github.com/google/uuid" "github.com/ipfs/go-cid" "github.com/ipfs/go-graphsync" + "github.com/ipld/go-ipld-prime" + "github.com/ipld/go-ipld-prime/codec/dagjson" pubsub "github.com/libp2p/go-libp2p-pubsub" "github.com/libp2p/go-libp2p/core/network" "github.com/libp2p/go-libp2p/core/peer" ma "github.com/multiformats/go-multiaddr" "github.com/filecoin-project/go-address" - datatransfer "github.com/filecoin-project/go-data-transfer" + datatransfer "github.com/filecoin-project/go-data-transfer/v2" 
"github.com/filecoin-project/go-fil-markets/retrievalmarket" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/builtin/v9/miner" @@ -110,16 +112,12 @@ func NewDataTransferChannel(hostID peer.ID, channelState datatransfer.ChannelSta IsSender: channelState.Sender() == hostID, Message: channelState.Message(), } - stringer, ok := channelState.Voucher().(fmt.Stringer) - if ok { - channel.Voucher = stringer.String() + voucher := channelState.Voucher() + voucherJSON, err := ipld.Encode(voucher.Voucher, dagjson.Encode) + if err != nil { + channel.Voucher = fmt.Errorf("Voucher Serialization: %w", err).Error() } else { - voucherJSON, err := json.Marshal(channelState.Voucher()) - if err != nil { - channel.Voucher = fmt.Errorf("Voucher Serialization: %w", err).Error() - } else { - channel.Voucher = string(voucherJSON) - } + channel.Voucher = string(voucherJSON) } if channel.IsSender { channel.IsInitiator = !channelState.IsPull() @@ -400,3 +398,12 @@ func (m *MsgUuidMapType) UnmarshalJSON(b []byte) error { } return nil } + +// ChainExportConfig holds configuration for chain ranged exports. +type ChainExportConfig struct { + WriteBufferSize int + NumWorkers int + IncludeMessages bool + IncludeReceipts bool + IncludeStateRoots bool +} diff --git a/api/v0api/full.go b/api/v0api/full.go index 490cd73c8..86a4ce47a 100644 --- a/api/v0api/full.go +++ b/api/v0api/full.go @@ -10,7 +10,7 @@ import ( "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-bitfield" - datatransfer "github.com/filecoin-project/go-data-transfer" + datatransfer "github.com/filecoin-project/go-data-transfer/v2" "github.com/filecoin-project/go-fil-markets/retrievalmarket" "github.com/filecoin-project/go-fil-markets/storagemarket" "github.com/filecoin-project/go-state-types/abi" diff --git a/api/v0api/proxy_gen.go b/api/v0api/proxy_gen.go index 17a1ae84a..3d4a60f1c 100644 --- a/api/v0api/proxy_gen.go +++ b/api/v0api/proxy_gen.go @@ -12,7 +12,7 @@ import ( "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-bitfield" - datatransfer "github.com/filecoin-project/go-data-transfer" + datatransfer "github.com/filecoin-project/go-data-transfer/v2" "github.com/filecoin-project/go-fil-markets/retrievalmarket" "github.com/filecoin-project/go-fil-markets/storagemarket" "github.com/filecoin-project/go-state-types/abi" diff --git a/api/v0api/v0mocks/mock_full.go b/api/v0api/v0mocks/mock_full.go index 619f19d35..a4adfc944 100644 --- a/api/v0api/v0mocks/mock_full.go +++ b/api/v0api/v0mocks/mock_full.go @@ -20,7 +20,7 @@ import ( address "github.com/filecoin-project/go-address" bitfield "github.com/filecoin-project/go-bitfield" - datatransfer "github.com/filecoin-project/go-data-transfer" + datatransfer "github.com/filecoin-project/go-data-transfer/v2" retrievalmarket "github.com/filecoin-project/go-fil-markets/retrievalmarket" storagemarket "github.com/filecoin-project/go-fil-markets/storagemarket" auth "github.com/filecoin-project/go-jsonrpc/auth" diff --git a/blockstore/badger/blockstore.go b/blockstore/badger/blockstore.go index da4f9f67d..3a789a178 100644 --- a/blockstore/badger/blockstore.go +++ b/blockstore/badger/blockstore.go @@ -20,6 +20,7 @@ import ( pool "github.com/libp2p/go-buffer-pool" "github.com/multiformats/go-base32" "go.uber.org/zap" + "golang.org/x/xerrors" "github.com/filecoin-project/lotus/blockstore" ) @@ -44,7 +45,8 @@ const ( // MemoryMap is equivalent to badger/options.MemoryMap. 
MemoryMap = options.MemoryMap // LoadToRAM is equivalent to badger/options.LoadToRAM. - LoadToRAM = options.LoadToRAM + LoadToRAM = options.LoadToRAM + defaultGCThreshold = 0.125 ) // Options embeds the badger options themselves, and augments them with @@ -394,6 +396,9 @@ func (b *Blockstore) doCopy(from, to *badger.DB) error { if workers < 2 { workers = 2 } + if workers > 8 { + workers = 8 + } stream := from.NewStream() stream.NumGo = workers @@ -439,7 +444,7 @@ func (b *Blockstore) deleteDB(path string) { } } -func (b *Blockstore) onlineGC() error { +func (b *Blockstore) onlineGC(ctx context.Context, threshold float64, checkFreq time.Duration, check func() error) error { b.lockDB() defer b.unlockDB() @@ -448,14 +453,26 @@ func (b *Blockstore) onlineGC() error { if nworkers < 2 { nworkers = 2 } + if nworkers > 7 { // max out at 1 goroutine per badger level + nworkers = 7 + } err := b.db.Flatten(nworkers) if err != nil { return err } - + checkTick := time.NewTimer(checkFreq) + defer checkTick.Stop() for err == nil { - err = b.db.RunValueLogGC(0.125) + select { + case <-ctx.Done(): + err = ctx.Err() + case <-checkTick.C: + err = check() + checkTick.Reset(checkFreq) + default: + err = b.db.RunValueLogGC(threshold) + } } if err == badger.ErrNoRewrite { @@ -468,7 +485,7 @@ func (b *Blockstore) onlineGC() error { // CollectGarbage compacts and runs garbage collection on the value log; // implements the BlockstoreGC trait -func (b *Blockstore) CollectGarbage(opts ...blockstore.BlockstoreGCOption) error { +func (b *Blockstore) CollectGarbage(ctx context.Context, opts ...blockstore.BlockstoreGCOption) error { if err := b.access(); err != nil { return err } @@ -485,8 +502,58 @@ func (b *Blockstore) CollectGarbage(opts ...blockstore.BlockstoreGCOption) error if options.FullGC { return b.movingGC() } + threshold := options.Threshold + if threshold == 0 { + threshold = defaultGCThreshold + } + checkFreq := options.CheckFreq + if checkFreq < 30*time.Second { // disallow checking more frequently than block time + checkFreq = 30 * time.Second + } + check := options.Check + if check == nil { + check = func() error { + return nil + } + } + return b.onlineGC(ctx, threshold, checkFreq, check) +} - return b.onlineGC() +// GCOnce runs garbage collection on the value log; +// implements BlockstoreGCOnce trait +func (b *Blockstore) GCOnce(ctx context.Context, opts ...blockstore.BlockstoreGCOption) error { + if err := b.access(); err != nil { + return err + } + defer b.viewers.Done() + + var options blockstore.BlockstoreGCOptions + for _, opt := range opts { + err := opt(&options) + if err != nil { + return err + } + } + if options.FullGC { + return xerrors.Errorf("FullGC option specified for GCOnce but full GC is non incremental") + } + + threshold := options.Threshold + if threshold == 0 { + threshold = defaultGCThreshold + } + + b.lockDB() + defer b.unlockDB() + + // Note no compaction needed before single GC as we will hit at most one vlog anyway + err := b.db.RunValueLogGC(threshold) + if err == badger.ErrNoRewrite { + // not really an error in this case, it signals the end of GC + return nil + } + + return err } // Size returns the aggregate size of the blockstore @@ -551,6 +618,18 @@ func (b *Blockstore) View(ctx context.Context, cid cid.Cid, fn func([]byte) erro }) } +func (b *Blockstore) Flush(context.Context) error { + if err := b.access(); err != nil { + return err + } + defer b.viewers.Done() + + b.lockDB() + defer b.unlockDB() + + return b.db.Sync() +} + // Has implements Blockstore.Has. 
func (b *Blockstore) Has(ctx context.Context, cid cid.Cid) (bool, error) { if err := b.access(); err != nil { diff --git a/blockstore/badger/blockstore_test.go b/blockstore/badger/blockstore_test.go index fc81be43e..bf85104bb 100644 --- a/blockstore/badger/blockstore_test.go +++ b/blockstore/badger/blockstore_test.go @@ -145,7 +145,7 @@ func testMove(t *testing.T, optsF func(string) Options) { return nil }) g.Go(func() error { - return db.CollectGarbage(blockstore.WithFullGC(true)) + return db.CollectGarbage(ctx, blockstore.WithFullGC(true)) }) err = g.Wait() @@ -230,7 +230,7 @@ func testMove(t *testing.T, optsF func(string) Options) { checkPath() // now do another FullGC to test the double move and following of symlinks - if err := db.CollectGarbage(blockstore.WithFullGC(true)); err != nil { + if err := db.CollectGarbage(ctx, blockstore.WithFullGC(true)); err != nil { t.Fatal(err) } diff --git a/blockstore/blockstore.go b/blockstore/blockstore.go index f2fb00e8a..195e991e1 100644 --- a/blockstore/blockstore.go +++ b/blockstore/blockstore.go @@ -2,6 +2,7 @@ package blockstore import ( "context" + "time" "github.com/ipfs/go-cid" ds "github.com/ipfs/go-datastore" @@ -18,6 +19,7 @@ type Blockstore interface { blockstore.Blockstore blockstore.Viewer BatchDeleter + Flusher } // BasicBlockstore is an alias to the original IPFS Blockstore. @@ -25,6 +27,10 @@ type BasicBlockstore = blockstore.Blockstore type Viewer = blockstore.Viewer +type Flusher interface { + Flush(context.Context) error +} + type BatchDeleter interface { DeleteMany(ctx context.Context, cids []cid.Cid) error } @@ -36,7 +42,12 @@ type BlockstoreIterator interface { // BlockstoreGC is a trait for blockstores that support online garbage collection type BlockstoreGC interface { - CollectGarbage(options ...BlockstoreGCOption) error + CollectGarbage(ctx context.Context, options ...BlockstoreGCOption) error +} + +// BlockstoreGCOnce is a trait for a blockstore that supports incremental online garbage collection +type BlockstoreGCOnce interface { + GCOnce(ctx context.Context, options ...BlockstoreGCOption) error } // BlockstoreGCOption is a functional interface for controlling blockstore GC options @@ -45,6 +56,12 @@ type BlockstoreGCOption = func(*BlockstoreGCOptions) error // BlockstoreGCOptions is a struct with GC options type BlockstoreGCOptions struct { FullGC bool + // fraction of garbage in badger vlog before its worth processing in online GC + Threshold float64 + // how often to call the check function + CheckFreq time.Duration + // function to call periodically to pause or early terminate GC + Check func() error } func WithFullGC(fullgc bool) BlockstoreGCOption { @@ -54,6 +71,27 @@ func WithFullGC(fullgc bool) BlockstoreGCOption { } } +func WithThreshold(threshold float64) BlockstoreGCOption { + return func(opts *BlockstoreGCOptions) error { + opts.Threshold = threshold + return nil + } +} + +func WithCheckFreq(f time.Duration) BlockstoreGCOption { + return func(opts *BlockstoreGCOptions) error { + opts.CheckFreq = f + return nil + } +} + +func WithCheck(check func() error) BlockstoreGCOption { + return func(opts *BlockstoreGCOptions) error { + opts.Check = check + return nil + } +} + // BlockstoreSize is a trait for on-disk blockstores that can report their size type BlockstoreSize interface { Size() (int64, error) @@ -92,6 +130,13 @@ type adaptedBlockstore struct { var _ Blockstore = (*adaptedBlockstore)(nil) +func (a *adaptedBlockstore) Flush(ctx context.Context) error { + if flusher, canFlush := a.Blockstore.(Flusher); 
canFlush { + return flusher.Flush(ctx) + } + return nil +} + func (a *adaptedBlockstore) View(ctx context.Context, cid cid.Cid, callback func([]byte) error) error { blk, err := a.Get(ctx, cid) if err != nil { diff --git a/blockstore/buffered.go b/blockstore/buffered.go index e089ed561..bc37b2f69 100644 --- a/blockstore/buffered.go +++ b/blockstore/buffered.go @@ -46,6 +46,8 @@ var ( _ Viewer = (*BufferedBlockstore)(nil) ) +func (bs *BufferedBlockstore) Flush(ctx context.Context) error { return bs.write.Flush(ctx) } + func (bs *BufferedBlockstore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) { a, err := bs.read.AllKeysChan(ctx) if err != nil { diff --git a/blockstore/discard.go b/blockstore/discard.go index 7f1a76a22..878797561 100644 --- a/blockstore/discard.go +++ b/blockstore/discard.go @@ -38,6 +38,10 @@ func (b *discardstore) View(ctx context.Context, cid cid.Cid, f func([]byte) err return b.bs.View(ctx, cid, f) } +func (b *discardstore) Flush(ctx context.Context) error { + return nil +} + func (b *discardstore) Put(ctx context.Context, blk blocks.Block) error { return nil } diff --git a/blockstore/idstore.go b/blockstore/idstore.go index a0ecec5f0..ae807076d 100644 --- a/blockstore/idstore.go +++ b/blockstore/idstore.go @@ -179,3 +179,7 @@ func (b *idstore) Close() error { } return nil } + +func (b *idstore) Flush(ctx context.Context) error { + return b.bs.Flush(ctx) +} diff --git a/blockstore/ipfs.go b/blockstore/ipfs.go index 756314f2d..c7dbb480a 100644 --- a/blockstore/ipfs.go +++ b/blockstore/ipfs.go @@ -3,7 +3,7 @@ package blockstore import ( "bytes" "context" - "io/ioutil" + "io" "github.com/ipfs/go-cid" httpapi "github.com/ipfs/go-ipfs-http-client" @@ -103,7 +103,7 @@ func (i *IPFSBlockstore) Get(ctx context.Context, cid cid.Cid) (blocks.Block, er return nil, xerrors.Errorf("getting ipfs block: %w", err) } - data, err := ioutil.ReadAll(rd) + data, err := io.ReadAll(rd) if err != nil { return nil, err } diff --git a/blockstore/mem.go b/blockstore/mem.go index 05da287c5..5b06634de 100644 --- a/blockstore/mem.go +++ b/blockstore/mem.go @@ -17,6 +17,8 @@ func NewMemory() MemBlockstore { // To match behavior of badger blockstore we index by multihash only. 
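The new Flusher and BlockstoreGCOnce traits above are meant to be consumed via type assertions. As a hedged illustration — not part of this diff, with the size target and stop policy being assumptions rather than lotus defaults — a caller could drive incremental GC toward a target roughly as follows:

package example

import (
	"context"

	bstore "github.com/filecoin-project/lotus/blockstore"
)

// shrinkToTarget runs single GC passes until the store reports a size at or
// below target, or until a pass makes no further progress.
func shrinkToTarget(ctx context.Context, bs bstore.Blockstore, target int64) error {
	gc, okGC := bs.(bstore.BlockstoreGCOnce)
	sizer, okSize := bs.(bstore.BlockstoreSize)
	if !okGC || !okSize {
		return nil // store does not expose these traits; nothing to do
	}
	prev := int64(-1)
	for {
		size, err := sizer.Size()
		if err != nil || size <= target {
			return err
		}
		if prev >= 0 && size >= prev {
			return nil // no progress since the last pass; stop rather than spin
		}
		prev = size
		// GCOnce processes at most one value log; a low threshold makes the
		// pass aggressive about reclaiming partially-garbage logs.
		if err := gc.GCOnce(ctx, bstore.WithThreshold(0.01)); err != nil {
			return err
		}
	}
}
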
type MemBlockstore map[string]blocks.Block +func (MemBlockstore) Flush(context.Context) error { return nil } + func (m MemBlockstore) DeleteBlock(ctx context.Context, k cid.Cid) error { delete(m, string(k.Hash())) return nil diff --git a/blockstore/net.go b/blockstore/net.go index a6b008af2..62aceed71 100644 --- a/blockstore/net.go +++ b/blockstore/net.go @@ -410,6 +410,8 @@ func (n *NetworkStore) HashOnRead(enabled bool) { return } +func (*NetworkStore) Flush(context.Context) error { return nil } + func (n *NetworkStore) Stop(ctx context.Context) error { close(n.closing) diff --git a/blockstore/splitstore/markset_badger.go b/blockstore/splitstore/markset_badger.go index e98046862..2dac673cd 100644 --- a/blockstore/splitstore/markset_badger.go +++ b/blockstore/splitstore/markset_badger.go @@ -11,6 +11,8 @@ import ( "github.com/ipfs/go-cid" "go.uber.org/zap" "golang.org/x/xerrors" + + "github.com/filecoin-project/lotus/system" ) type BadgerMarkSetEnv struct { @@ -349,7 +351,7 @@ func (s *BadgerMarkSet) write(seqno int) (err error) { persist := s.persist s.mx.RUnlock() - if persist { + if persist && !system.BadgerFsyncDisable { // WARNING: disabling sync makes recovery from crash during critical section unsound return s.db.Sync() } diff --git a/blockstore/splitstore/splitstore.go b/blockstore/splitstore/splitstore.go index 0afb15c11..2d51ce423 100644 --- a/blockstore/splitstore/splitstore.go +++ b/blockstore/splitstore/splitstore.go @@ -115,6 +115,23 @@ type Config struct { // A positive value is the number of compactions before a full GC is performed; // a value of 1 will perform full GC in every compaction. HotStoreFullGCFrequency uint64 + + // HotstoreMaxSpaceTarget suggests the max allowed space the hotstore can take. + // This is not a hard limit, it is possible for the hotstore to exceed the target + // for example if state grows massively between compactions. The splitstore + // will make a best effort to avoid overflowing the target and in practice should + // never overflow. This field is used when doing GC at the end of a compaction to + // adaptively choose moving GC + HotstoreMaxSpaceTarget uint64 + + // Moving GC will be triggered when total moving size exceeds + // HotstoreMaxSpaceTarget - HotstoreMaxSpaceThreshold + HotstoreMaxSpaceThreshold uint64 + + // Safety buffer to prevent moving GC from overflowing disk. + // Moving GC will not occur when total moving size exceeds + // HotstoreMaxSpaceTarget - HotstoreMaxSpaceSafetyBuffer + HotstoreMaxSpaceSafetyBuffer uint64 } // ChainAccessor allows the Splitstore to access the chain. 
It will most likely @@ -165,10 +182,16 @@ type SplitStore struct { compactionIndex int64 pruneIndex int64 + onlineGCCnt int64 ctx context.Context cancel func() + outOfSync int32 // for fast checking + chainSyncMx sync.Mutex + chainSyncCond sync.Cond + chainSyncFinished bool // protected by chainSyncMx + debug *debugLog // transactional protection for concurrent read/writes during compaction @@ -195,6 +218,17 @@ type SplitStore struct { // registered protectors protectors []func(func(cid.Cid) error) error + + // dag sizes measured during latest compaction + // logged and used for GC strategy + + // protected by compaction lock + szWalk int64 + szProtectedTxns int64 + szKeys int64 // approximate, not counting keys protected when entering critical section + + // protected by txnLk + szMarkedLiveRefs int64 } var _ bstore.Blockstore = (*SplitStore)(nil) @@ -232,6 +266,7 @@ func Open(path string, ds dstore.Datastore, hot, cold bstore.Blockstore, cfg *Co ss.txnViewsCond.L = &ss.txnViewsMx ss.txnSyncCond.L = &ss.txnSyncMx + ss.chainSyncCond.L = &ss.chainSyncMx ss.ctx, ss.cancel = context.WithCancel(context.Background()) ss.reifyCond.L = &ss.reifyMx @@ -447,6 +482,23 @@ func (s *SplitStore) GetSize(ctx context.Context, cid cid.Cid) (int, error) { } } +func (s *SplitStore) Flush(ctx context.Context) error { + s.txnLk.RLock() + defer s.txnLk.RUnlock() + + if err := s.cold.Flush(ctx); err != nil { + return err + } + if err := s.hot.Flush(ctx); err != nil { + return err + } + if err := s.ds.Sync(ctx, dstore.Key{}); err != nil { + return err + } + + return nil +} + func (s *SplitStore) Put(ctx context.Context, blk blocks.Block) error { if isIdentiyCid(blk.Cid()) { return nil @@ -776,6 +828,11 @@ func (s *SplitStore) Close() error { s.txnSyncCond.Broadcast() s.txnSyncMx.Unlock() + s.chainSyncMx.Lock() + s.chainSyncFinished = true + s.chainSyncCond.Broadcast() + s.chainSyncMx.Unlock() + log.Warn("close with ongoing compaction in progress; waiting for it to finish...") for atomic.LoadInt32(&s.compacting) == 1 { time.Sleep(time.Second) diff --git a/blockstore/splitstore/splitstore_check.go b/blockstore/splitstore/splitstore_check.go index 336515980..2645c78c5 100644 --- a/blockstore/splitstore/splitstore_check.go +++ b/blockstore/splitstore/splitstore_check.go @@ -95,7 +95,7 @@ func (s *SplitStore) doCheck(curTs *types.TipSet) error { } defer visitor.Close() //nolint - err = s.walkChain(curTs, boundaryEpoch, boundaryEpoch, visitor, + size := s.walkChain(curTs, boundaryEpoch, boundaryEpoch, visitor, func(c cid.Cid) error { if isUnitaryObject(c) { return errStopWalk @@ -133,7 +133,7 @@ func (s *SplitStore) doCheck(curTs *types.TipSet) error { return err } - log.Infow("check done", "cold", *coldCnt, "missing", *missingCnt) + log.Infow("check done", "cold", *coldCnt, "missing", *missingCnt, "walk size", size) write("--") write("cold: %d missing: %d", *coldCnt, *missingCnt) write("DONE") diff --git a/blockstore/splitstore/splitstore_compact.go b/blockstore/splitstore/splitstore_compact.go index 5f8fb5728..644816d8e 100644 --- a/blockstore/splitstore/splitstore_compact.go +++ b/blockstore/splitstore/splitstore_compact.go @@ -66,7 +66,8 @@ var ( ) const ( - batchSize = 16384 + batchSize = 16384 + cidKeySize = 128 ) func (s *SplitStore) HeadChange(_, apply []*types.TipSet) error { @@ -90,7 +91,35 @@ func (s *SplitStore) HeadChange(_, apply []*types.TipSet) error { // Regardless, we put a mutex in HeadChange just to be safe if !atomic.CompareAndSwapInt32(&s.compacting, 0, 1) { - // we are currently compacting -- 
protect the new tipset(s) + // we are currently compacting + // 1. Signal sync condition to yield compaction when out of sync and resume when in sync + timestamp := time.Unix(int64(curTs.MinTimestamp()), 0) + if CheckSyncGap && time.Since(timestamp) > SyncGapTime { + /* Chain out of sync */ + if atomic.CompareAndSwapInt32(&s.outOfSync, 0, 1) { + // transition from in sync to out of sync + s.chainSyncMx.Lock() + s.chainSyncFinished = false + s.chainSyncMx.Unlock() + } + // already out of sync, no signaling necessary + + } + // TODO: ok to use hysteresis with no transitions between 30s and 1m? + if time.Since(timestamp) < SyncWaitTime { + /* Chain in sync */ + if atomic.CompareAndSwapInt32(&s.outOfSync, 0, 0) { + // already in sync, no signaling necessary + } else { + // transition from out of sync to in sync + s.chainSyncMx.Lock() + s.chainSyncFinished = true + s.chainSyncCond.Broadcast() + s.chainSyncMx.Unlock() + } + + } + // 2. protect the new tipset(s) s.protectTipSets(apply) return nil } @@ -115,8 +144,6 @@ func (s *SplitStore) HeadChange(_, apply []*types.TipSet) error { return nil } - // Prioritize hot store compaction over cold store prune - if epoch-s.baseEpoch > CompactionThreshold { // it's time to compact -- prepare the transaction and go! s.beginTxnProtect() @@ -176,6 +203,8 @@ func (s *SplitStore) protectTipSets(apply []*types.TipSet) { timestamp := time.Unix(int64(curTs.MinTimestamp()), 0) doSync := time.Since(timestamp) < SyncWaitTime go func() { + // we are holding the txnLk while marking + // so critical section cannot delete if doSync { defer func() { s.txnSyncMx.Lock() @@ -199,9 +228,11 @@ func (s *SplitStore) markLiveRefs(cids []cid.Cid) { log.Debugf("marking %d live refs", len(cids)) startMark := time.Now() + szMarked := new(int64) + count := new(int32) visitor := newConcurrentVisitor() - walkObject := func(c cid.Cid) error { + walkObject := func(c cid.Cid) (int64, error) { return s.walkObjectIncomplete(c, visitor, func(c cid.Cid) error { if isUnitaryObject(c) { @@ -228,10 +259,12 @@ func (s *SplitStore) markLiveRefs(cids []cid.Cid) { // optimize the common case of single put if len(cids) == 1 { - if err := walkObject(cids[0]); err != nil { + sz, err := walkObject(cids[0]) + if err != nil { log.Errorf("error marking tipset refs: %s", err) } log.Debugw("marking live refs done", "took", time.Since(startMark), "marked", *count) + atomic.AddInt64(szMarked, sz) return } @@ -243,9 +276,11 @@ func (s *SplitStore) markLiveRefs(cids []cid.Cid) { worker := func() error { for c := range workch { - if err := walkObject(c); err != nil { + sz, err := walkObject(c) + if err != nil { return err } + atomic.AddInt64(szMarked, sz) } return nil @@ -268,7 +303,8 @@ func (s *SplitStore) markLiveRefs(cids []cid.Cid) { log.Errorf("error marking tipset refs: %s", err) } - log.Debugw("marking live refs done", "took", time.Since(startMark), "marked", *count) + log.Debugw("marking live refs done", "took", time.Since(startMark), "marked", *count, "size marked", *szMarked) + s.szMarkedLiveRefs += atomic.LoadInt64(szMarked) } // transactionally protect a view @@ -361,6 +397,7 @@ func (s *SplitStore) protectTxnRefs(markSet MarkSet) error { log.Infow("protecting transactional references", "refs", len(txnRefs)) count := 0 + sz := new(int64) workch := make(chan cid.Cid, len(txnRefs)) startProtect := time.Now() @@ -393,10 +430,11 @@ func (s *SplitStore) protectTxnRefs(markSet MarkSet) error { worker := func() error { for c := range workch { - err := s.doTxnProtect(c, markSet) + szTxn, err := 
s.doTxnProtect(c, markSet) if err != nil { return xerrors.Errorf("error protecting transactional references to %s: %w", c, err) } + atomic.AddInt64(sz, szTxn) } return nil } @@ -409,16 +447,16 @@ func (s *SplitStore) protectTxnRefs(markSet MarkSet) error { if err := g.Wait(); err != nil { return err } - - log.Infow("protecting transactional refs done", "took", time.Since(startProtect), "protected", count) + s.szProtectedTxns += atomic.LoadInt64(sz) + log.Infow("protecting transactional refs done", "took", time.Since(startProtect), "protected", count, "protected size", sz) } } // transactionally protect a reference by walking the object and marking. // concurrent markings are short circuited by checking the markset. -func (s *SplitStore) doTxnProtect(root cid.Cid, markSet MarkSet) error { - if err := s.checkClosing(); err != nil { - return err +func (s *SplitStore) doTxnProtect(root cid.Cid, markSet MarkSet) (int64, error) { + if err := s.checkYield(); err != nil { + return 0, err } // Note: cold objects are deleted heaviest first, so the consituents of an object @@ -442,7 +480,7 @@ func (s *SplitStore) doTxnProtect(root cid.Cid, markSet MarkSet) error { }, func(c cid.Cid) error { if s.txnMissing != nil { - log.Warnf("missing object reference %s in %s", c, root) + log.Debugf("missing object reference %s in %s", c, root) s.txnRefsMx.Lock() s.txnMissing[c] = struct{}{} s.txnRefsMx.Unlock() @@ -509,6 +547,7 @@ func (s *SplitStore) doCompact(curTs *types.TipSet) error { // might be potentially inconsistent; abort compaction and notify the user to intervene. return xerrors.Errorf("checkpoint exists; aborting compaction") } + s.clearSizeMeasurements() currentEpoch := curTs.Height() boundaryEpoch := currentEpoch - CompactionBoundary @@ -534,7 +573,7 @@ func (s *SplitStore) doCompact(curTs *types.TipSet) error { } defer coldSet.Close() //nolint:errcheck - if err := s.checkClosing(); err != nil { + if err := s.checkYield(); err != nil { return err } @@ -598,7 +637,6 @@ func (s *SplitStore) doCompact(curTs *types.TipSet) error { } err = s.walkChain(curTs, boundaryEpoch, inclMsgsEpoch, &noopVisitor{}, fHot, fCold) - if err != nil { return xerrors.Errorf("error marking: %w", err) } @@ -607,7 +645,7 @@ func (s *SplitStore) doCompact(curTs *types.TipSet) error { log.Infow("marking done", "took", time.Since(startMark), "marked", *count) - if err := s.checkClosing(); err != nil { + if err := s.checkYield(); err != nil { return err } @@ -617,7 +655,7 @@ func (s *SplitStore) doCompact(curTs *types.TipSet) error { return xerrors.Errorf("error protecting transactional refs: %w", err) } - if err := s.checkClosing(); err != nil { + if err := s.checkYield(); err != nil { return err } @@ -638,7 +676,7 @@ func (s *SplitStore) doCompact(curTs *types.TipSet) error { defer purgew.Close() //nolint:errcheck // some stats for logging - var hotCnt, coldCnt, purgeCnt int + var hotCnt, coldCnt, purgeCnt int64 err = s.hot.ForEachKey(func(c cid.Cid) error { // was it marked? 
mark, err := markSet.Has(c) @@ -689,11 +727,12 @@ func (s *SplitStore) doCompact(curTs *types.TipSet) error { log.Infow("cold collection done", "took", time.Since(startCollect)) - log.Infow("compaction stats", "hot", hotCnt, "cold", coldCnt) - stats.Record(s.ctx, metrics.SplitstoreCompactionHot.M(int64(hotCnt))) - stats.Record(s.ctx, metrics.SplitstoreCompactionCold.M(int64(coldCnt))) + log.Infow("compaction stats", "hot", hotCnt, "cold", coldCnt, "purge", purgeCnt) + s.szKeys = hotCnt * cidKeySize + stats.Record(s.ctx, metrics.SplitstoreCompactionHot.M(hotCnt)) + stats.Record(s.ctx, metrics.SplitstoreCompactionCold.M(coldCnt)) - if err := s.checkClosing(); err != nil { + if err := s.checkYield(); err != nil { return err } @@ -702,7 +741,7 @@ func (s *SplitStore) doCompact(curTs *types.TipSet) error { // possibly delete objects we didn't have when we were collecting cold objects) s.waitForMissingRefs(markSet) - if err := s.checkClosing(); err != nil { + if err := s.checkYield(); err != nil { return err } @@ -722,7 +761,7 @@ func (s *SplitStore) doCompact(curTs *types.TipSet) error { } log.Infow("moving done", "took", time.Since(startMove)) - if err := s.checkClosing(); err != nil { + if err := s.checkYield(); err != nil { return err } @@ -753,7 +792,7 @@ func (s *SplitStore) doCompact(curTs *types.TipSet) error { } // wait for the head to catch up so that the current tipset is marked - s.waitForSync() + s.waitForTxnSync() if err := s.checkClosing(); err != nil { return err @@ -773,8 +812,8 @@ func (s *SplitStore) doCompact(curTs *types.TipSet) error { return xerrors.Errorf("error purging cold objects: %w", err) } log.Infow("purging cold objects from hotstore done", "took", time.Since(startPurge)) - s.endCriticalSection() + log.Infow("critical section done", "total protected size", s.szProtectedTxns, "total marked live size", s.szMarkedLiveRefs) if err := checkpoint.Close(); err != nil { log.Warnf("error closing checkpoint: %s", err) @@ -788,10 +827,13 @@ func (s *SplitStore) doCompact(curTs *types.TipSet) error { if err := os.Remove(s.coldSetPath()); err != nil { log.Warnf("error removing coldset: %s", err) } + if err := os.Remove(s.discardSetPath()); err != nil { + log.Warnf("error removing discardset: %s", err) + } // we are done; do some housekeeping s.endTxnProtect() - s.gcHotstore() + s.gcHotAfterCompaction() err = s.setBaseEpoch(boundaryEpoch) if err != nil { @@ -851,7 +893,7 @@ func (s *SplitStore) beginCriticalSection(markSet MarkSet) error { return nil } -func (s *SplitStore) waitForSync() { +func (s *SplitStore) waitForTxnSync() { log.Info("waiting for sync") if !CheckSyncGap { log.Warnf("If you see this outside of test it is a serious splitstore issue") @@ -870,6 +912,25 @@ func (s *SplitStore) waitForSync() { } } +// Block compaction operations if chain sync has fallen behind +func (s *SplitStore) waitForSync() { + if atomic.LoadInt32(&s.outOfSync) == 0 { + return + } + s.chainSyncMx.Lock() + defer s.chainSyncMx.Unlock() + + for !s.chainSyncFinished { + s.chainSyncCond.Wait() + } +} + +// Combined sync and closing check +func (s *SplitStore) checkYield() error { + s.waitForSync() + return s.checkClosing() +} + func (s *SplitStore) endTxnProtect() { s.txnLk.Lock() defer s.txnLk.Unlock() @@ -904,6 +965,7 @@ func (s *SplitStore) walkChain(ts *types.TipSet, inclState, inclMsgs abi.ChainEp copy(toWalk, ts.Cids()) walkCnt := new(int64) scanCnt := new(int64) + szWalk := new(int64) tsRef := func(blkCids []cid.Cid) (cid.Cid, error) { return types.NewTipSetKey(blkCids...).Cid() @@ 
-939,48 +1001,64 @@ func (s *SplitStore) walkChain(ts *types.TipSet, inclState, inclMsgs abi.ChainEp if err != nil { return xerrors.Errorf("error computing cid reference to parent tipset") } - if err := s.walkObjectIncomplete(pRef, visitor, fHot, stopWalk); err != nil { + sz, err := s.walkObjectIncomplete(pRef, visitor, fHot, stopWalk) + if err != nil { return xerrors.Errorf("error walking parent tipset cid reference") } + atomic.AddInt64(szWalk, sz) // message are retained if within the inclMsgs boundary if hdr.Height >= inclMsgs && hdr.Height > 0 { if inclMsgs < inclState { // we need to use walkObjectIncomplete here, as messages/receipts may be missing early on if we // synced from snapshot and have a long HotStoreMessageRetentionPolicy. - if err := s.walkObjectIncomplete(hdr.Messages, visitor, fHot, stopWalk); err != nil { + sz, err := s.walkObjectIncomplete(hdr.Messages, visitor, fHot, stopWalk) + if err != nil { return xerrors.Errorf("error walking messages (cid: %s): %w", hdr.Messages, err) } + atomic.AddInt64(szWalk, sz) - if err := s.walkObjectIncomplete(hdr.ParentMessageReceipts, visitor, fHot, stopWalk); err != nil { + sz, err = s.walkObjectIncomplete(hdr.ParentMessageReceipts, visitor, fHot, stopWalk) + if err != nil { return xerrors.Errorf("error walking messages receipts (cid: %s): %w", hdr.ParentMessageReceipts, err) } + atomic.AddInt64(szWalk, sz) } else { - if err := s.walkObject(hdr.Messages, visitor, fHot); err != nil { + sz, err = s.walkObject(hdr.Messages, visitor, fHot) + if err != nil { return xerrors.Errorf("error walking messages (cid: %s): %w", hdr.Messages, err) } + atomic.AddInt64(szWalk, sz) - if err := s.walkObjectIncomplete(hdr.ParentMessageReceipts, visitor, fHot, stopWalk); err != nil { + sz, err := s.walkObjectIncomplete(hdr.ParentMessageReceipts, visitor, fHot, stopWalk) + if err != nil { return xerrors.Errorf("error walking message receipts (cid: %s): %w", hdr.ParentMessageReceipts, err) } + atomic.AddInt64(szWalk, sz) } } // messages and receipts outside of inclMsgs are included in the cold store if hdr.Height < inclMsgs && hdr.Height > 0 { - if err := s.walkObjectIncomplete(hdr.Messages, visitor, fCold, stopWalk); err != nil { + sz, err := s.walkObjectIncomplete(hdr.Messages, visitor, fCold, stopWalk) + if err != nil { return xerrors.Errorf("error walking messages (cid: %s): %w", hdr.Messages, err) } - if err := s.walkObjectIncomplete(hdr.ParentMessageReceipts, visitor, fCold, stopWalk); err != nil { + atomic.AddInt64(szWalk, sz) + sz, err = s.walkObjectIncomplete(hdr.ParentMessageReceipts, visitor, fCold, stopWalk) + if err != nil { return xerrors.Errorf("error walking messages receipts (cid: %s): %w", hdr.ParentMessageReceipts, err) } + atomic.AddInt64(szWalk, sz) } // state is only retained if within the inclState boundary, with the exception of genesis if hdr.Height >= inclState || hdr.Height == 0 { - if err := s.walkObject(hdr.ParentStateRoot, visitor, fHot); err != nil { + sz, err := s.walkObject(hdr.ParentStateRoot, visitor, fHot) + if err != nil { return xerrors.Errorf("error walking state root (cid: %s): %w", hdr.ParentStateRoot, err) } + atomic.AddInt64(szWalk, sz) atomic.AddInt64(scanCnt, 1) } @@ -998,13 +1076,15 @@ func (s *SplitStore) walkChain(ts *types.TipSet, inclState, inclMsgs abi.ChainEp if err != nil { return xerrors.Errorf("error computing cid reference to parent tipset") } - if err := s.walkObjectIncomplete(hRef, visitor, fHot, stopWalk); err != nil { + sz, err := s.walkObjectIncomplete(hRef, visitor, fHot, stopWalk) + if err != 
nil { return xerrors.Errorf("error walking parent tipset cid reference") } + atomic.AddInt64(szWalk, sz) for len(toWalk) > 0 { // walking can take a while, so check this with every opportunity - if err := s.checkClosing(); err != nil { + if err := s.checkYield(); err != nil { return err } @@ -1044,123 +1124,129 @@ func (s *SplitStore) walkChain(ts *types.TipSet, inclState, inclMsgs abi.ChainEp } } - log.Infow("chain walk done", "walked", *walkCnt, "scanned", *scanCnt) - + log.Infow("chain walk done", "walked", *walkCnt, "scanned", *scanCnt, "walk size", szWalk) + s.szWalk = atomic.LoadInt64(szWalk) return nil } -func (s *SplitStore) walkObject(c cid.Cid, visitor ObjectVisitor, f func(cid.Cid) error) error { +func (s *SplitStore) walkObject(c cid.Cid, visitor ObjectVisitor, f func(cid.Cid) error) (int64, error) { + var sz int64 visit, err := visitor.Visit(c) if err != nil { - return xerrors.Errorf("error visiting object: %w", err) + return 0, xerrors.Errorf("error visiting object: %w", err) } if !visit { - return nil + return sz, nil } if err := f(c); err != nil { if err == errStopWalk { - return nil + return sz, nil } - return err + return 0, err } if c.Prefix().Codec != cid.DagCBOR { - return nil + return sz, nil } // check this before recursing - if err := s.checkClosing(); err != nil { - return err + if err := s.checkYield(); err != nil { + return 0, err } var links []cid.Cid err = s.view(c, func(data []byte) error { + sz += int64(len(data)) return cbg.ScanForLinks(bytes.NewReader(data), func(c cid.Cid) { links = append(links, c) }) }) if err != nil { - return xerrors.Errorf("error scanning linked block (cid: %s): %w", c, err) + return 0, xerrors.Errorf("error scanning linked block (cid: %s): %w", c, err) } for _, c := range links { - err := s.walkObject(c, visitor, f) + szLink, err := s.walkObject(c, visitor, f) if err != nil { - return xerrors.Errorf("error walking link (cid: %s): %w", c, err) + return 0, xerrors.Errorf("error walking link (cid: %s): %w", c, err) } + sz += szLink } - return nil + return sz, nil } // like walkObject, but the object may be potentially incomplete (references missing) -func (s *SplitStore) walkObjectIncomplete(c cid.Cid, visitor ObjectVisitor, f, missing func(cid.Cid) error) error { +func (s *SplitStore) walkObjectIncomplete(c cid.Cid, visitor ObjectVisitor, f, missing func(cid.Cid) error) (int64, error) { + var sz int64 visit, err := visitor.Visit(c) if err != nil { - return xerrors.Errorf("error visiting object: %w", err) + return 0, xerrors.Errorf("error visiting object: %w", err) } if !visit { - return nil + return sz, nil } // occurs check -- only for DAGs if c.Prefix().Codec == cid.DagCBOR { has, err := s.has(c) if err != nil { - return xerrors.Errorf("error occur checking %s: %w", c, err) + return 0, xerrors.Errorf("error occur checking %s: %w", c, err) } if !has { err = missing(c) if err == errStopWalk { - return nil + return sz, nil } - return err + return 0, err } } if err := f(c); err != nil { if err == errStopWalk { - return nil + return sz, nil } - return err + return 0, err } if c.Prefix().Codec != cid.DagCBOR { - return nil + return sz, nil } // check this before recursing - if err := s.checkClosing(); err != nil { - return err + if err := s.checkYield(); err != nil { + return sz, err } var links []cid.Cid err = s.view(c, func(data []byte) error { + sz += int64(len(data)) return cbg.ScanForLinks(bytes.NewReader(data), func(c cid.Cid) { links = append(links, c) }) }) if err != nil { - return xerrors.Errorf("error scanning linked block (cid: 
%s): %w", c, err) + return 0, xerrors.Errorf("error scanning linked block (cid: %s): %w", c, err) } for _, c := range links { - err := s.walkObjectIncomplete(c, visitor, f, missing) + szLink, err := s.walkObjectIncomplete(c, visitor, f, missing) if err != nil { - return xerrors.Errorf("error walking link (cid: %s): %w", c, err) + return 0, xerrors.Errorf("error walking link (cid: %s): %w", c, err) } + sz += szLink } - return nil + return sz, nil } // internal version used during compaction and related operations @@ -1223,7 +1309,7 @@ func (s *SplitStore) moveColdBlocks(coldr *ColdSetReader) error { batch := make([]blocks.Block, 0, batchSize) err := coldr.ForEach(func(c cid.Cid) error { - if err := s.checkClosing(); err != nil { + if err := s.checkYield(); err != nil { return err } blk, err := s.hot.Get(s.ctx, c) @@ -1426,8 +1512,9 @@ func (s *SplitStore) completeCompaction() error { } s.compactType = none - // Note: at this point we can start the splitstore; a compaction should run on - // the first head change, which will trigger gc on the hotstore. + // Note: at this point we can start the splitstore; base epoch is not + // incremented here so a compaction should run on the first head + // change, which will trigger gc on the hotstore. // We don't mind the second (back-to-back) compaction as the head will // have advanced during marking and coldset accumulation. return nil @@ -1485,6 +1572,13 @@ func (s *SplitStore) completePurge(coldr *ColdSetReader, checkpoint *Checkpoint, return nil } +func (s *SplitStore) clearSizeMeasurements() { + s.szKeys = 0 + s.szMarkedLiveRefs = 0 + s.szProtectedTxns = 0 + s.szWalk = 0 +} + // I really don't like having this code, but we seem to have some occasional DAG references with // missing constituents. During testing in mainnet *some* of these references *sometimes* appeared // after a little bit. @@ -1525,7 +1619,7 @@ func (s *SplitStore) waitForMissingRefs(markSet MarkSet) { missing = make(map[cid.Cid]struct{}) for c := range towalk { - err := s.walkObjectIncomplete(c, visitor, + _, err := s.walkObjectIncomplete(c, visitor, func(c cid.Cid) error { if isUnitaryObject(c) { return errStopWalk diff --git a/blockstore/splitstore/splitstore_expose.go b/blockstore/splitstore/splitstore_expose.go index d092fbb9b..7461e338d 100644 --- a/blockstore/splitstore/splitstore_expose.go +++ b/blockstore/splitstore/splitstore_expose.go @@ -77,6 +77,10 @@ func (es *exposedSplitStore) GetSize(ctx context.Context, c cid.Cid) (int, error return size, err } +func (es *exposedSplitStore) Flush(ctx context.Context) error { + return es.s.Flush(ctx) +} + func (es *exposedSplitStore) Put(ctx context.Context, blk blocks.Block) error { return es.s.Put(ctx, blk) } diff --git a/blockstore/splitstore/splitstore_gc.go b/blockstore/splitstore/splitstore_gc.go index c9be94d2e..8b154f574 100644 --- a/blockstore/splitstore/splitstore_gc.go +++ b/blockstore/splitstore/splitstore_gc.go @@ -7,23 +7,74 @@ import ( bstore "github.com/filecoin-project/lotus/blockstore" ) -func (s *SplitStore) gcHotstore() { +const ( + // Fraction of garbage in badger vlog for online GC traversal to collect garbage + AggressiveOnlineGCThreshold = 0.0001 +) + +func (s *SplitStore) gcHotAfterCompaction() { + // Measure hotstore size, determine if we should do full GC, determine if we can do full GC. 
+ // We should do full GC if + // FullGCFrequency is specified and compaction index matches frequency + // OR HotstoreMaxSpaceTarget is specified and total moving space is within 150 GB of target + // We can do full if + // HotstoreMaxSpaceTarget is not specified + // OR total moving space would not exceed 50 GB below target + // + // a) If we should not do full GC => online GC + // b) If we should do full GC and can => moving GC + // c) If we should do full GC and can't => aggressive online GC + getSize := func() int64 { + sizer, ok := s.hot.(bstore.BlockstoreSize) + if ok { + size, err := sizer.Size() + if err != nil { + log.Warnf("error getting hotstore size: %s, estimating empty hot store for targeting", err) + return 0 + } + return size + } + log.Errorf("Could not measure hotstore size, assuming it is 0 bytes, which it is not") + return 0 + } + hotSize := getSize() + + copySizeApprox := s.szKeys + s.szMarkedLiveRefs + s.szProtectedTxns + s.szWalk + shouldTarget := s.cfg.HotstoreMaxSpaceTarget > 0 && hotSize+copySizeApprox > int64(s.cfg.HotstoreMaxSpaceTarget)-int64(s.cfg.HotstoreMaxSpaceThreshold) + shouldFreq := s.cfg.HotStoreFullGCFrequency > 0 && s.compactionIndex%int64(s.cfg.HotStoreFullGCFrequency) == 0 + shouldDoFull := shouldTarget || shouldFreq + canDoFull := s.cfg.HotstoreMaxSpaceTarget == 0 || hotSize+copySizeApprox < int64(s.cfg.HotstoreMaxSpaceTarget)-int64(s.cfg.HotstoreMaxSpaceSafetyBuffer) + log.Debugw("approximating new hot store size", "key size", s.szKeys, "marked live refs", s.szMarkedLiveRefs, "protected txns", s.szProtectedTxns, "walked DAG", s.szWalk) + log.Infof("measured hot store size: %d, approximate new size: %d, should do full %t, can do full %t", hotSize, copySizeApprox, shouldDoFull, canDoFull) + var opts []bstore.BlockstoreGCOption - if s.cfg.HotStoreFullGCFrequency > 0 && s.compactionIndex%int64(s.cfg.HotStoreFullGCFrequency) == 0 { + if shouldDoFull && canDoFull { opts = append(opts, bstore.WithFullGC(true)) + } else if shouldDoFull && !canDoFull { + log.Warnf("Attention! 
Estimated moving GC size %d is not within safety buffer %d of target max %d, performing aggressive online GC to attempt to bring hotstore size down safely", copySizeApprox, s.cfg.HotstoreMaxSpaceSafetyBuffer, s.cfg.HotstoreMaxSpaceTarget) + log.Warn("If problem continues you can 1) temporarily allocate more disk space to hotstore and 2) reflect in HotstoreMaxSpaceTarget OR trigger manual move with `lotus chain prune hot-moving`") + log.Warn("If problem continues and you do not have any more disk space you can run continue to manually trigger online GC at aggressive thresholds (< 0.01) with `lotus chain prune hot`") + + opts = append(opts, bstore.WithThreshold(AggressiveOnlineGCThreshold)) } if err := s.gcBlockstore(s.hot, opts); err != nil { log.Warnf("error garbage collecting hostore: %s", err) } + log.Infof("measured hot store size after GC: %d", getSize()) } func (s *SplitStore) gcBlockstore(b bstore.Blockstore, opts []bstore.BlockstoreGCOption) error { + if err := s.checkYield(); err != nil { + return err + } if gc, ok := b.(bstore.BlockstoreGC); ok { log.Info("garbage collecting blockstore") startGC := time.Now() - if err := gc.CollectGarbage(opts...); err != nil { + opts = append(opts, bstore.WithCheckFreq(90*time.Second)) + opts = append(opts, bstore.WithCheck(s.checkYield)) + if err := gc.CollectGarbage(s.ctx, opts...); err != nil { return err } @@ -33,3 +84,19 @@ func (s *SplitStore) gcBlockstore(b bstore.Blockstore, opts []bstore.BlockstoreG return fmt.Errorf("blockstore doesn't support garbage collection: %T", b) } + +func (s *SplitStore) gcBlockstoreOnce(b bstore.Blockstore, opts []bstore.BlockstoreGCOption) error { + if gc, ok := b.(bstore.BlockstoreGCOnce); ok { + log.Debug("gc blockstore once") + startGC := time.Now() + + if err := gc.GCOnce(s.ctx, opts...); err != nil { + return err + } + + log.Debugw("gc blockstore once done", "took", time.Since(startGC)) + return nil + } + + return fmt.Errorf("blockstore doesn't support gc once: %T", b) +} diff --git a/blockstore/splitstore/splitstore_prune.go b/blockstore/splitstore/splitstore_prune.go index 6a26c00d2..08d5b8cca 100644 --- a/blockstore/splitstore/splitstore_prune.go +++ b/blockstore/splitstore/splitstore_prune.go @@ -47,6 +47,23 @@ var ( PruneThreshold = 7 * build.Finality ) +// GCHotstore runs online GC on the chain state in the hotstore according the to options specified +func (s *SplitStore) GCHotStore(opts api.HotGCOpts) error { + if opts.Moving { + gcOpts := []bstore.BlockstoreGCOption{bstore.WithFullGC(true)} + return s.gcBlockstore(s.hot, gcOpts) + } + + gcOpts := []bstore.BlockstoreGCOption{bstore.WithThreshold(opts.Threshold)} + var err error + if opts.Periodic { + err = s.gcBlockstore(s.hot, gcOpts) + } else { + err = s.gcBlockstoreOnce(s.hot, gcOpts) + } + return err +} + // PruneChain instructs the SplitStore to prune chain state in the coldstore, according to the // options specified. 
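GCHotStore above is exposed through the new admin-only ChainHotGC endpoint added to the full node API earlier in this diff. A hedged usage sketch — not part of this diff, with option values chosen purely for illustration and field names taken from their use in GCHotStore:

package example

import (
	"context"

	lapi "github.com/filecoin-project/lotus/api"
)

// aggressiveHotGC asks the node for a single, non-moving online GC pass at a
// low threshold, along the lines of what the `lotus chain prune hot` warning
// above describes. Client setup and auth are assumed boilerplate.
func aggressiveHotGC(ctx context.Context, node lapi.FullNode) error {
	return node.ChainHotGC(ctx, lapi.HotGCOpts{
		Threshold: 0.01,  // collect value logs with roughly >=1% garbage
		Periodic:  false, // single GCOnce pass instead of a full online GC loop
		Moving:    false, // no moving (copying) GC
	})
}
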
func (s *SplitStore) PruneChain(opts api.PruneOpts) error { @@ -329,9 +346,9 @@ func (s *SplitStore) doPrune(curTs *types.TipSet, retainStateP func(int64) bool, } s.pruneIndex++ - err = s.ds.Put(s.ctx, pruneIndexKey, int64ToBytes(s.compactionIndex)) + err = s.ds.Put(s.ctx, pruneIndexKey, int64ToBytes(s.pruneIndex)) if err != nil { - return xerrors.Errorf("error saving compaction index: %w", err) + return xerrors.Errorf("error saving prune index: %w", err) } return nil diff --git a/blockstore/splitstore/splitstore_reify.go b/blockstore/splitstore/splitstore_reify.go index aa14f090a..07efedead 100644 --- a/blockstore/splitstore/splitstore_reify.go +++ b/blockstore/splitstore/splitstore_reify.go @@ -101,7 +101,7 @@ func (s *SplitStore) doReify(c cid.Cid) { defer s.txnLk.RUnlock() count := 0 - err := s.walkObjectIncomplete(c, newTmpVisitor(), + _, err := s.walkObjectIncomplete(c, newTmpVisitor(), func(c cid.Cid) error { if isUnitaryObject(c) { return errStopWalk diff --git a/blockstore/splitstore/splitstore_test.go b/blockstore/splitstore/splitstore_test.go index c97a9d01c..68e1bfb65 100644 --- a/blockstore/splitstore/splitstore_test.go +++ b/blockstore/splitstore/splitstore_test.go @@ -757,6 +757,8 @@ func (b *mockStore) DeleteMany(_ context.Context, cids []cid.Cid) error { return nil } +func (b *mockStore) Flush(context.Context) error { return nil } + func (b *mockStore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) { return nil, errors.New("not implemented") } diff --git a/blockstore/sync.go b/blockstore/sync.go index 4f97027ae..652943dca 100644 --- a/blockstore/sync.go +++ b/blockstore/sync.go @@ -20,6 +20,8 @@ type SyncBlockstore struct { bs MemBlockstore // specifically use a memStore to save indirection overhead. } +func (*SyncBlockstore) Flush(context.Context) error { return nil } + func (m *SyncBlockstore) DeleteBlock(ctx context.Context, k cid.Cid) error { m.mu.Lock() defer m.mu.Unlock() diff --git a/blockstore/timed.go b/blockstore/timed.go index dda2e1958..3deabb7b8 100644 --- a/blockstore/timed.go +++ b/blockstore/timed.go @@ -93,6 +93,16 @@ func (t *TimedCacheBlockstore) rotate() { t.mu.Unlock() } +func (t *TimedCacheBlockstore) Flush(ctx context.Context) error { + t.mu.Lock() + defer t.mu.Unlock() + + if err := t.active.Flush(ctx); err != nil { + return err + } + return t.inactive.Flush(ctx) +} + func (t *TimedCacheBlockstore) Put(ctx context.Context, b blocks.Block) error { // Don't check the inactive set here. We want to keep this block for at // least one interval. 
diff --git a/blockstore/union.go b/blockstore/union.go index 3372cd20c..ae6f81955 100644 --- a/blockstore/union.go +++ b/blockstore/union.go @@ -55,6 +55,15 @@ func (m unionBlockstore) GetSize(ctx context.Context, cid cid.Cid) (size int, er return size, err } +func (m unionBlockstore) Flush(ctx context.Context) (err error) { + for _, bs := range m { + if err = bs.Flush(ctx); err != nil { + break + } + } + return err +} + func (m unionBlockstore) Put(ctx context.Context, block blocks.Block) (err error) { for _, bs := range m { if err = bs.Put(ctx, block); err != nil { diff --git a/build/openrpc/full.json.gz b/build/openrpc/full.json.gz index 1dfe553d5..7d621f3ee 100644 Binary files a/build/openrpc/full.json.gz and b/build/openrpc/full.json.gz differ diff --git a/build/openrpc/gateway.json.gz b/build/openrpc/gateway.json.gz index 25b198347..1988262e5 100644 Binary files a/build/openrpc/gateway.json.gz and b/build/openrpc/gateway.json.gz differ diff --git a/build/openrpc/miner.json.gz b/build/openrpc/miner.json.gz index 470a9c5a4..47bda504c 100644 Binary files a/build/openrpc/miner.json.gz and b/build/openrpc/miner.json.gz differ diff --git a/build/openrpc/worker.json.gz b/build/openrpc/worker.json.gz index 3f73814cf..204fa2477 100644 Binary files a/build/openrpc/worker.json.gz and b/build/openrpc/worker.json.gz differ diff --git a/build/params_shared_vals.go b/build/params_shared_vals.go index fe5618f60..dd7386863 100644 --- a/build/params_shared_vals.go +++ b/build/params_shared_vals.go @@ -26,17 +26,13 @@ const UnixfsLinksPerLevel = 1024 const AllowableClockDriftSecs = uint64(1) -// TODO: nv19: Re-enable when migration is setup -//// Used by tests and some obscure tooling -///* inline-gen template -// -//const TestNetworkVersion = network.Version{{.latestNetworkVersion}} -// -///* inline-gen start */ +// Used by tests and some obscure tooling +/* inline-gen template +const TestNetworkVersion = network.Version{{.latestNetworkVersion}} +/* inline-gen start */ +const TestNetworkVersion = network.Version20 -const TestNetworkVersion = network.Version18 - -///* inline-gen end */ +/* inline-gen end */ // Epochs const ForkLengthThreshold = Finality diff --git a/build/version.go b/build/version.go index 3db9ab4e1..de16623a5 100644 --- a/build/version.go +++ b/build/version.go @@ -37,7 +37,7 @@ func BuildTypeString() string { } // BuildVersion is the local build version -const BuildVersion = "1.22.1" +const BuildVersion = "1.23.0" func UserVersion() string { if os.Getenv("LOTUS_VERSION_IGNORE_COMMIT") == "1" { diff --git a/chain/actors/agen/main.go b/chain/actors/agen/main.go index b9f3a22a4..811ea27e9 100644 --- a/chain/actors/agen/main.go +++ b/chain/actors/agen/main.go @@ -4,7 +4,6 @@ import ( "bytes" "fmt" "go/format" - "io/ioutil" "os" "path/filepath" "strconv" @@ -66,7 +65,7 @@ func generateAdapters() error { } { - af, err := ioutil.ReadFile(filepath.Join(actDir, "actor.go.template")) + af, err := os.ReadFile(filepath.Join(actDir, "actor.go.template")) if err != nil { return xerrors.Errorf("loading actor template: %w", err) } @@ -90,7 +89,7 @@ func generateAdapters() error { return err } - if err := ioutil.WriteFile(filepath.Join(actDir, fmt.Sprintf("%s.go", act)), fmted, 0666); err != nil { + if err := os.WriteFile(filepath.Join(actDir, fmt.Sprintf("%s.go", act)), fmted, 0666); err != nil { return err } } @@ -100,7 +99,7 @@ func generateAdapters() error { } func generateState(actDir string, versions []int) error { - af, err := ioutil.ReadFile(filepath.Join(actDir, "state.go.template")) + af, 
err := os.ReadFile(filepath.Join(actDir, "state.go.template")) if err != nil { if os.IsNotExist(err) { return nil // skip @@ -123,7 +122,7 @@ func generateState(actDir string, versions []int) error { return err } - if err := ioutil.WriteFile(filepath.Join(actDir, fmt.Sprintf("v%d.go", version)), b.Bytes(), 0666); err != nil { + if err := os.WriteFile(filepath.Join(actDir, fmt.Sprintf("v%d.go", version)), b.Bytes(), 0666); err != nil { return err } } @@ -132,7 +131,7 @@ func generateState(actDir string, versions []int) error { } func generateMessages(actDir string) error { - af, err := ioutil.ReadFile(filepath.Join(actDir, "message.go.template")) + af, err := os.ReadFile(filepath.Join(actDir, "message.go.template")) if err != nil { if os.IsNotExist(err) { return nil // skip @@ -155,7 +154,7 @@ func generateMessages(actDir string) error { return err } - if err := ioutil.WriteFile(filepath.Join(actDir, fmt.Sprintf("message%d.go", version)), b.Bytes(), 0666); err != nil { + if err := os.WriteFile(filepath.Join(actDir, fmt.Sprintf("message%d.go", version)), b.Bytes(), 0666); err != nil { return err } } @@ -165,7 +164,7 @@ func generateMessages(actDir string) error { func generatePolicy(policyPath string) error { - pf, err := ioutil.ReadFile(policyPath + ".template") + pf, err := os.ReadFile(policyPath + ".template") if err != nil { if os.IsNotExist(err) { return nil // skip @@ -187,7 +186,7 @@ func generatePolicy(policyPath string) error { return err } - if err := ioutil.WriteFile(policyPath, b.Bytes(), 0666); err != nil { + if err := os.WriteFile(policyPath, b.Bytes(), 0666); err != nil { return err } @@ -196,7 +195,7 @@ func generatePolicy(policyPath string) error { func generateBuiltin(builtinPath string) error { - bf, err := ioutil.ReadFile(builtinPath + ".template") + bf, err := os.ReadFile(builtinPath + ".template") if err != nil { if os.IsNotExist(err) { return nil // skip @@ -218,7 +217,7 @@ func generateBuiltin(builtinPath string) error { return err } - if err := ioutil.WriteFile(builtinPath, b.Bytes(), 0666); err != nil { + if err := os.WriteFile(builtinPath, b.Bytes(), 0666); err != nil { return err } @@ -227,7 +226,7 @@ func generateBuiltin(builtinPath string) error { func generateRegistry(registryPath string) error { - bf, err := ioutil.ReadFile(registryPath + ".template") + bf, err := os.ReadFile(registryPath + ".template") if err != nil { if os.IsNotExist(err) { return nil // skip @@ -248,7 +247,7 @@ func generateRegistry(registryPath string) error { return err } - if err := ioutil.WriteFile(registryPath, b.Bytes(), 0666); err != nil { + if err := os.WriteFile(registryPath, b.Bytes(), 0666); err != nil { return err } diff --git a/chain/actors/builtin/verifreg/actor.go.template b/chain/actors/builtin/verifreg/actor.go.template index 9f9efef92..9b779a68d 100644 --- a/chain/actors/builtin/verifreg/actor.go.template +++ b/chain/actors/builtin/verifreg/actor.go.template @@ -83,6 +83,7 @@ type State interface { GetAllocations(clientIdAddr address.Address) (map[AllocationId]Allocation, error) GetClaim(providerIdAddr address.Address, claimId ClaimId) (*Claim, bool, error) GetClaims(providerIdAddr address.Address) (map[ClaimId]Claim, error) + GetClaimIdsBySector(providerIdAddr address.Address) (map[abi.SectorNumber][]ClaimId, error) GetState() interface{} } diff --git a/chain/actors/builtin/verifreg/state.go.template b/chain/actors/builtin/verifreg/state.go.template index 526dff54e..adcbc22c2 100644 --- a/chain/actors/builtin/verifreg/state.go.template +++ 
b/chain/actors/builtin/verifreg/state.go.template @@ -170,6 +170,27 @@ func (s *state{{.v}}) GetClaims(providerIdAddr address.Address) (map[ClaimId]Cla {{end}} } +func (s *state{{.v}}) GetClaimIdsBySector(providerIdAddr address.Address) (map[abi.SectorNumber][]ClaimId, error) { +{{if (le .v 8)}} + return nil, xerrors.Errorf("unsupported in actors v{{.v}}") +{{else}} + v{{.v}}Map, err := s.LoadClaimsToMap(s.store, providerIdAddr) + + retMap := make(map[abi.SectorNumber][]ClaimId) + for k, v := range v{{.v}}Map { + claims, ok := retMap[v.Sector] + if !ok { + retMap[v.Sector] = []ClaimId{ClaimId(k)} + } else { + retMap[v.Sector] = append(claims, ClaimId(k)) + } + } + + return retMap, err + +{{end}} +} + func (s *state{{.v}}) ActorKey() string { return manifest.VerifregKey } diff --git a/chain/actors/builtin/verifreg/v0.go b/chain/actors/builtin/verifreg/v0.go index 8d97ebbc2..9913c42c0 100644 --- a/chain/actors/builtin/verifreg/v0.go +++ b/chain/actors/builtin/verifreg/v0.go @@ -118,6 +118,12 @@ func (s *state0) GetClaims(providerIdAddr address.Address) (map[ClaimId]Claim, e } +func (s *state0) GetClaimIdsBySector(providerIdAddr address.Address) (map[abi.SectorNumber][]ClaimId, error) { + + return nil, xerrors.Errorf("unsupported in actors v0") + +} + func (s *state0) ActorKey() string { return manifest.VerifregKey } diff --git a/chain/actors/builtin/verifreg/v10.go b/chain/actors/builtin/verifreg/v10.go index 63b161da3..256f4d2f8 100644 --- a/chain/actors/builtin/verifreg/v10.go +++ b/chain/actors/builtin/verifreg/v10.go @@ -134,6 +134,24 @@ func (s *state10) GetClaims(providerIdAddr address.Address) (map[ClaimId]Claim, } +func (s *state10) GetClaimIdsBySector(providerIdAddr address.Address) (map[abi.SectorNumber][]ClaimId, error) { + + v10Map, err := s.LoadClaimsToMap(s.store, providerIdAddr) + + retMap := make(map[abi.SectorNumber][]ClaimId) + for k, v := range v10Map { + claims, ok := retMap[v.Sector] + if !ok { + retMap[v.Sector] = []ClaimId{ClaimId(k)} + } else { + retMap[v.Sector] = append(claims, ClaimId(k)) + } + } + + return retMap, err + +} + func (s *state10) ActorKey() string { return manifest.VerifregKey } diff --git a/chain/actors/builtin/verifreg/v11.go b/chain/actors/builtin/verifreg/v11.go index d2abde6c9..7b7b9e4c0 100644 --- a/chain/actors/builtin/verifreg/v11.go +++ b/chain/actors/builtin/verifreg/v11.go @@ -134,6 +134,24 @@ func (s *state11) GetClaims(providerIdAddr address.Address) (map[ClaimId]Claim, } +func (s *state11) GetClaimIdsBySector(providerIdAddr address.Address) (map[abi.SectorNumber][]ClaimId, error) { + + v11Map, err := s.LoadClaimsToMap(s.store, providerIdAddr) + + retMap := make(map[abi.SectorNumber][]ClaimId) + for k, v := range v11Map { + claims, ok := retMap[v.Sector] + if !ok { + retMap[v.Sector] = []ClaimId{ClaimId(k)} + } else { + retMap[v.Sector] = append(claims, ClaimId(k)) + } + } + + return retMap, err + +} + func (s *state11) ActorKey() string { return manifest.VerifregKey } diff --git a/chain/actors/builtin/verifreg/v2.go b/chain/actors/builtin/verifreg/v2.go index 61c175c7a..31f7f775d 100644 --- a/chain/actors/builtin/verifreg/v2.go +++ b/chain/actors/builtin/verifreg/v2.go @@ -118,6 +118,12 @@ func (s *state2) GetClaims(providerIdAddr address.Address) (map[ClaimId]Claim, e } +func (s *state2) GetClaimIdsBySector(providerIdAddr address.Address) (map[abi.SectorNumber][]ClaimId, error) { + + return nil, xerrors.Errorf("unsupported in actors v2") + +} + func (s *state2) ActorKey() string { return manifest.VerifregKey } diff --git 
a/chain/actors/builtin/verifreg/v3.go b/chain/actors/builtin/verifreg/v3.go index 5ba478500..3ea016fd5 100644 --- a/chain/actors/builtin/verifreg/v3.go +++ b/chain/actors/builtin/verifreg/v3.go @@ -119,6 +119,12 @@ func (s *state3) GetClaims(providerIdAddr address.Address) (map[ClaimId]Claim, e } +func (s *state3) GetClaimIdsBySector(providerIdAddr address.Address) (map[abi.SectorNumber][]ClaimId, error) { + + return nil, xerrors.Errorf("unsupported in actors v3") + +} + func (s *state3) ActorKey() string { return manifest.VerifregKey } diff --git a/chain/actors/builtin/verifreg/v4.go b/chain/actors/builtin/verifreg/v4.go index 3ad739f68..464cc9fdc 100644 --- a/chain/actors/builtin/verifreg/v4.go +++ b/chain/actors/builtin/verifreg/v4.go @@ -119,6 +119,12 @@ func (s *state4) GetClaims(providerIdAddr address.Address) (map[ClaimId]Claim, e } +func (s *state4) GetClaimIdsBySector(providerIdAddr address.Address) (map[abi.SectorNumber][]ClaimId, error) { + + return nil, xerrors.Errorf("unsupported in actors v4") + +} + func (s *state4) ActorKey() string { return manifest.VerifregKey } diff --git a/chain/actors/builtin/verifreg/v5.go b/chain/actors/builtin/verifreg/v5.go index 1ffe06f3f..17901dd23 100644 --- a/chain/actors/builtin/verifreg/v5.go +++ b/chain/actors/builtin/verifreg/v5.go @@ -119,6 +119,12 @@ func (s *state5) GetClaims(providerIdAddr address.Address) (map[ClaimId]Claim, e } +func (s *state5) GetClaimIdsBySector(providerIdAddr address.Address) (map[abi.SectorNumber][]ClaimId, error) { + + return nil, xerrors.Errorf("unsupported in actors v5") + +} + func (s *state5) ActorKey() string { return manifest.VerifregKey } diff --git a/chain/actors/builtin/verifreg/v6.go b/chain/actors/builtin/verifreg/v6.go index 9786838d1..68fac64cb 100644 --- a/chain/actors/builtin/verifreg/v6.go +++ b/chain/actors/builtin/verifreg/v6.go @@ -119,6 +119,12 @@ func (s *state6) GetClaims(providerIdAddr address.Address) (map[ClaimId]Claim, e } +func (s *state6) GetClaimIdsBySector(providerIdAddr address.Address) (map[abi.SectorNumber][]ClaimId, error) { + + return nil, xerrors.Errorf("unsupported in actors v6") + +} + func (s *state6) ActorKey() string { return manifest.VerifregKey } diff --git a/chain/actors/builtin/verifreg/v7.go b/chain/actors/builtin/verifreg/v7.go index e63e27742..e8f3ac739 100644 --- a/chain/actors/builtin/verifreg/v7.go +++ b/chain/actors/builtin/verifreg/v7.go @@ -118,6 +118,12 @@ func (s *state7) GetClaims(providerIdAddr address.Address) (map[ClaimId]Claim, e } +func (s *state7) GetClaimIdsBySector(providerIdAddr address.Address) (map[abi.SectorNumber][]ClaimId, error) { + + return nil, xerrors.Errorf("unsupported in actors v7") + +} + func (s *state7) ActorKey() string { return manifest.VerifregKey } diff --git a/chain/actors/builtin/verifreg/v8.go b/chain/actors/builtin/verifreg/v8.go index a1edeba34..89393c4d9 100644 --- a/chain/actors/builtin/verifreg/v8.go +++ b/chain/actors/builtin/verifreg/v8.go @@ -118,6 +118,12 @@ func (s *state8) GetClaims(providerIdAddr address.Address) (map[ClaimId]Claim, e } +func (s *state8) GetClaimIdsBySector(providerIdAddr address.Address) (map[abi.SectorNumber][]ClaimId, error) { + + return nil, xerrors.Errorf("unsupported in actors v8") + +} + func (s *state8) ActorKey() string { return manifest.VerifregKey } diff --git a/chain/actors/builtin/verifreg/v9.go b/chain/actors/builtin/verifreg/v9.go index bf6424ba7..ce63c7f94 100644 --- a/chain/actors/builtin/verifreg/v9.go +++ b/chain/actors/builtin/verifreg/v9.go @@ -133,6 +133,24 @@ func (s 
*state9) GetClaims(providerIdAddr address.Address) (map[ClaimId]Claim, e } +func (s *state9) GetClaimIdsBySector(providerIdAddr address.Address) (map[abi.SectorNumber][]ClaimId, error) { + + v9Map, err := s.LoadClaimsToMap(s.store, providerIdAddr) + + retMap := make(map[abi.SectorNumber][]ClaimId) + for k, v := range v9Map { + claims, ok := retMap[v.Sector] + if !ok { + retMap[v.Sector] = []ClaimId{ClaimId(k)} + } else { + retMap[v.Sector] = append(claims, ClaimId(k)) + } + } + + return retMap, err + +} + func (s *state9) ActorKey() string { return manifest.VerifregKey } diff --git a/chain/actors/builtin/verifreg/verifreg.go b/chain/actors/builtin/verifreg/verifreg.go index 678a776bd..eb911ea46 100644 --- a/chain/actors/builtin/verifreg/verifreg.go +++ b/chain/actors/builtin/verifreg/verifreg.go @@ -137,6 +137,7 @@ type State interface { GetAllocations(clientIdAddr address.Address) (map[AllocationId]Allocation, error) GetClaim(providerIdAddr address.Address, claimId ClaimId) (*Claim, bool, error) GetClaims(providerIdAddr address.Address) (map[ClaimId]Claim, error) + GetClaimIdsBySector(providerIdAddr address.Address) (map[abi.SectorNumber][]ClaimId, error) GetState() interface{} } diff --git a/chain/badtscache.go b/chain/badtscache.go index 19b79bb9b..0f215dcdc 100644 --- a/chain/badtscache.go +++ b/chain/badtscache.go @@ -3,14 +3,14 @@ package chain import ( "fmt" - lru "github.com/hashicorp/golang-lru" + lru "github.com/hashicorp/golang-lru/v2" "github.com/ipfs/go-cid" "github.com/filecoin-project/lotus/build" ) type BadBlockCache struct { - badBlocks *lru.ARCCache + badBlocks *lru.ARCCache[cid.Cid, BadBlockReason] } type BadBlockReason struct { @@ -43,7 +43,7 @@ func (bbr BadBlockReason) String() string { } func NewBadBlockCache() *BadBlockCache { - cache, err := lru.NewARC(build.BadBlockCacheSize) + cache, err := lru.NewARC[cid.Cid, BadBlockReason](build.BadBlockCacheSize) if err != nil { panic(err) // ok } @@ -66,10 +66,5 @@ func (bts *BadBlockCache) Purge() { } func (bts *BadBlockCache) Has(c cid.Cid) (BadBlockReason, bool) { - rval, ok := bts.badBlocks.Get(c) - if !ok { - return BadBlockReason{}, false - } - - return rval.(BadBlockReason), true + return bts.badBlocks.Get(c) } diff --git a/chain/beacon/drand/drand.go b/chain/beacon/drand/drand.go index 181fa3046..9b62a7928 100644 --- a/chain/beacon/drand/drand.go +++ b/chain/beacon/drand/drand.go @@ -8,14 +8,14 @@ import ( dchain "github.com/drand/drand/chain" dclient "github.com/drand/drand/client" hclient "github.com/drand/drand/client/http" + "github.com/drand/drand/common/scheme" dlog "github.com/drand/drand/log" gclient "github.com/drand/drand/lp2p/client" "github.com/drand/kyber" - kzap "github.com/go-kit/kit/log/zap" - lru "github.com/hashicorp/golang-lru" + lru "github.com/hashicorp/golang-lru/v2" logging "github.com/ipfs/go-log/v2" pubsub "github.com/libp2p/go-libp2p-pubsub" - "go.uber.org/zap/zapcore" + "go.uber.org/zap" "golang.org/x/xerrors" "github.com/filecoin-project/go-state-types/abi" @@ -61,7 +61,7 @@ type DrandBeacon struct { filGenTime uint64 filRoundTime uint64 - localCache *lru.Cache + localCache *lru.Cache[uint64, *types.BeaconEntry] } // DrandHTTPClient interface overrides the user agent used by drand @@ -69,6 +69,18 @@ type DrandHTTPClient interface { SetUserAgent(string) } +type logger struct { + *zap.SugaredLogger +} + +func (l *logger) With(args ...interface{}) dlog.Logger { + return &logger{l.SugaredLogger.With(args...)} +} + +func (l *logger) Named(s string) dlog.Logger { + return 
&logger{l.SugaredLogger.Named(s)} +} + func NewDrandBeacon(genesisTs, interval uint64, ps *pubsub.PubSub, config dtypes.DrandConfig) (*DrandBeacon, error) { if genesisTs == 0 { panic("what are you doing this cant be zero") @@ -79,9 +91,6 @@ func NewDrandBeacon(genesisTs, interval uint64, ps *pubsub.PubSub, config dtypes return nil, xerrors.Errorf("unable to unmarshal drand chain info: %w", err) } - dlogger := dlog.NewKitLoggerFrom(kzap.NewZapSugarLogger( - log.SugaredLogger.Desugar(), zapcore.InfoLevel)) - var clients []dclient.Client for _, url := range config.Servers { hc, err := hclient.NewWithInfo(url, drandChain, nil) @@ -96,7 +105,7 @@ func NewDrandBeacon(genesisTs, interval uint64, ps *pubsub.PubSub, config dtypes opts := []dclient.Option{ dclient.WithChainInfo(drandChain), dclient.WithCacheSize(1024), - dclient.WithLogger(dlogger), + dclient.WithLogger(&logger{&log.SugaredLogger}), } if ps != nil { @@ -110,7 +119,7 @@ func NewDrandBeacon(genesisTs, interval uint64, ps *pubsub.PubSub, config dtypes return nil, xerrors.Errorf("creating drand client: %w", err) } - lc, err := lru.New(1024) + lc, err := lru.New[uint64, *types.BeaconEntry](1024) if err != nil { return nil, err } @@ -160,16 +169,12 @@ func (db *DrandBeacon) Entry(ctx context.Context, round uint64) <-chan beacon.Re return out } func (db *DrandBeacon) cacheValue(e types.BeaconEntry) { - db.localCache.Add(e.Round, e) + db.localCache.Add(e.Round, &e) } func (db *DrandBeacon) getCachedValue(round uint64) *types.BeaconEntry { - v, ok := db.localCache.Get(round) - if !ok { - return nil - } - e, _ := v.(types.BeaconEntry) - return &e + v, _ := db.localCache.Get(round) + return v } func (db *DrandBeacon) VerifyEntry(curr types.BeaconEntry, prev types.BeaconEntry) error { @@ -194,7 +199,7 @@ func (db *DrandBeacon) VerifyEntry(curr types.BeaconEntry, prev types.BeaconEntr Round: curr.Round, Signature: curr.Data, } - err := dchain.VerifyBeacon(db.pubkey, b) + err := dchain.NewVerifier(scheme.GetSchemeFromEnv()).VerifyBeacon(*b, db.pubkey) if err == nil { db.cacheValue(curr) } diff --git a/chain/beacon/drand/drand_test.go b/chain/beacon/drand/drand_test.go index e2819f4e9..7269139ca 100644 --- a/chain/beacon/drand/drand_test.go +++ b/chain/beacon/drand/drand_test.go @@ -3,6 +3,7 @@ package drand import ( + "context" "os" "testing" @@ -20,11 +21,11 @@ func TestPrintGroupInfo(t *testing.T) { c, err := hclient.New(server, nil, nil) assert.NoError(t, err) cg := c.(interface { - FetchChainInfo(groupHash []byte) (*dchain.Info, error) + FetchChainInfo(ctx context.Context, groupHash []byte) (*dchain.Info, error) }) - chain, err := cg.FetchChainInfo(nil) + chain, err := cg.FetchChainInfo(context.Background(), nil) assert.NoError(t, err) - err = chain.ToJSON(os.Stdout) + err = chain.ToJSON(os.Stdout, nil) assert.NoError(t, err) } diff --git a/chain/block_receipt_tracker.go b/chain/block_receipt_tracker.go index 58de71e19..9c1e035a2 100644 --- a/chain/block_receipt_tracker.go +++ b/chain/block_receipt_tracker.go @@ -5,7 +5,7 @@ import ( "sync" "time" - lru "github.com/hashicorp/golang-lru" + lru "github.com/hashicorp/golang-lru/v2" "github.com/libp2p/go-libp2p/core/peer" "github.com/filecoin-project/lotus/build" @@ -17,7 +17,7 @@ type blockReceiptTracker struct { // using an LRU cache because i don't want to handle all the edge cases for // manual cleanup and maintenance of a fixed size set - cache *lru.Cache + cache *lru.Cache[types.TipSetKey, *peerSet] } type peerSet struct { @@ -25,7 +25,7 @@ type peerSet struct { } func 
newBlockReceiptTracker() *blockReceiptTracker { - c, _ := lru.New(512) + c, _ := lru.New[types.TipSetKey, *peerSet](512) return &blockReceiptTracker{ cache: c, } @@ -46,20 +46,18 @@ func (brt *blockReceiptTracker) Add(p peer.ID, ts *types.TipSet) { return } - val.(*peerSet).peers[p] = build.Clock.Now() + val.peers[p] = build.Clock.Now() } func (brt *blockReceiptTracker) GetPeers(ts *types.TipSet) []peer.ID { brt.lk.Lock() defer brt.lk.Unlock() - val, ok := brt.cache.Get(ts.Key()) + ps, ok := brt.cache.Get(ts.Key()) if !ok { return nil } - ps := val.(*peerSet) - out := make([]peer.ID, 0, len(ps.peers)) for p := range ps.peers { out = append(out, p) diff --git a/chain/consensus/common.go b/chain/consensus/common.go new file mode 100644 index 000000000..1d9fb3646 --- /dev/null +++ b/chain/consensus/common.go @@ -0,0 +1,514 @@ +package consensus + +import ( + "context" + "fmt" + "strings" + + "github.com/hashicorp/go-multierror" + "github.com/ipfs/go-cid" + cbor "github.com/ipfs/go-ipld-cbor" + logging "github.com/ipfs/go-log/v2" + pubsub "github.com/libp2p/go-libp2p-pubsub" + "github.com/multiformats/go-varint" + cbg "github.com/whyrusleeping/cbor-gen" + "go.opencensus.io/stats" + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-address" + builtintypes "github.com/filecoin-project/go-state-types/builtin" + "github.com/filecoin-project/go-state-types/crypto" + "github.com/filecoin-project/go-state-types/network" + blockadt "github.com/filecoin-project/specs-actors/actors/util/adt" + + "github.com/filecoin-project/lotus/api" + bstore "github.com/filecoin-project/lotus/blockstore" + "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/chain/actors/builtin" + "github.com/filecoin-project/lotus/chain/state" + "github.com/filecoin-project/lotus/chain/stmgr" + "github.com/filecoin-project/lotus/chain/store" + "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/chain/vm" + "github.com/filecoin-project/lotus/lib/async" + "github.com/filecoin-project/lotus/metrics" +) + +// Common operations shared by all consensus algorithm implementations. +var log = logging.Logger("consensus-common") + +// RunAsyncChecks accepts a list of checks to perform in parallel. +// +// Each consensus algorithm may choose to perform a set of different +// checks when a new blocks is received. +func RunAsyncChecks(ctx context.Context, await []async.ErrorFuture) error { + var merr error + for _, fut := range await { + if err := fut.AwaitContext(ctx); err != nil { + merr = multierror.Append(merr, err) + } + } + if merr != nil { + mulErr := merr.(*multierror.Error) + mulErr.ErrorFormat = func(es []error) string { + if len(es) == 1 { + return fmt.Sprintf("1 error occurred:\n\t* %+v\n\n", es[0]) + } + + points := make([]string, len(es)) + for i, err := range es { + points[i] = fmt.Sprintf("* %+v", err) + } + + return fmt.Sprintf( + "%d errors occurred:\n\t%s\n\n", + len(es), strings.Join(points, "\n\t")) + } + return mulErr + } + + return nil +} + +// CommonBlkChecks performed by all consensus implementations. 
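The caches touched above (BadBlockCache in badtscache.go, the drand localCache, and blockReceiptTracker) all move from github.com/hashicorp/golang-lru to the generic v2 package, which makes Get return a typed value and removes the manual type assertions. A minimal standalone sketch of that v2 API, with illustrative names that are not from the Lotus codebase:

```go
package main

import (
	"fmt"

	lru "github.com/hashicorp/golang-lru/v2"
)

func main() {
	// v2 caches are parameterized by key and value type, so Get needs no
	// type assertion (compare the blockReceiptTracker change above).
	cache, err := lru.New[string, int](128)
	if err != nil {
		panic(err)
	}

	cache.Add("height", 42)

	if v, ok := cache.Get("height"); ok {
		fmt.Println("cached value:", v) // prints: cached value: 42
	}
}
```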
+func CommonBlkChecks(ctx context.Context, sm *stmgr.StateManager, cs *store.ChainStore, + b *types.FullBlock, baseTs *types.TipSet) []async.ErrorFuture { + h := b.Header + msgsCheck := async.Err(func() error { + if b.Cid() == build.WhitelistedBlock { + return nil + } + + if err := checkBlockMessages(ctx, sm, cs, b, baseTs); err != nil { + return xerrors.Errorf("block had invalid messages: %w", err) + } + return nil + }) + + baseFeeCheck := async.Err(func() error { + baseFee, err := cs.ComputeBaseFee(ctx, baseTs) + if err != nil { + return xerrors.Errorf("computing base fee: %w", err) + } + if types.BigCmp(baseFee, b.Header.ParentBaseFee) != 0 { + return xerrors.Errorf("base fee doesn't match: %s (header) != %s (computed)", + b.Header.ParentBaseFee, baseFee) + } + return nil + }) + + stateRootCheck := async.Err(func() error { + stateroot, precp, err := sm.TipSetState(ctx, baseTs) + if err != nil { + return xerrors.Errorf("get tipsetstate(%d, %s) failed: %w", h.Height, h.Parents, err) + } + + if stateroot != h.ParentStateRoot { + msgs, err := cs.MessagesForTipset(ctx, baseTs) + if err != nil { + log.Error("failed to load messages for tipset during tipset state mismatch error: ", err) + } else { + log.Warn("Messages for tipset with mismatching state:") + for i, m := range msgs { + mm := m.VMMessage() + log.Warnf("Message[%d]: from=%s to=%s method=%d params=%x", i, mm.From, mm.To, mm.Method, mm.Params) + } + } + + return xerrors.Errorf("parent state root did not match computed state (%s != %s)", h.ParentStateRoot, stateroot) + } + + if precp != h.ParentMessageReceipts { + return xerrors.Errorf("parent receipts root did not match computed value (%s != %s)", precp, h.ParentMessageReceipts) + } + return nil + }) + + return []async.ErrorFuture{ + msgsCheck, + baseFeeCheck, + stateRootCheck, + } +} + +func IsValidForSending(nv network.Version, act *types.Actor) bool { + // Before nv18 (Hygge), we only supported built-in account actors as senders. + // + // Note: this gate is probably superfluous, since: + // 1. Placeholder actors cannot be created before nv18. + // 2. EthAccount actors cannot be created before nv18. + // 3. Delegated addresses cannot be created before nv18. + // + // But it's a safeguard. + // + // Note 2: ad-hoc checks for network versions like this across the codebase + // will be problematic with networks with diverging version lineages + // (e.g. Hyperspace). We need to revisit this strategy entirely. + if nv < network.Version18 { + return builtin.IsAccountActor(act.Code) + } + + // After nv18, we also support other kinds of senders. + if builtin.IsAccountActor(act.Code) || builtin.IsEthAccountActor(act.Code) { + return true + } + + // Allow placeholder actors with a delegated address and nonce 0 to send a message. + // These will be converted to an EthAccount actor on first send. + if !builtin.IsPlaceholderActor(act.Code) || act.Nonce != 0 || act.Address == nil || act.Address.Protocol() != address.Delegated { + return false + } + + // Only allow such actors to send if their delegated address is in the EAM's namespace. 
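CommonBlkChecks packages the message, base-fee and state-root validations as async futures so that each consensus implementation can combine them with its own checks and await everything through RunAsyncChecks. A hedged sketch of that wiring (the function and variable names here are assumptions; the real call site is the filcns.ValidateBlock change later in this diff):

```go
package myconsensus

import (
	"context"

	"github.com/filecoin-project/lotus/chain/consensus"
	"github.com/filecoin-project/lotus/chain/stmgr"
	"github.com/filecoin-project/lotus/chain/store"
	"github.com/filecoin-project/lotus/chain/types"
	"github.com/filecoin-project/lotus/lib/async"
)

// validateWithCommonChecks shows how an implementation could run the shared
// checks alongside its own protocol-specific ones.
func validateWithCommonChecks(ctx context.Context, sm *stmgr.StateManager, cs *store.ChainStore,
	blk *types.FullBlock, baseTs *types.TipSet) error {

	checks := consensus.CommonBlkChecks(ctx, sm, cs, blk, baseTs)

	// Append a protocol-specific check (placeholder body here).
	checks = append(checks, async.Err(func() error {
		return nil
	}))

	return consensus.RunAsyncChecks(ctx, checks)
}
```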
+ id, _, err := varint.FromUvarint(act.Address.Payload()) + return err == nil && id == builtintypes.EthereumAddressManagerActorID +} + +func checkBlockMessages(ctx context.Context, sm *stmgr.StateManager, cs *store.ChainStore, b *types.FullBlock, baseTs *types.TipSet) error { + { + var sigCids []cid.Cid // this is what we get for people not wanting the marshalcbor method on the cid type + var pubks [][]byte + + for _, m := range b.BlsMessages { + sigCids = append(sigCids, m.Cid()) + + pubk, err := sm.GetBlsPublicKey(ctx, m.From, baseTs) + if err != nil { + return xerrors.Errorf("failed to load bls public to validate block: %w", err) + } + + pubks = append(pubks, pubk) + } + + if err := VerifyBlsAggregate(ctx, b.Header.BLSAggregate, sigCids, pubks); err != nil { + return xerrors.Errorf("bls aggregate signature was invalid: %w", err) + } + } + + nonces := make(map[address.Address]uint64) + + stateroot, _, err := sm.TipSetState(ctx, baseTs) + if err != nil { + return xerrors.Errorf("failed to compute tipsettate for %s: %w", baseTs.Key(), err) + } + + st, err := state.LoadStateTree(cs.ActorStore(ctx), stateroot) + if err != nil { + return xerrors.Errorf("failed to load base state tree: %w", err) + } + + nv := sm.GetNetworkVersion(ctx, b.Header.Height) + pl := vm.PricelistByEpoch(b.Header.Height) + var sumGasLimit int64 + checkMsg := func(msg types.ChainMsg) error { + m := msg.VMMessage() + + // Phase 1: syntactic validation, as defined in the spec + minGas := pl.OnChainMessage(msg.ChainLength()) + if err := m.ValidForBlockInclusion(minGas.Total(), nv); err != nil { + return xerrors.Errorf("msg %s invalid for block inclusion: %w", m.Cid(), err) + } + + // ValidForBlockInclusion checks if any single message does not exceed BlockGasLimit + // So below is overflow safe + sumGasLimit += m.GasLimit + if sumGasLimit > build.BlockGasLimit { + return xerrors.Errorf("block gas limit exceeded") + } + + // Phase 2: (Partial) semantic validation: + // the sender exists and is an account actor, and the nonces make sense + var sender address.Address + if nv >= network.Version13 { + sender, err = st.LookupID(m.From) + if err != nil { + return xerrors.Errorf("failed to lookup sender %s: %w", m.From, err) + } + } else { + sender = m.From + } + + if _, ok := nonces[sender]; !ok { + // `GetActor` does not validate that this is an account actor. + act, err := st.GetActor(sender) + if err != nil { + return xerrors.Errorf("failed to get actor: %w", err) + } + + if !IsValidForSending(nv, act) { + return xerrors.New("Sender must be an account actor") + } + nonces[sender] = act.Nonce + } + + if nonces[sender] != m.Nonce { + return xerrors.Errorf("wrong nonce (exp: %d, got: %d)", nonces[sender], m.Nonce) + } + nonces[sender]++ + + return nil + } + + // Validate message arrays in a temporary blockstore. 
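IsValidForSending is now exported from the consensus package, and checkBlockMessages below applies it when it first sees a sender. A small hedged sketch of reusing the same gate outside block validation (the helper name and parameters are illustrative):

```go
package myconsensus

import (
	"golang.org/x/xerrors"

	"github.com/filecoin-project/go-address"
	"github.com/filecoin-project/go-state-types/network"

	"github.com/filecoin-project/lotus/chain/consensus"
	"github.com/filecoin-project/lotus/chain/state"
)

// canSend reports whether the actor behind `sender` may send messages at
// network version nv, mirroring the gate used in checkBlockMessages.
func canSend(st *state.StateTree, nv network.Version, sender address.Address) error {
	act, err := st.GetActor(sender)
	if err != nil {
		return xerrors.Errorf("failed to get actor: %w", err)
	}
	if !consensus.IsValidForSending(nv, act) {
		return xerrors.New("sender must be an account actor")
	}
	return nil
}
```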
+ tmpbs := bstore.NewMemory() + tmpstore := blockadt.WrapStore(ctx, cbor.NewCborStore(tmpbs)) + + bmArr := blockadt.MakeEmptyArray(tmpstore) + for i, m := range b.BlsMessages { + if err := checkMsg(m); err != nil { + return xerrors.Errorf("block had invalid bls message at index %d: %w", i, err) + } + + c, err := store.PutMessage(ctx, tmpbs, m) + if err != nil { + return xerrors.Errorf("failed to store message %s: %w", m.Cid(), err) + } + + k := cbg.CborCid(c) + if err := bmArr.Set(uint64(i), &k); err != nil { + return xerrors.Errorf("failed to put bls message at index %d: %w", i, err) + } + } + + smArr := blockadt.MakeEmptyArray(tmpstore) + for i, m := range b.SecpkMessages { + if nv >= network.Version14 && !IsValidSecpkSigType(nv, m.Signature.Type) { + return xerrors.Errorf("block had invalid signed message at index %d: %w", i, err) + } + + if err := checkMsg(m); err != nil { + return xerrors.Errorf("block had invalid secpk message at index %d: %w", i, err) + } + + // `From` being an account actor is only validated inside the `vm.ResolveToDeterministicAddr` call + // in `StateManager.ResolveToDeterministicAddress` here (and not in `checkMsg`). + kaddr, err := sm.ResolveToDeterministicAddress(ctx, m.Message.From, baseTs) + if err != nil { + return xerrors.Errorf("failed to resolve key addr: %w", err) + } + + if err := AuthenticateMessage(m, kaddr); err != nil { + return xerrors.Errorf("failed to validate signature: %w", err) + } + + c, err := store.PutMessage(ctx, tmpbs, m) + if err != nil { + return xerrors.Errorf("failed to store message %s: %w", m.Cid(), err) + } + k := cbg.CborCid(c) + if err := smArr.Set(uint64(i), &k); err != nil { + return xerrors.Errorf("failed to put secpk message at index %d: %w", i, err) + } + } + + bmroot, err := bmArr.Root() + if err != nil { + return xerrors.Errorf("failed to root bls msgs: %w", err) + + } + + smroot, err := smArr.Root() + if err != nil { + return xerrors.Errorf("failed to root secp msgs: %w", err) + } + + mrcid, err := tmpstore.Put(ctx, &types.MsgMeta{ + BlsMessages: bmroot, + SecpkMessages: smroot, + }) + if err != nil { + return xerrors.Errorf("failed to put msg meta: %w", err) + } + + if b.Header.Messages != mrcid { + return fmt.Errorf("messages didnt match message root in header") + } + + // Finally, flush. + err = vm.Copy(ctx, tmpbs, cs.ChainBlockstore(), mrcid) + if err != nil { + return xerrors.Errorf("failed to flush:%w", err) + } + + return nil +} + +// CreateBlockHeader generates the block header from the block template of +// the block being proposed. 
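The message check that follows rebuilds the BLS and secp message AMTs in a throwaway in-memory blockstore, derives the MsgMeta CID, compares it with the header, and only then flushes the result into the chain blockstore. A condensed sketch of just the root computation, assuming only the message CIDs are at hand (the helper name is illustrative):

```go
package myconsensus

import (
	"context"

	"github.com/ipfs/go-cid"
	cbor "github.com/ipfs/go-ipld-cbor"
	cbg "github.com/whyrusleeping/cbor-gen"

	blockadt "github.com/filecoin-project/specs-actors/actors/util/adt"

	bstore "github.com/filecoin-project/lotus/blockstore"
	"github.com/filecoin-project/lotus/chain/types"
)

// computeMsgMetaCid recomputes the Messages CID a header should carry for the
// given BLS and secp message CIDs, using a scratch in-memory blockstore.
func computeMsgMetaCid(ctx context.Context, blsCids, secpkCids []cid.Cid) (cid.Cid, error) {
	store := blockadt.WrapStore(ctx, cbor.NewCborStore(bstore.NewMemory()))

	toRoot := func(cids []cid.Cid) (cid.Cid, error) {
		arr := blockadt.MakeEmptyArray(store)
		for i, c := range cids {
			k := cbg.CborCid(c)
			if err := arr.Set(uint64(i), &k); err != nil {
				return cid.Undef, err
			}
		}
		return arr.Root()
	}

	bmroot, err := toRoot(blsCids)
	if err != nil {
		return cid.Undef, err
	}
	smroot, err := toRoot(secpkCids)
	if err != nil {
		return cid.Undef, err
	}

	return store.Put(ctx, &types.MsgMeta{
		BlsMessages:   bmroot,
		SecpkMessages: smroot,
	})
}
```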
+func CreateBlockHeader(ctx context.Context, sm *stmgr.StateManager, pts *types.TipSet, + bt *api.BlockTemplate) (*types.BlockHeader, []*types.Message, []*types.SignedMessage, error) { + + st, recpts, err := sm.TipSetState(ctx, pts) + if err != nil { + return nil, nil, nil, xerrors.Errorf("failed to load tipset state: %w", err) + } + next := &types.BlockHeader{ + Miner: bt.Miner, + Parents: bt.Parents.Cids(), + Ticket: bt.Ticket, + ElectionProof: bt.Eproof, + + BeaconEntries: bt.BeaconValues, + Height: bt.Epoch, + Timestamp: bt.Timestamp, + WinPoStProof: bt.WinningPoStProof, + ParentStateRoot: st, + ParentMessageReceipts: recpts, + } + + var blsMessages []*types.Message + var secpkMessages []*types.SignedMessage + + var blsMsgCids, secpkMsgCids []cid.Cid + var blsSigs []crypto.Signature + nv := sm.GetNetworkVersion(ctx, bt.Epoch) + for _, msg := range bt.Messages { + if msg.Signature.Type == crypto.SigTypeBLS { + blsSigs = append(blsSigs, msg.Signature) + blsMessages = append(blsMessages, &msg.Message) + + c, err := sm.ChainStore().PutMessage(ctx, &msg.Message) + if err != nil { + return nil, nil, nil, err + } + + blsMsgCids = append(blsMsgCids, c) + } else if IsValidSecpkSigType(nv, msg.Signature.Type) { + c, err := sm.ChainStore().PutMessage(ctx, msg) + if err != nil { + return nil, nil, nil, err + } + + secpkMsgCids = append(secpkMsgCids, c) + secpkMessages = append(secpkMessages, msg) + + } else { + return nil, nil, nil, xerrors.Errorf("unknown sig type: %d", msg.Signature.Type) + } + } + + store := sm.ChainStore().ActorStore(ctx) + blsmsgroot, err := ToMessagesArray(store, blsMsgCids) + if err != nil { + return nil, nil, nil, xerrors.Errorf("building bls amt: %w", err) + } + secpkmsgroot, err := ToMessagesArray(store, secpkMsgCids) + if err != nil { + return nil, nil, nil, xerrors.Errorf("building secpk amt: %w", err) + } + + mmcid, err := store.Put(store.Context(), &types.MsgMeta{ + BlsMessages: blsmsgroot, + SecpkMessages: secpkmsgroot, + }) + if err != nil { + return nil, nil, nil, err + } + next.Messages = mmcid + + aggSig, err := AggregateSignatures(blsSigs) + if err != nil { + return nil, nil, nil, err + } + + next.BLSAggregate = aggSig + pweight, err := sm.ChainStore().Weight(ctx, pts) + if err != nil { + return nil, nil, nil, err + } + next.ParentWeight = pweight + + baseFee, err := sm.ChainStore().ComputeBaseFee(ctx, pts) + if err != nil { + return nil, nil, nil, xerrors.Errorf("computing base fee: %w", err) + } + next.ParentBaseFee = baseFee + + return next, blsMessages, secpkMessages, err + +} + +// Basic sanity-checks performed when a block is proposed locally. 
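CreateBlockHeader lifts the block-assembly logic that previously lived inline in filcns.CreateBlock into a reusable helper: it computes parent state and receipts, splits the template messages into BLS and secp sets, builds the message AMTs, aggregates BLS signatures, and fills in parent weight and base fee. Roughly how a CreateBlock implementation can consume it (a sketch assuming the worker address and parent tipset are already resolved; it mirrors the filcns/mine.go change further down, with the signing step inlined):

```go
package filcnsexample

import (
	"context"

	"golang.org/x/xerrors"

	"github.com/filecoin-project/go-address"

	"github.com/filecoin-project/lotus/api"
	"github.com/filecoin-project/lotus/chain/consensus"
	"github.com/filecoin-project/lotus/chain/stmgr"
	"github.com/filecoin-project/lotus/chain/types"
)

// assembleBlock builds the header and message lists with the shared helper,
// then signs the header with the worker key.
func assembleBlock(ctx context.Context, sm *stmgr.StateManager, w api.Wallet,
	worker address.Address, pts *types.TipSet, bt *api.BlockTemplate) (*types.FullBlock, error) {

	next, blsMessages, secpkMessages, err := consensus.CreateBlockHeader(ctx, sm, pts, bt)
	if err != nil {
		return nil, xerrors.Errorf("failed to process messages from block template: %w", err)
	}

	nosigbytes, err := next.SigningBytes()
	if err != nil {
		return nil, xerrors.Errorf("failed to get signing bytes for block: %w", err)
	}
	sig, err := w.WalletSign(ctx, worker, nosigbytes, api.MsgMeta{Type: api.MTBlock})
	if err != nil {
		return nil, xerrors.Errorf("failed to sign new block: %w", err)
	}
	next.BlockSig = sig

	return &types.FullBlock{
		Header:        next,
		BlsMessages:   blsMessages,
		SecpkMessages: secpkMessages,
	}, nil
}
```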
+func validateLocalBlock(ctx context.Context, msg *pubsub.Message) (pubsub.ValidationResult, string) { + stats.Record(ctx, metrics.BlockPublished.M(1)) + + if size := msg.Size(); size > 1<<20-1<<15 { + log.Errorf("ignoring oversize block (%dB)", size) + return pubsub.ValidationIgnore, "oversize_block" + } + + blk, what, err := decodeAndCheckBlock(msg) + if err != nil { + log.Errorf("got invalid local block: %s", err) + return pubsub.ValidationIgnore, what + } + + msg.ValidatorData = blk + stats.Record(ctx, metrics.BlockValidationSuccess.M(1)) + return pubsub.ValidationAccept, "" +} + +func decodeAndCheckBlock(msg *pubsub.Message) (*types.BlockMsg, string, error) { + blk, err := types.DecodeBlockMsg(msg.GetData()) + if err != nil { + return nil, "invalid", xerrors.Errorf("error decoding block: %w", err) + } + + if count := len(blk.BlsMessages) + len(blk.SecpkMessages); count > build.BlockMessageLimit { + return nil, "too_many_messages", fmt.Errorf("block contains too many messages (%d)", count) + } + + // make sure we have a signature + if blk.Header.BlockSig == nil { + return nil, "missing_signature", fmt.Errorf("block without a signature") + } + + return blk, "", nil +} + +func validateMsgMeta(ctx context.Context, msg *types.BlockMsg) error { + // TODO there has to be a simpler way to do this without the blockstore dance + // block headers use adt0 + store := blockadt.WrapStore(ctx, cbor.NewCborStore(bstore.NewMemory())) + bmArr := blockadt.MakeEmptyArray(store) + smArr := blockadt.MakeEmptyArray(store) + + for i, m := range msg.BlsMessages { + c := cbg.CborCid(m) + if err := bmArr.Set(uint64(i), &c); err != nil { + return err + } + } + + for i, m := range msg.SecpkMessages { + c := cbg.CborCid(m) + if err := smArr.Set(uint64(i), &c); err != nil { + return err + } + } + + bmroot, err := bmArr.Root() + if err != nil { + return err + } + + smroot, err := smArr.Root() + if err != nil { + return err + } + + mrcid, err := store.Put(store.Context(), &types.MsgMeta{ + BlsMessages: bmroot, + SecpkMessages: smroot, + }) + + if err != nil { + return err + } + + if msg.Header.Messages != mrcid { + return fmt.Errorf("messages didn't match root cid in header") + } + + return nil +} diff --git a/chain/consensus/filcns/compute_state.go b/chain/consensus/compute_state.go similarity index 90% rename from chain/consensus/filcns/compute_state.go rename to chain/consensus/compute_state.go index 9ba70d732..b7914cdf6 100644 --- a/chain/consensus/filcns/compute_state.go +++ b/chain/consensus/compute_state.go @@ -1,8 +1,9 @@ -package filcns +package consensus import ( "context" "sync/atomic" + "time" "github.com/ipfs/go-cid" cbor "github.com/ipfs/go-ipld-cbor" @@ -26,7 +27,6 @@ import ( "github.com/filecoin-project/lotus/blockstore" "github.com/filecoin-project/lotus/build" - "github.com/filecoin-project/lotus/chain/actors" "github.com/filecoin-project/lotus/chain/actors/builtin" "github.com/filecoin-project/lotus/chain/actors/builtin/cron" "github.com/filecoin-project/lotus/chain/actors/builtin/reward" @@ -56,10 +56,12 @@ func NewActorRegistry() *vm.ActorRegistry { return inv } -type TipSetExecutor struct{} +type TipSetExecutor struct { + reward RewardFunc +} -func NewTipSetExecutor() *TipSetExecutor { - return &TipSetExecutor{} +func NewTipSetExecutor(r RewardFunc) *TipSetExecutor { + return &TipSetExecutor{reward: r} } func (t *TipSetExecutor) NewActorRegistry() *vm.ActorRegistry { @@ -113,6 +115,8 @@ func (t *TipSetExecutor) ApplyBlocks(ctx context.Context, return sm.VMConstructor()(ctx, vmopt) } + var 
cronGas int64 + runCron := func(vmCron vm.Interface, epoch abi.ChainEpoch) error { cronMsg := &types.Message{ To: cron.Address, @@ -130,6 +134,8 @@ func (t *TipSetExecutor) ApplyBlocks(ctx context.Context, return xerrors.Errorf("running cron: %w", err) } + cronGas += ret.GasUsed + if em != nil { if err := em.MessageApplied(ctx, ts, cronMsg.Cid(), cronMsg, ret, true); err != nil { return xerrors.Errorf("callback failed on cron message: %w", err) @@ -181,7 +187,9 @@ func (t *TipSetExecutor) ApplyBlocks(ctx context.Context, } } - partDone() + vmEarly := partDone() + earlyCronGas := cronGas + cronGas = 0 partDone = metrics.Timer(ctx, metrics.VMApplyMessages) vmi, err := makeVm(pstate, epoch, ts.MinTimestamp()) @@ -196,6 +204,8 @@ func (t *TipSetExecutor) ApplyBlocks(ctx context.Context, processedMsgs = make(map[cid.Cid]struct{}) ) + var msgGas int64 + for _, b := range bms { penalty := types.NewInt(0) gasReward := big.Zero() @@ -210,6 +220,8 @@ func (t *TipSetExecutor) ApplyBlocks(ctx context.Context, return cid.Undef, cid.Undef, err } + msgGas += r.GasUsed + receipts = append(receipts, &r.MessageReceipt) gasReward = big.Add(gasReward, r.GasCosts.MinerTip) penalty = big.Add(penalty, r.GasCosts.MinerPenalty) @@ -227,50 +239,26 @@ func (t *TipSetExecutor) ApplyBlocks(ctx context.Context, processedMsgs[m.Cid()] = struct{}{} } - params, err := actors.SerializeParams(&reward.AwardBlockRewardParams{ + params := &reward.AwardBlockRewardParams{ Miner: b.Miner, Penalty: penalty, GasReward: gasReward, WinCount: b.WinCount, - }) - if err != nil { - return cid.Undef, cid.Undef, xerrors.Errorf("failed to serialize award params: %w", err) } - - rwMsg := &types.Message{ - From: builtin.SystemActorAddr, - To: reward.Address, - Nonce: uint64(epoch), - Value: types.NewInt(0), - GasFeeCap: types.NewInt(0), - GasPremium: types.NewInt(0), - GasLimit: 1 << 30, - Method: reward.Methods.AwardBlockReward, - Params: params, - } - ret, actErr := vmi.ApplyImplicitMessage(ctx, rwMsg) - if actErr != nil { - return cid.Undef, cid.Undef, xerrors.Errorf("failed to apply reward message for miner %s: %w", b.Miner, actErr) - } - if em != nil { - if err := em.MessageApplied(ctx, ts, rwMsg.Cid(), rwMsg, ret, true); err != nil { - return cid.Undef, cid.Undef, xerrors.Errorf("callback failed on reward message: %w", err) - } - } - - if ret.ExitCode != 0 { - return cid.Undef, cid.Undef, xerrors.Errorf("reward application message failed (exit %d): %s", ret.ExitCode, ret.ActorErr) + rErr := t.reward(ctx, vmi, em, epoch, ts, params) + if rErr != nil { + return cid.Undef, cid.Undef, xerrors.Errorf("error applying reward: %w", rErr) } } - partDone() + vmMsg := partDone() partDone = metrics.Timer(ctx, metrics.VMApplyCron) if err := runCron(vmi, epoch); err != nil { return cid.Cid{}, cid.Cid{}, err } - partDone() + vmCron := partDone() partDone = metrics.Timer(ctx, metrics.VMApplyFlush) rectarr := blockadt.MakeEmptyArray(sm.ChainStore().ActorStore(ctx)) @@ -306,6 +294,11 @@ func (t *TipSetExecutor) ApplyBlocks(ctx context.Context, return cid.Undef, cid.Undef, xerrors.Errorf("vm flush failed: %w", err) } + vmFlush := partDone() + partDone = func() time.Duration { return time.Duration(0) } + + log.Infow("ApplyBlocks stats", "early", vmEarly, "earlyCronGas", earlyCronGas, "vmMsg", vmMsg, "msgGas", msgGas, "vmCron", vmCron, "cronGas", cronGas, "vmFlush", vmFlush, "epoch", epoch, "tsk", ts.Key()) + stats.Record(ctx, metrics.VMSends.M(int64(atomic.LoadUint64(&vm.StatSends))), metrics.VMApplied.M(int64(atomic.LoadUint64(&vm.StatApplied)))) diff 
--git a/chain/consensus/filcns/filecoin.go b/chain/consensus/filcns/filecoin.go index 2cd680746..2e3baa4db 100644 --- a/chain/consensus/filcns/filecoin.go +++ b/chain/consensus/filcns/filecoin.go @@ -4,46 +4,36 @@ import ( "bytes" "context" "errors" - "fmt" "os" - "strings" "time" - "github.com/hashicorp/go-multierror" "github.com/ipfs/go-cid" - cbor "github.com/ipfs/go-ipld-cbor" logging "github.com/ipfs/go-log/v2" - pubsub "github.com/libp2p/go-libp2p-pubsub" - "github.com/multiformats/go-varint" - cbg "github.com/whyrusleeping/cbor-gen" - "go.opencensus.io/stats" "go.opencensus.io/trace" "golang.org/x/xerrors" "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" - builtintypes "github.com/filecoin-project/go-state-types/builtin" "github.com/filecoin-project/go-state-types/crypto" "github.com/filecoin-project/go-state-types/network" - blockadt "github.com/filecoin-project/specs-actors/actors/util/adt" "github.com/filecoin-project/specs-actors/v7/actors/runtime/proof" - bstore "github.com/filecoin-project/lotus/blockstore" + "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/chain" + "github.com/filecoin-project/lotus/chain/actors" "github.com/filecoin-project/lotus/chain/actors/builtin" "github.com/filecoin-project/lotus/chain/actors/builtin/power" + "github.com/filecoin-project/lotus/chain/actors/builtin/reward" "github.com/filecoin-project/lotus/chain/beacon" "github.com/filecoin-project/lotus/chain/consensus" "github.com/filecoin-project/lotus/chain/rand" - "github.com/filecoin-project/lotus/chain/state" "github.com/filecoin-project/lotus/chain/stmgr" "github.com/filecoin-project/lotus/chain/store" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/vm" "github.com/filecoin-project/lotus/lib/async" "github.com/filecoin-project/lotus/lib/sigs" - "github.com/filecoin-project/lotus/metrics" "github.com/filecoin-project/lotus/storage/sealer/ffiwrapper" "github.com/filecoin-project/lotus/storage/sealer/storiface" ) @@ -69,6 +59,39 @@ type FilecoinEC struct { // the theoretical max height based on systime are quickly rejected const MaxHeightDrift = 5 +var RewardFunc = func(ctx context.Context, vmi vm.Interface, em stmgr.ExecMonitor, + epoch abi.ChainEpoch, ts *types.TipSet, params *reward.AwardBlockRewardParams) error { + ser, err := actors.SerializeParams(params) + if err != nil { + return xerrors.Errorf("failed to serialize award params: %w", err) + } + rwMsg := &types.Message{ + From: builtin.SystemActorAddr, + To: reward.Address, + Nonce: uint64(epoch), + Value: types.NewInt(0), + GasFeeCap: types.NewInt(0), + GasPremium: types.NewInt(0), + GasLimit: 1 << 30, + Method: reward.Methods.AwardBlockReward, + Params: ser, + } + ret, actErr := vmi.ApplyImplicitMessage(ctx, rwMsg) + if actErr != nil { + return xerrors.Errorf("failed to apply reward message: %w", actErr) + } + if em != nil { + if err := em.MessageApplied(ctx, ts, rwMsg.Cid(), rwMsg, ret, true); err != nil { + return xerrors.Errorf("callback failed on reward message: %w", err) + } + } + + if ret.ExitCode != 0 { + return xerrors.Errorf("reward application message failed (exit %d): %s", ret.ExitCode, ret.ActorErr) + } + return nil +} + func NewFilecoinExpectedConsensus(sm *stmgr.StateManager, beacon beacon.Schedule, verifier storiface.Verifier, genesis chain.Genesis) consensus.Consensus { if build.InsecurePoStValidation { 
log.Warn("*********************************************************************************************") @@ -127,17 +150,6 @@ func (filec *FilecoinEC) ValidateBlock(ctx context.Context, b *types.FullBlock) log.Warn("Got block from the future, but within threshold", h.Timestamp, build.Clock.Now().Unix()) } - msgsCheck := async.Err(func() error { - if b.Cid() == build.WhitelistedBlock { - return nil - } - - if err := filec.checkBlockMessages(ctx, b, baseTs); err != nil { - return xerrors.Errorf("block had invalid messages: %w", err) - } - return nil - }) - minerCheck := async.Err(func() error { if err := filec.minerIsValid(ctx, h.Miner, baseTs); err != nil { return xerrors.Errorf("minerIsValid failed: %w", err) @@ -145,17 +157,6 @@ func (filec *FilecoinEC) ValidateBlock(ctx context.Context, b *types.FullBlock) return nil }) - baseFeeCheck := async.Err(func() error { - baseFee, err := filec.store.ComputeBaseFee(ctx, baseTs) - if err != nil { - return xerrors.Errorf("computing base fee: %w", err) - } - if types.BigCmp(baseFee, b.Header.ParentBaseFee) != 0 { - return xerrors.Errorf("base fee doesn't match: %s (header) != %s (computed)", - b.Header.ParentBaseFee, baseFee) - } - return nil - }) pweight, err := filec.store.Weight(ctx, baseTs) if err != nil { return xerrors.Errorf("getting parent weight: %w", err) @@ -166,34 +167,6 @@ func (filec *FilecoinEC) ValidateBlock(ctx context.Context, b *types.FullBlock) b.Header.ParentWeight, pweight) } - stateRootCheck := async.Err(func() error { - stateroot, precp, err := filec.sm.TipSetState(ctx, baseTs) - if err != nil { - return xerrors.Errorf("get tipsetstate(%d, %s) failed: %w", h.Height, h.Parents, err) - } - - if stateroot != h.ParentStateRoot { - msgs, err := filec.store.MessagesForTipset(ctx, baseTs) - if err != nil { - log.Error("failed to load messages for tipset during tipset state mismatch error: ", err) - } else { - log.Warn("Messages for tipset with mismatching state:") - for i, m := range msgs { - mm := m.VMMessage() - log.Warnf("Message[%d]: from=%s to=%s method=%d params=%x", i, mm.From, mm.To, mm.Method, mm.Params) - } - } - - return xerrors.Errorf("parent state root did not match computed state (%s != %s)", h.ParentStateRoot, stateroot) - } - - if precp != h.ParentMessageReceipts { - return xerrors.Errorf("parent receipts root did not match computed value (%s != %s)", precp, h.ParentMessageReceipts) - } - - return nil - }) - // Stuff that needs worker address waddr, err := stmgr.GetMinerWorkerRaw(ctx, filec.sm, lbst, h.Miner) if err != nil { @@ -255,10 +228,11 @@ func (filec *FilecoinEC) ValidateBlock(ctx context.Context, b *types.FullBlock) }) blockSigCheck := async.Err(func() error { - if err := sigs.CheckBlockSignature(ctx, h, waddr); err != nil { + if err := verifyBlockSignature(ctx, h, waddr); err != nil { return xerrors.Errorf("check block signature failed: %w", err) } return nil + }) beaconValuesCheck := async.Err(func() error { @@ -307,44 +281,17 @@ func (filec *FilecoinEC) ValidateBlock(ctx context.Context, b *types.FullBlock) return nil }) - await := []async.ErrorFuture{ + commonChecks := consensus.CommonBlkChecks(ctx, filec.sm, filec.store, b, baseTs) + await := append([]async.ErrorFuture{ minerCheck, tktsCheck, blockSigCheck, beaconValuesCheck, wproofCheck, winnerCheck, - msgsCheck, - baseFeeCheck, - stateRootCheck, - } + }, commonChecks...) 
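With the reward logic parameterized, filcns.RewardFunc above carries the Filecoin-specific AwardBlockReward implicit message, and the shared TipSetExecutor (chain/consensus/compute_state.go) just invokes whatever RewardFunc it was constructed with. A hedged sketch of a different, purely illustrative implementation plugging in a no-op reward:

```go
package myconsensus

import (
	"context"

	"github.com/filecoin-project/go-state-types/abi"

	"github.com/filecoin-project/lotus/chain/actors/builtin/reward"
	"github.com/filecoin-project/lotus/chain/consensus"
	"github.com/filecoin-project/lotus/chain/stmgr"
	"github.com/filecoin-project/lotus/chain/types"
	"github.com/filecoin-project/lotus/chain/vm"
)

// noRewards is a RewardFunc for a hypothetical protocol that pays no block
// rewards; filcns.RewardFunc is the production Filecoin version.
var noRewards consensus.RewardFunc = func(ctx context.Context, vmi vm.Interface,
	em stmgr.ExecMonitor, epoch abi.ChainEpoch, ts *types.TipSet,
	params *reward.AwardBlockRewardParams) error {
	return nil
}

// The tipset executor is then built with the chosen reward function.
var tse = consensus.NewTipSetExecutor(noRewards)
```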
- var merr error - for _, fut := range await { - if err := fut.AwaitContext(ctx); err != nil { - merr = multierror.Append(merr, err) - } - } - if merr != nil { - mulErr := merr.(*multierror.Error) - mulErr.ErrorFormat = func(es []error) string { - if len(es) == 1 { - return fmt.Sprintf("1 error occurred:\n\t* %+v\n\n", es[0]) - } - - points := make([]string, len(es)) - for i, err := range es { - points[i] = fmt.Sprintf("* %+v", err) - } - - return fmt.Sprintf( - "%d errors occurred:\n\t%s\n\n", - len(es), strings.Join(points, "\n\t")) - } - return mulErr - } - - return nil + return consensus.RunAsyncChecks(ctx, await) } func blockSanityChecks(h *types.BlockHeader) error { @@ -435,209 +382,6 @@ func (filec *FilecoinEC) VerifyWinningPoStProof(ctx context.Context, nv network. return nil } -func IsValidForSending(nv network.Version, act *types.Actor) bool { - // Before nv18 (Hygge), we only supported built-in account actors as senders. - // - // Note: this gate is probably superfluous, since: - // 1. Placeholder actors cannot be created before nv18. - // 2. EthAccount actors cannot be created before nv18. - // 3. Delegated addresses cannot be created before nv18. - // - // But it's a safeguard. - // - // Note 2: ad-hoc checks for network versions like this across the codebase - // will be problematic with networks with diverging version lineages - // (e.g. Hyperspace). We need to revisit this strategy entirely. - if nv < network.Version18 { - return builtin.IsAccountActor(act.Code) - } - - // After nv18, we also support other kinds of senders. - if builtin.IsAccountActor(act.Code) || builtin.IsEthAccountActor(act.Code) { - return true - } - - // Allow placeholder actors with a delegated address and nonce 0 to send a message. - // These will be converted to an EthAccount actor on first send. - if !builtin.IsPlaceholderActor(act.Code) || act.Nonce != 0 || act.Address == nil || act.Address.Protocol() != address.Delegated { - return false - } - - // Only allow such actors to send if their delegated address is in the EAM's namespace. 
- id, _, err := varint.FromUvarint(act.Address.Payload()) - return err == nil && id == builtintypes.EthereumAddressManagerActorID -} - -// TODO: We should extract this somewhere else and make the message pool and miner use the same logic -func (filec *FilecoinEC) checkBlockMessages(ctx context.Context, b *types.FullBlock, baseTs *types.TipSet) error { - { - var sigCids []cid.Cid // this is what we get for people not wanting the marshalcbor method on the cid type - var pubks [][]byte - - for _, m := range b.BlsMessages { - sigCids = append(sigCids, m.Cid()) - - pubk, err := filec.sm.GetBlsPublicKey(ctx, m.From, baseTs) - if err != nil { - return xerrors.Errorf("failed to load bls public to validate block: %w", err) - } - - pubks = append(pubks, pubk) - } - - if err := consensus.VerifyBlsAggregate(ctx, b.Header.BLSAggregate, sigCids, pubks); err != nil { - return xerrors.Errorf("bls aggregate signature was invalid: %w", err) - } - } - - nonces := make(map[address.Address]uint64) - - stateroot, _, err := filec.sm.TipSetState(ctx, baseTs) - if err != nil { - return xerrors.Errorf("failed to compute tipsettate for %s: %w", baseTs.Key(), err) - } - - st, err := state.LoadStateTree(filec.store.ActorStore(ctx), stateroot) - if err != nil { - return xerrors.Errorf("failed to load base state tree: %w", err) - } - - nv := filec.sm.GetNetworkVersion(ctx, b.Header.Height) - pl := vm.PricelistByEpoch(b.Header.Height) - var sumGasLimit int64 - checkMsg := func(msg types.ChainMsg) error { - m := msg.VMMessage() - - // Phase 1: syntactic validation, as defined in the spec - minGas := pl.OnChainMessage(msg.ChainLength()) - if err := m.ValidForBlockInclusion(minGas.Total(), nv); err != nil { - return xerrors.Errorf("msg %s invalid for block inclusion: %w", m.Cid(), err) - } - - // ValidForBlockInclusion checks if any single message does not exceed BlockGasLimit - // So below is overflow safe - sumGasLimit += m.GasLimit - if sumGasLimit > build.BlockGasLimit { - return xerrors.Errorf("block gas limit exceeded") - } - - // Phase 2: (Partial) semantic validation: - // the sender exists and is an account actor, and the nonces make sense - var sender address.Address - if nv >= network.Version13 { - sender, err = st.LookupID(m.From) - if err != nil { - return xerrors.Errorf("failed to lookup sender %s: %w", m.From, err) - } - } else { - sender = m.From - } - - if _, ok := nonces[sender]; !ok { - // `GetActor` does not validate that this is an account actor. - act, err := st.GetActor(sender) - if err != nil { - return xerrors.Errorf("failed to get actor: %w", err) - } - - if !IsValidForSending(nv, act) { - return xerrors.New("Sender must be an account actor") - } - nonces[sender] = act.Nonce - } - - if nonces[sender] != m.Nonce { - return xerrors.Errorf("wrong nonce (exp: %d, got: %d)", nonces[sender], m.Nonce) - } - nonces[sender]++ - - return nil - } - - // Validate message arrays in a temporary blockstore. 
- tmpbs := bstore.NewMemory() - tmpstore := blockadt.WrapStore(ctx, cbor.NewCborStore(tmpbs)) - - bmArr := blockadt.MakeEmptyArray(tmpstore) - for i, m := range b.BlsMessages { - if err := checkMsg(m); err != nil { - return xerrors.Errorf("block had invalid bls message at index %d: %w", i, err) - } - - c, err := store.PutMessage(ctx, tmpbs, m) - if err != nil { - return xerrors.Errorf("failed to store message %s: %w", m.Cid(), err) - } - - k := cbg.CborCid(c) - if err := bmArr.Set(uint64(i), &k); err != nil { - return xerrors.Errorf("failed to put bls message at index %d: %w", i, err) - } - } - - smArr := blockadt.MakeEmptyArray(tmpstore) - for i, m := range b.SecpkMessages { - if nv >= network.Version14 && !chain.IsValidSecpkSigType(nv, m.Signature.Type) { - return xerrors.Errorf("block had invalid signed message at index %d: %w", i, err) - } - - if err := checkMsg(m); err != nil { - return xerrors.Errorf("block had invalid secpk message at index %d: %w", i, err) - } - - // `From` being an account actor is only validated inside the `vm.ResolveToDeterministicAddr` call - // in `StateManager.ResolveToDeterministicAddress` here (and not in `checkMsg`). - kaddr, err := filec.sm.ResolveToDeterministicAddress(ctx, m.Message.From, baseTs) - if err != nil { - return xerrors.Errorf("failed to resolve key addr: %w", err) - } - - if err := chain.AuthenticateMessage(m, kaddr); err != nil { - return xerrors.Errorf("failed to validate signature: %w", err) - } - - c, err := store.PutMessage(ctx, tmpbs, m) - if err != nil { - return xerrors.Errorf("failed to store message %s: %w", m.Cid(), err) - } - k := cbg.CborCid(c) - if err := smArr.Set(uint64(i), &k); err != nil { - return xerrors.Errorf("failed to put secpk message at index %d: %w", i, err) - } - } - - bmroot, err := bmArr.Root() - if err != nil { - return xerrors.Errorf("failed to root bls msgs: %w", err) - - } - - smroot, err := smArr.Root() - if err != nil { - return xerrors.Errorf("failed to root secp msgs: %w", err) - } - - mrcid, err := tmpstore.Put(ctx, &types.MsgMeta{ - BlsMessages: bmroot, - SecpkMessages: smroot, - }) - if err != nil { - return xerrors.Errorf("failed to put msg meta: %w", err) - } - - if b.Header.Messages != mrcid { - return fmt.Errorf("messages didnt match message root in header") - } - - // Finally, flush. 
- err = vm.Copy(ctx, tmpbs, filec.store.ChainBlockstore(), mrcid) - if err != nil { - return xerrors.Errorf("failed to flush:%w", err) - } - - return nil -} - func (filec *FilecoinEC) IsEpochBeyondCurrMax(epoch abi.ChainEpoch) bool { if filec.genesis == nil { return false @@ -693,140 +437,7 @@ func VerifyVRF(ctx context.Context, worker address.Address, vrfBase, vrfproof [] var ErrSoftFailure = errors.New("soft validation failure") var ErrInsufficientPower = errors.New("incoming block's miner does not have minimum power") -func (filec *FilecoinEC) ValidateBlockPubsub(ctx context.Context, self bool, msg *pubsub.Message) (pubsub.ValidationResult, string) { - if self { - return filec.validateLocalBlock(ctx, msg) - } - - // track validation time - begin := build.Clock.Now() - defer func() { - log.Debugf("block validation time: %s", build.Clock.Since(begin)) - }() - - stats.Record(ctx, metrics.BlockReceived.M(1)) - - recordFailureFlagPeer := func(what string) { - // bv.Validate will flag the peer in that case - panic(what) - } - - blk, what, err := filec.decodeAndCheckBlock(msg) - if err != nil { - log.Error("got invalid block over pubsub: ", err) - recordFailureFlagPeer(what) - return pubsub.ValidationReject, what - } - - // validate the block meta: the Message CID in the header must match the included messages - err = filec.validateMsgMeta(ctx, blk) - if err != nil { - log.Warnf("error validating message metadata: %s", err) - recordFailureFlagPeer("invalid_block_meta") - return pubsub.ValidationReject, "invalid_block_meta" - } - - reject, err := filec.validateBlockHeader(ctx, blk.Header) - if err != nil { - if reject == "" { - log.Warn("ignoring block msg: ", err) - return pubsub.ValidationIgnore, reject - } - recordFailureFlagPeer(reject) - return pubsub.ValidationReject, reject - } - - // all good, accept the block - msg.ValidatorData = blk - stats.Record(ctx, metrics.BlockValidationSuccess.M(1)) - return pubsub.ValidationAccept, "" -} - -func (filec *FilecoinEC) validateLocalBlock(ctx context.Context, msg *pubsub.Message) (pubsub.ValidationResult, string) { - stats.Record(ctx, metrics.BlockPublished.M(1)) - - if size := msg.Size(); size > 1<<20-1<<15 { - log.Errorf("ignoring oversize block (%dB)", size) - return pubsub.ValidationIgnore, "oversize_block" - } - - blk, what, err := filec.decodeAndCheckBlock(msg) - if err != nil { - log.Errorf("got invalid local block: %s", err) - return pubsub.ValidationIgnore, what - } - - msg.ValidatorData = blk - stats.Record(ctx, metrics.BlockValidationSuccess.M(1)) - return pubsub.ValidationAccept, "" -} - -func (filec *FilecoinEC) decodeAndCheckBlock(msg *pubsub.Message) (*types.BlockMsg, string, error) { - blk, err := types.DecodeBlockMsg(msg.GetData()) - if err != nil { - return nil, "invalid", xerrors.Errorf("error decoding block: %w", err) - } - - if count := len(blk.BlsMessages) + len(blk.SecpkMessages); count > build.BlockMessageLimit { - return nil, "too_many_messages", fmt.Errorf("block contains too many messages (%d)", count) - } - - // make sure we have a signature - if blk.Header.BlockSig == nil { - return nil, "missing_signature", fmt.Errorf("block without a signature") - } - - return blk, "", nil -} - -func (filec *FilecoinEC) validateMsgMeta(ctx context.Context, msg *types.BlockMsg) error { - // TODO there has to be a simpler way to do this without the blockstore dance - // block headers use adt0 - store := blockadt.WrapStore(ctx, cbor.NewCborStore(bstore.NewMemory())) - bmArr := blockadt.MakeEmptyArray(store) - smArr := 
blockadt.MakeEmptyArray(store) - - for i, m := range msg.BlsMessages { - c := cbg.CborCid(m) - if err := bmArr.Set(uint64(i), &c); err != nil { - return err - } - } - - for i, m := range msg.SecpkMessages { - c := cbg.CborCid(m) - if err := smArr.Set(uint64(i), &c); err != nil { - return err - } - } - - bmroot, err := bmArr.Root() - if err != nil { - return err - } - - smroot, err := smArr.Root() - if err != nil { - return err - } - - mrcid, err := store.Put(store.Context(), &types.MsgMeta{ - BlsMessages: bmroot, - SecpkMessages: smroot, - }) - - if err != nil { - return err - } - - if msg.Header.Messages != mrcid { - return fmt.Errorf("messages didn't match root cid in header") - } - - return nil -} - -func (filec *FilecoinEC) validateBlockHeader(ctx context.Context, b *types.BlockHeader) (rejectReason string, err error) { +func (filec *FilecoinEC) ValidateBlockHeader(ctx context.Context, b *types.BlockHeader) (rejectReason string, err error) { // we want to ensure that it is a block from a known miner; we reject blocks from unknown miners // to prevent spam attacks. @@ -901,4 +512,27 @@ func (filec *FilecoinEC) isChainNearSynced() bool { return build.Clock.Since(timestampTime) < 6*time.Hour } +func verifyBlockSignature(ctx context.Context, h *types.BlockHeader, + addr address.Address) error { + return sigs.CheckBlockSignature(ctx, h, addr) +} + +func signBlock(ctx context.Context, w api.Wallet, + addr address.Address, next *types.BlockHeader) error { + + nosigbytes, err := next.SigningBytes() + if err != nil { + return xerrors.Errorf("failed to get signing bytes for block: %w", err) + } + + sig, err := w.WalletSign(ctx, addr, nosigbytes, api.MsgMeta{ + Type: api.MTBlock, + }) + if err != nil { + return xerrors.Errorf("failed to sign new block: %w", err) + } + next.BlockSig = sig + return nil +} + var _ consensus.Consensus = &FilecoinEC{} diff --git a/chain/consensus/filcns/mine.go b/chain/consensus/filcns/mine.go index 234c1d654..956cba252 100644 --- a/chain/consensus/filcns/mine.go +++ b/chain/consensus/filcns/mine.go @@ -3,13 +3,9 @@ package filcns import ( "context" - "github.com/ipfs/go-cid" "golang.org/x/xerrors" - "github.com/filecoin-project/go-state-types/crypto" - "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/chain" "github.com/filecoin-project/lotus/chain/consensus" "github.com/filecoin-project/lotus/chain/stmgr" "github.com/filecoin-project/lotus/chain/types" @@ -21,11 +17,6 @@ func (filec *FilecoinEC) CreateBlock(ctx context.Context, w api.Wallet, bt *api. return nil, xerrors.Errorf("failed to load parent tipset: %w", err) } - st, recpts, err := filec.sm.TipSetState(ctx, pts) - if err != nil { - return nil, xerrors.Errorf("failed to load tipset state: %w", err) - } - _, lbst, err := stmgr.GetLookbackTipSetForRound(ctx, filec.sm, pts, bt.Epoch) if err != nil { return nil, xerrors.Errorf("getting lookback miner actor state: %w", err) @@ -36,102 +27,15 @@ func (filec *FilecoinEC) CreateBlock(ctx context.Context, w api.Wallet, bt *api. 
return nil, xerrors.Errorf("failed to get miner worker: %w", err) } - next := &types.BlockHeader{ - Miner: bt.Miner, - Parents: bt.Parents.Cids(), - Ticket: bt.Ticket, - ElectionProof: bt.Eproof, - - BeaconEntries: bt.BeaconValues, - Height: bt.Epoch, - Timestamp: bt.Timestamp, - WinPoStProof: bt.WinningPoStProof, - ParentStateRoot: st, - ParentMessageReceipts: recpts, - } - - var blsMessages []*types.Message - var secpkMessages []*types.SignedMessage - - var blsMsgCids, secpkMsgCids []cid.Cid - var blsSigs []crypto.Signature - nv := filec.sm.GetNetworkVersion(ctx, bt.Epoch) - for _, msg := range bt.Messages { - if msg.Signature.Type == crypto.SigTypeBLS { - blsSigs = append(blsSigs, msg.Signature) - blsMessages = append(blsMessages, &msg.Message) - - c, err := filec.sm.ChainStore().PutMessage(ctx, &msg.Message) - if err != nil { - return nil, err - } - - blsMsgCids = append(blsMsgCids, c) - } else if chain.IsValidSecpkSigType(nv, msg.Signature.Type) { - c, err := filec.sm.ChainStore().PutMessage(ctx, msg) - if err != nil { - return nil, err - } - - secpkMsgCids = append(secpkMsgCids, c) - secpkMessages = append(secpkMessages, msg) - - } else { - return nil, xerrors.Errorf("unknown sig type: %d", msg.Signature.Type) - } - } - - store := filec.sm.ChainStore().ActorStore(ctx) - blsmsgroot, err := consensus.ToMessagesArray(store, blsMsgCids) + next, blsMessages, secpkMessages, err := consensus.CreateBlockHeader(ctx, filec.sm, pts, bt) if err != nil { - return nil, xerrors.Errorf("building bls amt: %w", err) - } - secpkmsgroot, err := consensus.ToMessagesArray(store, secpkMsgCids) - if err != nil { - return nil, xerrors.Errorf("building secpk amt: %w", err) + return nil, xerrors.Errorf("failed to process messages from block template: %w", err) } - mmcid, err := store.Put(store.Context(), &types.MsgMeta{ - BlsMessages: blsmsgroot, - SecpkMessages: secpkmsgroot, - }) - if err != nil { - return nil, err - } - next.Messages = mmcid - - aggSig, err := consensus.AggregateSignatures(blsSigs) - if err != nil { - return nil, err - } - - next.BLSAggregate = aggSig - pweight, err := filec.sm.ChainStore().Weight(ctx, pts) - if err != nil { - return nil, err - } - next.ParentWeight = pweight - - baseFee, err := filec.sm.ChainStore().ComputeBaseFee(ctx, pts) - if err != nil { - return nil, xerrors.Errorf("computing base fee: %w", err) - } - next.ParentBaseFee = baseFee - - nosigbytes, err := next.SigningBytes() - if err != nil { - return nil, xerrors.Errorf("failed to get signing bytes for block: %w", err) - } - - sig, err := w.WalletSign(ctx, worker, nosigbytes, api.MsgMeta{ - Type: api.MTBlock, - }) - if err != nil { + if err := signBlock(ctx, w, worker, next); err != nil { return nil, xerrors.Errorf("failed to sign new block: %w", err) } - next.BlockSig = sig - fullBlock := &types.FullBlock{ Header: next, BlsMessages: blsMessages, diff --git a/chain/consensus/filcns/upgrades.go b/chain/consensus/filcns/upgrades.go index 061e45e69..075937a3c 100644 --- a/chain/consensus/filcns/upgrades.go +++ b/chain/consensus/filcns/upgrades.go @@ -533,12 +533,11 @@ func UpgradeFaucetBurnRecovery(ctx context.Context, sm *stmgr.StateManager, _ st MessageReceipt: *stmgr.MakeFakeRct(), ActorErr: nil, ExecutionTrace: types.ExecutionTrace{ - Msg: fakeMsg, - MsgRct: stmgr.MakeFakeRct(), - Error: "", - Duration: 0, - GasCharges: nil, - Subcalls: subcalls, + Msg: types.MessageTrace{ + To: fakeMsg.To, + From: fakeMsg.From, + }, + Subcalls: subcalls, }, Duration: 0, GasCosts: nil, @@ -711,12 +710,11 @@ func 
splitGenesisMultisig0(ctx context.Context, em stmgr.ExecMonitor, addr addre MessageReceipt: *stmgr.MakeFakeRct(), ActorErr: nil, ExecutionTrace: types.ExecutionTrace{ - Msg: fakeMsg, - MsgRct: stmgr.MakeFakeRct(), - Error: "", - Duration: 0, - GasCharges: nil, - Subcalls: subcalls, + Msg: types.MessageTrace{ + From: fakeMsg.From, + To: fakeMsg.To, + }, + Subcalls: subcalls, }, Duration: 0, GasCosts: nil, diff --git a/chain/consensus/iface.go b/chain/consensus/iface.go index 06dc0a113..10c3ead74 100644 --- a/chain/consensus/iface.go +++ b/chain/consensus/iface.go @@ -4,17 +4,99 @@ import ( "context" pubsub "github.com/libp2p/go-libp2p-pubsub" + "go.opencensus.io/stats" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/chain/actors/builtin/reward" + "github.com/filecoin-project/lotus/chain/stmgr" "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/chain/vm" + "github.com/filecoin-project/lotus/metrics" ) type Consensus interface { + // ValidateBlockHeader is called by peers when they receive a new block through the network. + // + // This is a fast sanity-check validation performed by the PubSub protocol before delivering + // it to the syncer. It checks that the block has the right format and performs + // other consensus-specific light verifications like ensuring that the block is signed by + // a valid miner, or that it includes all the data required for a full verification. + ValidateBlockHeader(ctx context.Context, b *types.BlockHeader) (rejectReason string, err error) + + // ValidateBlock is called by the syncer to determine whether to accept a block or not. + // + // It performs all the checks needed by the syncer to accept + // the block (signature verifications, VRF checks, message validity, etc.) ValidateBlock(ctx context.Context, b *types.FullBlock) (err error) - ValidateBlockPubsub(ctx context.Context, self bool, msg *pubsub.Message) (pubsub.ValidationResult, string) + + // IsEpochBeyondCurrMax is used to configure the fork rules for longest-chain + // consensus protocols. IsEpochBeyondCurrMax(epoch abi.ChainEpoch) bool + // CreateBlock implements all the logic required to propose and assemble a new Filecoin block. + // + // This function encapsulates all the consensus-specific actions to propose a new block + // such as the ordering of transactions, the inclusion of consensus proofs, the signature + // of the block, etc. CreateBlock(ctx context.Context, w api.Wallet, bt *api.BlockTemplate) (*types.FullBlock, error) } + +// RewardFunc parametrizes the logic for rewards when a message is executed. +// +// Each consensus implementation can set its own reward function. +type RewardFunc func(ctx context.Context, vmi vm.Interface, em stmgr.ExecMonitor, + epoch abi.ChainEpoch, ts *types.TipSet, params *reward.AwardBlockRewardParams) error + +// ValidateBlockPubsub implements the common checks performed by all consensus implementations +// when a block is received through the pubsub channel. 
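+// +// Consensus implementations therefore only supply ValidateBlockHeader; this shared helper decodes the BlockMsg, enforces the message-count limit (and the size limit for locally published blocks), requires a block signature, and checks that the header's Messages CID matches the included messages before delegating to the implementation. A rough usage sketch (variable names hypothetical), mirroring how chain/sub wires it in: +// +//	res, reason := consensus.ValidateBlockPubsub(ctx, cns, pid == self, msg) +//	// res is the pubsub.ValidationResult; reason is non-empty only when the block is ignored or rejected.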
+func ValidateBlockPubsub(ctx context.Context, cns Consensus, self bool, msg *pubsub.Message) (pubsub.ValidationResult, string) { + if self { + return validateLocalBlock(ctx, msg) + } + + // track validation time + begin := build.Clock.Now() + defer func() { + log.Debugf("block validation time: %s", build.Clock.Since(begin)) + }() + + stats.Record(ctx, metrics.BlockReceived.M(1)) + + recordFailureFlagPeer := func(what string) { + // bv.Validate will flag the peer in that case + panic(what) + } + + blk, what, err := decodeAndCheckBlock(msg) + if err != nil { + log.Error("got invalid block over pubsub: ", err) + recordFailureFlagPeer(what) + return pubsub.ValidationReject, what + } + + // validate the block meta: the Message CID in the header must match the included messages + err = validateMsgMeta(ctx, blk) + if err != nil { + log.Warnf("error validating message metadata: %s", err) + recordFailureFlagPeer("invalid_block_meta") + return pubsub.ValidationReject, "invalid_block_meta" + } + + reject, err := cns.ValidateBlockHeader(ctx, blk.Header) + if err != nil { + if reject == "" { + log.Warn("ignoring block msg: ", err) + return pubsub.ValidationIgnore, reject + } + recordFailureFlagPeer(reject) + return pubsub.ValidationReject, reject + } + + // all good, accept the block + msg.ValidatorData = blk + stats.Record(ctx, metrics.BlockValidationSuccess.M(1)) + return pubsub.ValidationAccept, "" +} diff --git a/chain/signatures.go b/chain/consensus/signatures.go similarity index 99% rename from chain/signatures.go rename to chain/consensus/signatures.go index 1dc67fd2c..cb0e229a8 100644 --- a/chain/signatures.go +++ b/chain/consensus/signatures.go @@ -1,4 +1,4 @@ -package chain +package consensus import ( "golang.org/x/xerrors" diff --git a/chain/events/message_cache.go b/chain/events/message_cache.go index 81b79cb38..d47d3a168 100644 --- a/chain/events/message_cache.go +++ b/chain/events/message_cache.go @@ -4,7 +4,7 @@ import ( "context" "sync" - lru "github.com/hashicorp/golang-lru" + lru "github.com/hashicorp/golang-lru/v2" "github.com/ipfs/go-cid" "github.com/filecoin-project/lotus/api" @@ -14,14 +14,14 @@ type messageCache struct { api EventAPI blockMsgLk sync.Mutex - blockMsgCache *lru.ARCCache + blockMsgCache *lru.ARCCache[cid.Cid, *api.BlockMessages] } -func newMessageCache(api EventAPI) *messageCache { - blsMsgCache, _ := lru.NewARC(500) +func newMessageCache(a EventAPI) *messageCache { + blsMsgCache, _ := lru.NewARC[cid.Cid, *api.BlockMessages](500) return &messageCache{ - api: api, + api: a, blockMsgCache: blsMsgCache, } } @@ -30,14 +30,14 @@ func (c *messageCache) ChainGetBlockMessages(ctx context.Context, blkCid cid.Cid c.blockMsgLk.Lock() defer c.blockMsgLk.Unlock() - msgsI, ok := c.blockMsgCache.Get(blkCid) + msgs, ok := c.blockMsgCache.Get(blkCid) var err error if !ok { - msgsI, err = c.api.ChainGetBlockMessages(ctx, blkCid) + msgs, err = c.api.ChainGetBlockMessages(ctx, blkCid) if err != nil { return nil, err } - c.blockMsgCache.Add(blkCid, msgsI) + c.blockMsgCache.Add(blkCid, msgs) } - return msgsI.(*api.BlockMessages), nil + return msgs, nil } diff --git a/chain/gen/gen.go b/chain/gen/gen.go index f3b1ae212..8e3c41a89 100644 --- a/chain/gen/gen.go +++ b/chain/gen/gen.go @@ -5,7 +5,7 @@ import ( "context" "fmt" "io" - "io/ioutil" + "os" "sync/atomic" "time" @@ -31,6 +31,7 @@ import ( "github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/chain/actors/policy" "github.com/filecoin-project/lotus/chain/beacon" + 
"github.com/filecoin-project/lotus/chain/consensus" "github.com/filecoin-project/lotus/chain/consensus/filcns" genesis2 "github.com/filecoin-project/lotus/chain/gen/genesis" "github.com/filecoin-project/lotus/chain/rand" @@ -166,7 +167,7 @@ func NewGeneratorWithSectorsAndUpgradeSchedule(numSectors int, us stmgr.UpgradeS maddr1 := genesis2.MinerAddress(0) - m1temp, err := ioutil.TempDir("", "preseal") + m1temp, err := os.MkdirTemp("", "preseal") if err != nil { return nil, err } @@ -178,7 +179,7 @@ func NewGeneratorWithSectorsAndUpgradeSchedule(numSectors int, us stmgr.UpgradeS maddr2 := genesis2.MinerAddress(1) - m2temp, err := ioutil.TempDir("", "preseal") + m2temp, err := os.MkdirTemp("", "preseal") if err != nil { return nil, err } @@ -255,7 +256,7 @@ func NewGeneratorWithSectorsAndUpgradeSchedule(numSectors int, us stmgr.UpgradeS //return nil, xerrors.Errorf("creating drand beacon: %w", err) //} - sm, err := stmgr.NewStateManager(cs, filcns.NewTipSetExecutor(), sys, us, beac) + sm, err := stmgr.NewStateManager(cs, consensus.NewTipSetExecutor(filcns.RewardFunc), sys, us, beac, ds) if err != nil { return nil, xerrors.Errorf("initing stmgr: %w", err) } diff --git a/chain/gen/genesis/genesis.go b/chain/gen/genesis/genesis.go index d1c7d308e..3e8848021 100644 --- a/chain/gen/genesis/genesis.go +++ b/chain/gen/genesis/genesis.go @@ -38,7 +38,7 @@ import ( "github.com/filecoin-project/lotus/chain/actors/builtin/reward" "github.com/filecoin-project/lotus/chain/actors/builtin/system" "github.com/filecoin-project/lotus/chain/actors/builtin/verifreg" - "github.com/filecoin-project/lotus/chain/consensus/filcns" + "github.com/filecoin-project/lotus/chain/consensus" "github.com/filecoin-project/lotus/chain/state" "github.com/filecoin-project/lotus/chain/store" "github.com/filecoin-project/lotus/chain/types" @@ -486,7 +486,7 @@ func VerifyPreSealedData(ctx context.Context, cs *store.ChainStore, sys vm.Sysca Epoch: 0, Rand: &fakeRand{}, Bstore: cs.StateBlockstore(), - Actors: filcns.NewActorRegistry(), + Actors: consensus.NewActorRegistry(), Syscalls: mkFakedSigSyscalls(sys), CircSupplyCalc: csc, NetworkVersion: nv, diff --git a/chain/gen/genesis/miners.go b/chain/gen/genesis/miners.go index 97f5de10d..c083f4fda 100644 --- a/chain/gen/genesis/miners.go +++ b/chain/gen/genesis/miners.go @@ -42,7 +42,7 @@ import ( "github.com/filecoin-project/lotus/chain/actors/builtin/power" "github.com/filecoin-project/lotus/chain/actors/builtin/reward" "github.com/filecoin-project/lotus/chain/actors/policy" - "github.com/filecoin-project/lotus/chain/consensus/filcns" + "github.com/filecoin-project/lotus/chain/consensus" "github.com/filecoin-project/lotus/chain/state" "github.com/filecoin-project/lotus/chain/store" "github.com/filecoin-project/lotus/chain/types" @@ -96,7 +96,7 @@ func SetupStorageMiners(ctx context.Context, cs *store.ChainStore, sys vm.Syscal Epoch: 0, Rand: &fakeRand{}, Bstore: cs.StateBlockstore(), - Actors: filcns.NewActorRegistry(), + Actors: consensus.NewActorRegistry(), Syscalls: mkFakedSigSyscalls(sys), CircSupplyCalc: csc, NetworkVersion: nv, diff --git a/chain/messagepool/messagepool.go b/chain/messagepool/messagepool.go index 06fc12b92..0d787bd50 100644 --- a/chain/messagepool/messagepool.go +++ b/chain/messagepool/messagepool.go @@ -12,7 +12,7 @@ import ( "time" "github.com/hashicorp/go-multierror" - lru "github.com/hashicorp/golang-lru" + lru "github.com/hashicorp/golang-lru/v2" "github.com/ipfs/go-cid" "github.com/ipfs/go-datastore" "github.com/ipfs/go-datastore/namespace" @@ -33,8 +33,7 
@@ import ( "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/build" - "github.com/filecoin-project/lotus/chain" - "github.com/filecoin-project/lotus/chain/consensus/filcns" + "github.com/filecoin-project/lotus/chain/consensus" "github.com/filecoin-project/lotus/chain/stmgr" "github.com/filecoin-project/lotus/chain/store" "github.com/filecoin-project/lotus/chain/types" @@ -160,7 +159,7 @@ type MessagePool struct { // pruneCooldown is a channel used to allow a cooldown time between prunes pruneCooldown chan struct{} - blsSigCache *lru.TwoQueueCache + blsSigCache *lru.TwoQueueCache[cid.Cid, crypto.Signature] changes *lps.PubSub @@ -168,9 +167,9 @@ type MessagePool struct { netName dtypes.NetworkName - sigValCache *lru.TwoQueueCache + sigValCache *lru.TwoQueueCache[string, struct{}] - nonceCache *lru.Cache + nonceCache *lru.Cache[nonceCacheKey, uint64] evtTypes [3]journal.EventType journal journal.Journal @@ -370,9 +369,9 @@ func (ms *msgSet) toSlice() []*types.SignedMessage { } func New(ctx context.Context, api Provider, ds dtypes.MetadataDS, us stmgr.UpgradeSchedule, netName dtypes.NetworkName, j journal.Journal) (*MessagePool, error) { - cache, _ := lru.New2Q(build.BlsSignatureCacheSize) - verifcache, _ := lru.New2Q(build.VerifSigCacheSize) - noncecache, _ := lru.New(256) + cache, _ := lru.New2Q[cid.Cid, crypto.Signature](build.BlsSignatureCacheSize) + verifcache, _ := lru.New2Q[string, struct{}](build.VerifSigCacheSize) + noncecache, _ := lru.New[nonceCacheKey, uint64](256) cfg, err := loadConfig(ctx, ds) if err != nil { @@ -798,7 +797,7 @@ func (mp *MessagePool) VerifyMsgSig(m *types.SignedMessage) error { return nil } - if err := chain.AuthenticateMessage(m, m.Message.From); err != nil { + if err := consensus.AuthenticateMessage(m, m.Message.From); err != nil { return xerrors.Errorf("failed to validate signature: %w", err) } @@ -866,7 +865,7 @@ func (mp *MessagePool) addTs(ctx context.Context, m *types.SignedMessage, curTs nv := mp.api.StateNetworkVersion(ctx, epoch) // TODO: I'm not thrilled about depending on filcns here, but I prefer this to duplicating logic - if !filcns.IsValidForSending(nv, senderAct) { + if !consensus.IsValidForSending(nv, senderAct) { return false, xerrors.Errorf("sender actor %s is not a valid top-level sender", m.Message.From) } @@ -1054,7 +1053,7 @@ func (mp *MessagePool) getStateNonce(ctx context.Context, addr address.Address, n, ok := mp.nonceCache.Get(nk) if ok { - return n.(uint64), nil + return n, nil } act, err := mp.api.GetActorAfter(addr, ts) @@ -1474,15 +1473,10 @@ func (mp *MessagePool) MessagesForBlocks(ctx context.Context, blks []*types.Bloc } func (mp *MessagePool) RecoverSig(msg *types.Message) *types.SignedMessage { - val, ok := mp.blsSigCache.Get(msg.Cid()) + sig, ok := mp.blsSigCache.Get(msg.Cid()) if !ok { return nil } - sig, ok := val.(crypto.Signature) - if !ok { - log.Errorf("value in signature cache was not a signature (got %T)", val) - return nil - } return &types.SignedMessage{ Message: *msg, diff --git a/chain/stmgr/call.go b/chain/stmgr/call.go index 901fc2d12..61056528f 100644 --- a/chain/stmgr/call.go +++ b/chain/stmgr/call.go @@ -52,8 +52,8 @@ func (sm *StateManager) Call(ctx context.Context, msg *types.Message, ts *types. } // CallWithGas calculates the state for a given tipset, and then applies the given message on top of that state. 
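+// The new applyTsMessages parameter controls which of the tipset's own messages are applied before msg: true applies them all (the previous behaviour), false applies only those sent from the same address as msg. An illustrative call matching the updated tests (variable names hypothetical): +// +//	ret, err := sm.CallWithGas(ctx, msg, nil, ts, true)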
-func (sm *StateManager) CallWithGas(ctx context.Context, msg *types.Message, priorMsgs []types.ChainMsg, ts *types.TipSet) (*api.InvocResult, error) { - return sm.callInternal(ctx, msg, priorMsgs, ts, cid.Undef, sm.GetNetworkVersion, true, true) +func (sm *StateManager) CallWithGas(ctx context.Context, msg *types.Message, priorMsgs []types.ChainMsg, ts *types.TipSet, applyTsMessages bool) (*api.InvocResult, error) { + return sm.callInternal(ctx, msg, priorMsgs, ts, cid.Undef, sm.GetNetworkVersion, true, applyTsMessages) } // CallAtStateAndVersion allows you to specify a message to execute on the given stateCid and network version. @@ -117,12 +117,22 @@ func (sm *StateManager) callInternal(ctx context.Context, msg *types.Message, pr if stateCid == cid.Undef { stateCid = ts.ParentState() } + tsMsgs, err := sm.cs.MessagesForTipset(ctx, ts) + if err != nil { + return nil, xerrors.Errorf("failed to lookup messages for parent tipset: %w", err) + } + if applyTsMessages { - tsMsgs, err := sm.cs.MessagesForTipset(ctx, ts) - if err != nil { - return nil, xerrors.Errorf("failed to lookup messages for parent tipset: %w", err) - } priorMsgs = append(tsMsgs, priorMsgs...) + } else { + var filteredTsMsgs []types.ChainMsg + for _, tsMsg := range tsMsgs { + //TODO we should technically be normalizing the filecoin address of from when we compare here + if tsMsg.VMMessage().From == msg.VMMessage().From { + filteredTsMsgs = append(filteredTsMsgs, tsMsg) + } + } + priorMsgs = append(filteredTsMsgs, priorMsgs...) } // Technically, the tipset we're passing in here should be ts+1, but that may not exist. diff --git a/chain/stmgr/execute.go b/chain/stmgr/execute.go index e67398299..f85ff7c04 100644 --- a/chain/stmgr/execute.go +++ b/chain/stmgr/execute.go @@ -8,6 +8,7 @@ import ( "go.opencensus.io/trace" "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/chain/store" "github.com/filecoin-project/lotus/chain/types" ) @@ -52,6 +53,22 @@ func (sm *StateManager) TipSetState(ctx context.Context, ts *types.TipSet) (st c sm.stlk.Unlock() + if ts.Height() == 0 { + // NB: This is here because the process that executes blocks requires that the + // block miner reference a valid miner in the state tree. Unless we create some + // magical genesis miner, this won't work properly, so we short circuit here + // This avoids the question of 'who gets paid the genesis block reward'. + // This also makes us not attempt to lookup the tipset state with + // tryLookupTipsetState, which would cause a very long, very slow walk. + return ts.Blocks()[0].ParentStateRoot, ts.Blocks()[0].ParentMessageReceipts, nil + } + + // First, try to find the tipset in the current chain. If found, we can avoid re-executing + // it. + if st, rec, found := tryLookupTipsetState(ctx, sm.cs, ts); found { + return st, rec, nil + } + st, rec, err = sm.tsExec.ExecuteTipSet(ctx, sm, ts, sm.tsExecMonitor, false) if err != nil { return cid.Undef, cid.Undef, err @@ -60,6 +77,51 @@ func (sm *StateManager) TipSetState(ctx context.Context, ts *types.TipSet) (st c return st, rec, nil } +// Try to lookup a state & receipt CID for a given tipset by walking the chain instead of executing +// it. This will only successfully return the state/receipt CIDs if they're found in the state +// store. +// +// NOTE: This _won't_ recursively walk the receipt/state trees. It assumes that having the root +// implies having the rest of the tree. However, lotus generally makes that assumption anyways. 
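+// +// Concretely, it loads the tipset at ts.Height()+1, confirms that its parents are ts (i.e. the same fork), and returns the parent state root and parent message receipts only if both are present in the state and chain blockstores; otherwise it reports found=false and the caller falls back to executing the tipset. TipSetState uses it as a fast path, roughly: +// +//	if st, rec, found := tryLookupTipsetState(ctx, sm.cs, ts); found { return st, rec, nil }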
+func tryLookupTipsetState(ctx context.Context, cs *store.ChainStore, ts *types.TipSet) (cid.Cid, cid.Cid, bool) { + nextTs, err := cs.GetTipsetByHeight(ctx, ts.Height()+1, nil, false) + if err != nil { + // Nothing to see here. The requested height may be beyond the current head. + return cid.Undef, cid.Undef, false + } + + // Make sure we're on the correct fork. + if nextTs.Parents() != ts.Key() { + // Also nothing to see here. This just means that the requested tipset is on a + // different fork. + return cid.Undef, cid.Undef, false + } + + stateCid := nextTs.ParentState() + receiptCid := nextTs.ParentMessageReceipts() + + // Make sure we have the parent state. + if hasState, err := cs.StateBlockstore().Has(ctx, stateCid); err != nil { + log.Errorw("failed to lookup state-root in blockstore", "cid", stateCid, "error", err) + return cid.Undef, cid.Undef, false + } else if !hasState { + // We have the chain but don't have the state. It looks like we need to try + // executing? + return cid.Undef, cid.Undef, false + } + + // Make sure we have the receipts. + if hasReceipts, err := cs.ChainBlockstore().Has(ctx, receiptCid); err != nil { + log.Errorw("failed to lookup receipts in blockstore", "cid", receiptCid, "error", err) + return cid.Undef, cid.Undef, false + } else if !hasReceipts { + // If we don't have the receipts, re-execute and try again. + return cid.Undef, cid.Undef, false + } + + return stateCid, receiptCid, true +} + func (sm *StateManager) ExecutionTraceWithMonitor(ctx context.Context, ts *types.TipSet, em ExecMonitor) (cid.Cid, error) { st, _, err := sm.tsExec.ExecuteTipSet(ctx, sm, ts, em, true) return st, err diff --git a/chain/stmgr/forks.go b/chain/stmgr/forks.go index 514f78f76..1f9977d96 100644 --- a/chain/stmgr/forks.go +++ b/chain/stmgr/forks.go @@ -4,7 +4,9 @@ import ( "bytes" "context" "encoding/binary" + "os" "sort" + "strings" "sync" "time" @@ -26,6 +28,9 @@ import ( "github.com/filecoin-project/lotus/chain/vm" ) +// EnvDisablePreMigrations when set to '1' stops pre-migrations from running +const EnvDisablePreMigrations = "LOTUS_DISABLE_PRE_MIGRATIONS" + // MigrationCache can be used to cache information used by a migration. This is primarily useful to // "pre-compute" some migration state ahead of time, and make it accessible in the migration itself. type MigrationCache interface { @@ -169,9 +174,16 @@ func (us UpgradeSchedule) GetNtwkVersion(e abi.ChainEpoch) (network.Version, err func (sm *StateManager) HandleStateForks(ctx context.Context, root cid.Cid, height abi.ChainEpoch, cb ExecMonitor, ts *types.TipSet) (cid.Cid, error) { retCid := root - var err error u := sm.stateMigrations[height] if u != nil && u.upgrade != nil { + migCid, ok, err := u.migrationResultCache.Get(ctx, root) + if err == nil && ok { + log.Infow("CACHED migration", "height", height, "from", root, "to", migCid) + return migCid, nil + } else if err != nil { + log.Errorw("failed to lookup previous migration result", "err", err) + } + startTime := time.Now() log.Warnw("STARTING migration", "height", height, "from", root) // Yes, we clone the cache, even for the final upgrade epoch. Why? Reverts. 
We may @@ -192,6 +204,11 @@ func (sm *StateManager) HandleStateForks(ctx context.Context, root cid.Cid, heig "to", retCid, "duration", time.Since(startTime), ) + + // Only set if migration ran, we do not want a root => root mapping + if err := u.migrationResultCache.Store(ctx, root, retCid); err != nil { + log.Errorw("failed to store migration result", "err", err) + } } return retCid, nil @@ -218,6 +235,11 @@ func runPreMigration(ctx context.Context, sm *StateManager, fn PreMigrationFunc, height := ts.Height() parent := ts.ParentState() + if disabled := os.Getenv(EnvDisablePreMigrations); strings.TrimSpace(disabled) == "1" { + log.Warnw("SKIPPING pre-migration", "height", height) + return + } + startTime := time.Now() log.Warn("STARTING pre-migration") @@ -347,12 +369,11 @@ func DoTransfer(tree types.StateTree, from, to address.Address, amt abi.TokenAmo // record the transfer in execution traces cb(types.ExecutionTrace{ - Msg: MakeFakeMsg(from, to, amt, 0), - MsgRct: MakeFakeRct(), - Error: "", - Duration: 0, - GasCharges: nil, - Subcalls: nil, + Msg: types.MessageTrace{ + From: from, + To: to, + Value: amt, + }, }) } diff --git a/chain/stmgr/forks_test.go b/chain/stmgr/forks_test.go index 98ab647c9..caf2c22ce 100644 --- a/chain/stmgr/forks_test.go +++ b/chain/stmgr/forks_test.go @@ -5,10 +5,12 @@ import ( "context" "fmt" "io" + "os" "sync" "testing" "github.com/ipfs/go-cid" + "github.com/ipfs/go-datastore" ipldcbor "github.com/ipfs/go-ipld-cbor" logging "github.com/ipfs/go-log/v2" "github.com/stretchr/testify/require" @@ -31,8 +33,10 @@ import ( "github.com/filecoin-project/lotus/chain/actors/builtin" _init "github.com/filecoin-project/lotus/chain/actors/builtin/init" "github.com/filecoin-project/lotus/chain/actors/policy" + "github.com/filecoin-project/lotus/chain/consensus" "github.com/filecoin-project/lotus/chain/consensus/filcns" "github.com/filecoin-project/lotus/chain/gen" + "github.com/filecoin-project/lotus/chain/stmgr" . 
"github.com/filecoin-project/lotus/chain/stmgr" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/vm" @@ -128,7 +132,7 @@ func TestForkHeightTriggers(t *testing.T) { } sm, err := NewStateManager( - cg.ChainStore(), filcns.NewTipSetExecutor(), cg.StateManager().VMSys(), UpgradeSchedule{{ + cg.ChainStore(), consensus.NewTipSetExecutor(filcns.RewardFunc), cg.StateManager().VMSys(), UpgradeSchedule{{ Network: network.Version1, Height: testForkHeight, Migration: func(ctx context.Context, sm *StateManager, cache MigrationCache, cb ExecMonitor, @@ -164,12 +168,12 @@ func TestForkHeightTriggers(t *testing.T) { } return st.Flush(ctx) - }}}, cg.BeaconSchedule()) + }}}, cg.BeaconSchedule(), datastore.NewMapDatastore()) if err != nil { t.Fatal(err) } - inv := filcns.NewActorRegistry() + inv := consensus.NewActorRegistry() registry := builtin.MakeRegistryLegacy([]rtt.VMActor{testActor{}}) inv.Register(actorstypes.Version0, nil, registry) @@ -274,7 +278,7 @@ func testForkRefuseCall(t *testing.T, nullsBefore, nullsAfter int) { var migrationCount int sm, err := NewStateManager( - cg.ChainStore(), filcns.NewTipSetExecutor(), cg.StateManager().VMSys(), UpgradeSchedule{{ + cg.ChainStore(), consensus.NewTipSetExecutor(filcns.RewardFunc), cg.StateManager().VMSys(), UpgradeSchedule{{ Network: network.Version1, Expensive: true, Height: testForkHeight, @@ -282,12 +286,12 @@ func testForkRefuseCall(t *testing.T, nullsBefore, nullsAfter int) { root cid.Cid, height abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) { migrationCount++ return root, nil - }}}, cg.BeaconSchedule()) + }}}, cg.BeaconSchedule(), datastore.NewMapDatastore()) if err != nil { t.Fatal(err) } - inv := filcns.NewActorRegistry() + inv := consensus.NewActorRegistry() registry := builtin.MakeRegistryLegacy([]rtt.VMActor{testActor{}}) inv.Register(actorstypes.Version0, nil, registry) @@ -336,7 +340,7 @@ func testForkRefuseCall(t *testing.T, nullsBefore, nullsAfter int) { currentHeight := ts.TipSet.TipSet().Height() // CallWithGas calls on top of the given tipset. - ret, err := sm.CallWithGas(ctx, m, nil, ts.TipSet.TipSet()) + ret, err := sm.CallWithGas(ctx, m, nil, ts.TipSet.TipSet(), true) if parentHeight <= testForkHeight && currentHeight >= testForkHeight { // If I had a fork, or I _will_ have a fork, it should fail. require.Equal(t, ErrExpensiveFork, err) @@ -357,7 +361,7 @@ func testForkRefuseCall(t *testing.T, nullsBefore, nullsAfter int) { // Calls without a tipset should walk back to the last non-fork tipset. // We _verify_ that the migration wasn't run multiple times at the end of the // test. 
- ret, err = sm.CallWithGas(ctx, m, nil, nil) + ret, err = sm.CallWithGas(ctx, m, nil, nil, true) require.NoError(t, err) require.True(t, ret.MsgRct.ExitCode.IsSuccess()) @@ -412,7 +416,7 @@ func TestForkPreMigration(t *testing.T) { counter := make(chan struct{}, 10) sm, err := NewStateManager( - cg.ChainStore(), filcns.NewTipSetExecutor(), cg.StateManager().VMSys(), UpgradeSchedule{{ + cg.ChainStore(), consensus.NewTipSetExecutor(filcns.RewardFunc), cg.StateManager().VMSys(), UpgradeSchedule{{ Network: network.Version1, Height: testForkHeight, Migration: func(ctx context.Context, sm *StateManager, cache MigrationCache, cb ExecMonitor, @@ -500,7 +504,7 @@ func TestForkPreMigration(t *testing.T) { return nil }, }}}, - }, cg.BeaconSchedule()) + }, cg.BeaconSchedule(), datastore.NewMapDatastore()) if err != nil { t.Fatal(err) } @@ -509,7 +513,7 @@ func TestForkPreMigration(t *testing.T) { require.NoError(t, sm.Stop(context.Background())) }() - inv := filcns.NewActorRegistry() + inv := consensus.NewActorRegistry() registry := builtin.MakeRegistryLegacy([]rtt.VMActor{testActor{}}) inv.Register(actorstypes.Version0, nil, registry) @@ -534,3 +538,170 @@ func TestForkPreMigration(t *testing.T) { // to this channel. require.Equal(t, 6, len(counter)) } + +func TestDisablePreMigration(t *testing.T) { + logging.SetAllLoggers(logging.LevelInfo) + + cg, err := gen.NewGenerator() + require.NoError(t, err) + + err = os.Setenv(EnvDisablePreMigrations, "1") + require.NoError(t, err) + + defer func() { + err := os.Unsetenv(EnvDisablePreMigrations) + require.NoError(t, err) + }() + + counter := make(chan struct{}, 10) + + sm, err := NewStateManager( + cg.ChainStore(), + consensus.NewTipSetExecutor(filcns.RewardFunc), + cg.StateManager().VMSys(), + UpgradeSchedule{{ + Network: network.Version1, + Height: testForkHeight, + Migration: func(_ context.Context, _ *StateManager, _ MigrationCache, _ ExecMonitor, + root cid.Cid, _ abi.ChainEpoch, _ *types.TipSet) (cid.Cid, error) { + + counter <- struct{}{} + + return root, nil + }, + PreMigrations: []PreMigration{{ + StartWithin: 20, + PreMigration: func(ctx context.Context, _ *StateManager, _ MigrationCache, + _ cid.Cid, _ abi.ChainEpoch, _ *types.TipSet) error { + panic("should be skipped") + }, + }}}, + }, + cg.BeaconSchedule(), + datastore.NewMapDatastore(), + ) + require.NoError(t, err) + require.NoError(t, sm.Start(context.Background())) + defer func() { + require.NoError(t, sm.Stop(context.Background())) + }() + + inv := consensus.NewActorRegistry() + registry := builtin.MakeRegistryLegacy([]rtt.VMActor{testActor{}}) + inv.Register(actorstypes.Version0, nil, registry) + + sm.SetVMConstructor(func(ctx context.Context, vmopt *vm.VMOpts) (vm.Interface, error) { + nvm, err := vm.NewLegacyVM(ctx, vmopt) + require.NoError(t, err) + nvm.SetInvoker(inv) + return nvm, nil + }) + + cg.SetStateManager(sm) + + for i := 0; i < 50; i++ { + _, err := cg.NextTipSet() + require.NoError(t, err) + } + + require.Equal(t, 1, len(counter)) +} + +func TestMigrtionCache(t *testing.T) { + logging.SetAllLoggers(logging.LevelInfo) + + cg, err := gen.NewGenerator() + require.NoError(t, err) + + counter := make(chan struct{}, 10) + metadataDs := datastore.NewMapDatastore() + + sm, err := NewStateManager( + cg.ChainStore(), + consensus.NewTipSetExecutor(filcns.RewardFunc), + cg.StateManager().VMSys(), + UpgradeSchedule{{ + Network: network.Version1, + Height: testForkHeight, + Migration: func(_ context.Context, _ *StateManager, _ MigrationCache, _ ExecMonitor, + root cid.Cid, _ 
abi.ChainEpoch, _ *types.TipSet) (cid.Cid, error) { + + counter <- struct{}{} + + return root, nil + }}, + }, + cg.BeaconSchedule(), + metadataDs, + ) + require.NoError(t, err) + require.NoError(t, sm.Start(context.Background())) + defer func() { + require.NoError(t, sm.Stop(context.Background())) + }() + + inv := consensus.NewActorRegistry() + registry := builtin.MakeRegistryLegacy([]rtt.VMActor{testActor{}}) + inv.Register(actorstypes.Version0, nil, registry) + + sm.SetVMConstructor(func(ctx context.Context, vmopt *vm.VMOpts) (vm.Interface, error) { + nvm, err := vm.NewLegacyVM(ctx, vmopt) + require.NoError(t, err) + nvm.SetInvoker(inv) + return nvm, nil + }) + + cg.SetStateManager(sm) + + for i := 0; i < 50; i++ { + _, err := cg.NextTipSet() + require.NoError(t, err) + } + + ts, err := cg.ChainStore().GetTipsetByHeight(context.Background(), testForkHeight, nil, false) + require.NoError(t, err) + + root, _, err := stmgr.ComputeState(context.Background(), sm, testForkHeight+1, []*types.Message{}, ts) + require.NoError(t, err) + t.Log(root) + + require.Equal(t, 1, len(counter)) + + { + sm, err := NewStateManager( + cg.ChainStore(), + consensus.NewTipSetExecutor(filcns.RewardFunc), + cg.StateManager().VMSys(), + UpgradeSchedule{{ + Network: network.Version1, + Height: testForkHeight, + Migration: func(_ context.Context, _ *StateManager, _ MigrationCache, _ ExecMonitor, + root cid.Cid, _ abi.ChainEpoch, _ *types.TipSet) (cid.Cid, error) { + + counter <- struct{}{} + + return root, nil + }}, + }, + cg.BeaconSchedule(), + metadataDs, + ) + require.NoError(t, err) + sm.SetVMConstructor(func(ctx context.Context, vmopt *vm.VMOpts) (vm.Interface, error) { + nvm, err := vm.NewLegacyVM(ctx, vmopt) + require.NoError(t, err) + nvm.SetInvoker(inv) + return nvm, nil + }) + + ctx := context.Background() + + base, _, err := sm.ExecutionTrace(ctx, ts) + require.NoError(t, err) + _, err = sm.HandleStateForks(context.Background(), base, ts.Height(), nil, ts) + require.NoError(t, err) + + // Should not have increased as we should be using the cached results in the metadataDs + require.Equal(t, 1, len(counter)) + } +} diff --git a/chain/stmgr/stmgr.go b/chain/stmgr/stmgr.go index ee9338e63..575e6ac2e 100644 --- a/chain/stmgr/stmgr.go +++ b/chain/stmgr/stmgr.go @@ -2,10 +2,13 @@ package stmgr import ( "context" + "fmt" "sync" "github.com/ipfs/go-cid" + dstore "github.com/ipfs/go-datastore" cbor "github.com/ipfs/go-ipld-cbor" + ipld "github.com/ipfs/go-ipld-format" logging "github.com/ipfs/go-log/v2" "golang.org/x/xerrors" @@ -51,9 +54,47 @@ type versionSpec struct { } type migration struct { - upgrade MigrationFunc - preMigrations []PreMigration - cache *nv16.MemMigrationCache + upgrade MigrationFunc + preMigrations []PreMigration + cache *nv16.MemMigrationCache + migrationResultCache *migrationResultCache +} + +type migrationResultCache struct { + ds dstore.Batching + keyPrefix string +} + +func (m *migrationResultCache) keyForMigration(root cid.Cid) dstore.Key { + kStr := fmt.Sprintf("%s/%s", m.keyPrefix, root) + return dstore.NewKey(kStr) +} + +func (m *migrationResultCache) Get(ctx context.Context, root cid.Cid) (cid.Cid, bool, error) { + k := m.keyForMigration(root) + + bs, err := m.ds.Get(ctx, k) + if ipld.IsNotFound(err) { + return cid.Undef, false, nil + } else if err != nil { + return cid.Undef, false, xerrors.Errorf("error loading migration result: %w", err) + } + + c, err := cid.Parse(bs) + if err != nil { + return cid.Undef, false, xerrors.Errorf("error parsing migration result: %w", err) + } + + 
return c, true, nil +} + +func (m *migrationResultCache) Store(ctx context.Context, root cid.Cid, resultCid cid.Cid) error { + k := m.keyForMigration(root) + if err := m.ds.Put(ctx, k, resultCid.Bytes()); err != nil { + return err + } + + return nil } type Executor interface { @@ -89,8 +130,7 @@ type StateManager struct { postIgnitionVesting []msig0.State postCalicoVesting []msig0.State - genesisPledge abi.TokenAmount - genesisMarketFunds abi.TokenAmount + genesisPledge abi.TokenAmount tsExec Executor tsExecMonitor ExecMonitor @@ -103,7 +143,7 @@ type treeCache struct { tree *state.StateTree } -func NewStateManager(cs *store.ChainStore, exec Executor, sys vm.SyscallBuilder, us UpgradeSchedule, beacon beacon.Schedule) (*StateManager, error) { +func NewStateManager(cs *store.ChainStore, exec Executor, sys vm.SyscallBuilder, us UpgradeSchedule, beacon beacon.Schedule, metadataDs dstore.Batching) (*StateManager, error) { // If we have upgrades, make sure they're in-order and make sense. if err := us.Validate(); err != nil { return nil, err @@ -122,12 +162,18 @@ func NewStateManager(cs *store.ChainStore, exec Executor, sys vm.SyscallBuilder, upgrade: upgrade.Migration, preMigrations: upgrade.PreMigrations, cache: nv16.NewMemMigrationCache(), + migrationResultCache: &migrationResultCache{ + keyPrefix: fmt.Sprintf("/migration-cache/nv%d", upgrade.Network), + ds: metadataDs, + }, } + stateMigrations[upgrade.Height] = migration } if upgrade.Expensive { expensiveUpgrades[upgrade.Height] = struct{}{} } + networkVersions = append(networkVersions, versionSpec{ networkVersion: lastVersion, atOrBelow: upgrade.Height, @@ -155,8 +201,8 @@ func NewStateManager(cs *store.ChainStore, exec Executor, sys vm.SyscallBuilder, }, nil } -func NewStateManagerWithUpgradeScheduleAndMonitor(cs *store.ChainStore, exec Executor, sys vm.SyscallBuilder, us UpgradeSchedule, b beacon.Schedule, em ExecMonitor) (*StateManager, error) { - sm, err := NewStateManager(cs, exec, sys, us, b) +func NewStateManagerWithUpgradeScheduleAndMonitor(cs *store.ChainStore, exec Executor, sys vm.SyscallBuilder, us UpgradeSchedule, b beacon.Schedule, em ExecMonitor, metadataDs dstore.Batching) (*StateManager, error) { + sm, err := NewStateManager(cs, exec, sys, us, b, metadataDs) if err != nil { return nil, err } diff --git a/chain/stmgr/supply.go b/chain/stmgr/supply.go index a48ff36c7..b48f9af43 100644 --- a/chain/stmgr/supply.go +++ b/chain/stmgr/supply.go @@ -51,17 +51,13 @@ func (sm *StateManager) setupGenesisVestingSchedule(ctx context.Context) error { return xerrors.Errorf("loading state tree: %w", err) } - gmf, err := getFilMarketLocked(ctx, sTree) - if err != nil { - return xerrors.Errorf("setting up genesis market funds: %w", err) - } - gp, err := getFilPowerLocked(ctx, sTree) if err != nil { return xerrors.Errorf("setting up genesis pledge: %w", err) } - sm.genesisMarketFunds = gmf + sm.genesisMsigLk.Lock() + defer sm.genesisMsigLk.Unlock() sm.genesisPledge = gp totalsByEpoch := make(map[abi.ChainEpoch]abi.TokenAmount) @@ -128,6 +124,8 @@ func (sm *StateManager) setupPostIgnitionVesting(ctx context.Context) error { totalsByEpoch[sixYears] = big.NewInt(100_000_000) totalsByEpoch[sixYears] = big.Add(totalsByEpoch[sixYears], big.NewInt(300_000_000)) + sm.genesisMsigLk.Lock() + defer sm.genesisMsigLk.Unlock() sm.postIgnitionVesting = make([]msig0.State, 0, len(totalsByEpoch)) for k, v := range totalsByEpoch { ns := msig0.State{ @@ -178,6 +176,9 @@ func (sm *StateManager) setupPostCalicoVesting(ctx context.Context) error { 
totalsByEpoch[sixYears] = big.Add(totalsByEpoch[sixYears], big.NewInt(300_000_000)) totalsByEpoch[sixYears] = big.Add(totalsByEpoch[sixYears], big.NewInt(9_805_053)) + sm.genesisMsigLk.Lock() + defer sm.genesisMsigLk.Unlock() + sm.postCalicoVesting = make([]msig0.State, 0, len(totalsByEpoch)) for k, v := range totalsByEpoch { ns := msig0.State{ @@ -198,21 +199,20 @@ func (sm *StateManager) setupPostCalicoVesting(ctx context.Context) error { func (sm *StateManager) GetFilVested(ctx context.Context, height abi.ChainEpoch) (abi.TokenAmount, error) { vf := big.Zero() - sm.genesisMsigLk.Lock() - defer sm.genesisMsigLk.Unlock() - // TODO: combine all this? - if sm.preIgnitionVesting == nil || sm.genesisPledge.IsZero() || sm.genesisMarketFunds.IsZero() { + if sm.preIgnitionVesting == nil || sm.genesisPledge.IsZero() { err := sm.setupGenesisVestingSchedule(ctx) if err != nil { return vf, xerrors.Errorf("failed to setup pre-ignition vesting schedule: %w", err) } + } if sm.postIgnitionVesting == nil { err := sm.setupPostIgnitionVesting(ctx) if err != nil { return vf, xerrors.Errorf("failed to setup post-ignition vesting schedule: %w", err) } + } if sm.postCalicoVesting == nil { err := sm.setupPostCalicoVesting(ctx) @@ -246,8 +246,6 @@ func (sm *StateManager) GetFilVested(ctx context.Context, height abi.ChainEpoch) if height <= build.UpgradeAssemblyHeight { // continue to use preIgnitionGenInfos, nothing changed at the Ignition epoch vf = big.Add(vf, sm.genesisPledge) - // continue to use preIgnitionGenInfos, nothing changed at the Ignition epoch - vf = big.Add(vf, sm.genesisMarketFunds) } return vf, nil diff --git a/chain/store/index.go b/chain/store/index.go index fe8f399ee..620cb2dee 100644 --- a/chain/store/index.go +++ b/chain/store/index.go @@ -4,8 +4,8 @@ import ( "context" "os" "strconv" + "sync" - lru "github.com/hashicorp/golang-lru" "golang.org/x/xerrors" "github.com/filecoin-project/go-state-types/abi" @@ -13,7 +13,7 @@ import ( "github.com/filecoin-project/lotus/chain/types" ) -var DefaultChainIndexCacheSize = 32 << 10 +var DefaultChainIndexCacheSize = 32 << 15 func init() { if s := os.Getenv("LOTUS_CHAIN_INDEX_CACHE"); s != "" { @@ -27,7 +27,8 @@ func init() { } type ChainIndex struct { - skipCache *lru.ARCCache + indexCacheLk sync.Mutex + indexCache map[types.TipSetKey]*lbEntry loadTipSet loadTipSetFunc @@ -36,17 +37,14 @@ type ChainIndex struct { type loadTipSetFunc func(context.Context, types.TipSetKey) (*types.TipSet, error) func NewChainIndex(lts loadTipSetFunc) *ChainIndex { - sc, _ := lru.NewARC(DefaultChainIndexCacheSize) return &ChainIndex{ - skipCache: sc, + indexCache: make(map[types.TipSetKey]*lbEntry, DefaultChainIndexCacheSize), loadTipSet: lts, skipLength: 20, } } type lbEntry struct { - ts *types.TipSet - parentHeight abi.ChainEpoch targetHeight abi.ChainEpoch target types.TipSetKey } @@ -58,25 +56,36 @@ func (ci *ChainIndex) GetTipsetByHeight(ctx context.Context, from *types.TipSet, rounded, err := ci.roundDown(ctx, from) if err != nil { - return nil, err + return nil, xerrors.Errorf("failed to round down: %w", err) } + ci.indexCacheLk.Lock() + defer ci.indexCacheLk.Unlock() cur := rounded.Key() for { - cval, ok := ci.skipCache.Get(cur) + lbe, ok := ci.indexCache[cur] if !ok { fc, err := ci.fillCache(ctx, cur) if err != nil { - return nil, err + return nil, xerrors.Errorf("failed to fill cache: %w", err) } - cval = fc + lbe = fc } - lbe := cval.(*lbEntry) - if lbe.ts.Height() == to || lbe.parentHeight < to { - return lbe.ts, nil - } else if to > lbe.targetHeight { - 
return ci.walkBack(ctx, lbe.ts, to) + if to == lbe.targetHeight { + ts, err := ci.loadTipSet(ctx, lbe.target) + if err != nil { + return nil, xerrors.Errorf("failed to load tipset: %w", err) + } + + return ts, nil + } + if to > lbe.targetHeight { + ts, err := ci.loadTipSet(ctx, cur) + if err != nil { + return nil, xerrors.Errorf("failed to load tipset: %w", err) + } + return ci.walkBack(ctx, ts, to) } cur = lbe.target @@ -87,16 +96,17 @@ func (ci *ChainIndex) GetTipsetByHeightWithoutCache(ctx context.Context, from *t return ci.walkBack(ctx, from, to) } +// Caller must hold indexCacheLk func (ci *ChainIndex) fillCache(ctx context.Context, tsk types.TipSetKey) (*lbEntry, error) { ts, err := ci.loadTipSet(ctx, tsk) if err != nil { - return nil, err + return nil, xerrors.Errorf("failed to load tipset: %w", err) } if ts.Height() == 0 { return &lbEntry{ - ts: ts, - parentHeight: 0, + targetHeight: 0, + target: tsk, }, nil } @@ -124,12 +134,10 @@ func (ci *ChainIndex) fillCache(ctx context.Context, tsk types.TipSetKey) (*lbEn } lbe := &lbEntry{ - ts: ts, - parentHeight: parent.Height(), targetHeight: skipTarget.Height(), target: skipTarget.Key(), } - ci.skipCache.Add(tsk, lbe) + ci.indexCache[tsk] = lbe return lbe, nil } @@ -144,7 +152,7 @@ func (ci *ChainIndex) roundDown(ctx context.Context, ts *types.TipSet) (*types.T rounded, err := ci.walkBack(ctx, ts, target) if err != nil { - return nil, err + return nil, xerrors.Errorf("failed to walk back: %w", err) } return rounded, nil @@ -164,7 +172,7 @@ func (ci *ChainIndex) walkBack(ctx context.Context, from *types.TipSet, to abi.C for { pts, err := ci.loadTipSet(ctx, ts.Parents()) if err != nil { - return nil, err + return nil, xerrors.Errorf("failed to load tipset: %w", err) } if to > pts.Height() { diff --git a/chain/store/messages.go b/chain/store/messages.go index 5ac62d394..c39cb3f9b 100644 --- a/chain/store/messages.go +++ b/chain/store/messages.go @@ -207,9 +207,7 @@ type mmCids struct { } func (cs *ChainStore) ReadMsgMetaCids(ctx context.Context, mmc cid.Cid) ([]cid.Cid, []cid.Cid, error) { - o, ok := cs.mmCache.Get(mmc) - if ok { - mmcids := o.(*mmCids) + if mmcids, ok := cs.mmCache.Get(mmc); ok { return mmcids.bls, mmcids.secpk, nil } @@ -229,7 +227,7 @@ func (cs *ChainStore) ReadMsgMetaCids(ctx context.Context, mmc cid.Cid) ([]cid.C return nil, nil, xerrors.Errorf("loading secpk message cids for block: %w", err) } - cs.mmCache.Add(mmc, &mmCids{ + cs.mmCache.Add(mmc, mmCids{ bls: blscids, secpk: secpkcids, }) @@ -237,6 +235,26 @@ func (cs *ChainStore) ReadMsgMetaCids(ctx context.Context, mmc cid.Cid) ([]cid.C return blscids, secpkcids, nil } +func (cs *ChainStore) ReadReceipts(ctx context.Context, root cid.Cid) ([]types.MessageReceipt, error) { + a, err := blockadt.AsArray(cs.ActorStore(ctx), root) + if err != nil { + return nil, err + } + + receipts := make([]types.MessageReceipt, 0, a.Length()) + var rcpt types.MessageReceipt + if err := a.ForEach(&rcpt, func(i int64) error { + if int64(len(receipts)) != i { + return xerrors.Errorf("missing receipt %d", i) + } + receipts = append(receipts, rcpt) + return nil + }); err != nil { + return nil, err + } + return receipts, nil +} + func (cs *ChainStore) MessagesForBlock(ctx context.Context, b *types.BlockHeader) ([]*types.Message, []*types.SignedMessage, error) { blscids, secpkcids, err := cs.ReadMsgMetaCids(ctx, b.Messages) if err != nil { diff --git a/chain/store/snapshot.go b/chain/store/snapshot.go index 36435152e..da568d0fb 100644 --- a/chain/store/snapshot.go +++ 
b/chain/store/snapshot.go @@ -3,7 +3,10 @@ package store import ( "bytes" "context" + "fmt" "io" + "sync" + "time" "github.com/ipfs/go-cid" blocks "github.com/ipfs/go-libipfs/blocks" @@ -12,6 +15,8 @@ import ( carv2 "github.com/ipld/go-car/v2" mh "github.com/multiformats/go-multihash" cbg "github.com/whyrusleeping/cbor-gen" + "go.uber.org/atomic" + "golang.org/x/sync/errgroup" "golang.org/x/xerrors" "github.com/filecoin-project/go-state-types/abi" @@ -132,6 +137,423 @@ func (cs *ChainStore) Import(ctx context.Context, r io.Reader) (*types.TipSet, e return root, nil } +type walkSchedTaskType int + +const ( + finishTask walkSchedTaskType = -1 + blockTask walkSchedTaskType = iota + messageTask + receiptTask + stateTask + dagTask +) + +func (t walkSchedTaskType) String() string { + switch t { + case finishTask: + return "finish" + case blockTask: + return "block" + case messageTask: + return "message" + case receiptTask: + return "receipt" + case stateTask: + return "state" + case dagTask: + return "dag" + } + panic(fmt.Sprintf("unknow task %d", t)) +} + +type walkTask struct { + c cid.Cid + taskType walkSchedTaskType +} + +// an ever growing FIFO +type taskFifo struct { + in chan walkTask + out chan walkTask + fifo []walkTask +} + +type taskResult struct { + c cid.Cid + b blocks.Block +} + +func newTaskFifo(bufferLen int) *taskFifo { + f := taskFifo{ + in: make(chan walkTask, bufferLen), + out: make(chan walkTask, bufferLen), + fifo: make([]walkTask, 0), + } + + go f.run() + + return &f +} + +func (f *taskFifo) Close() error { + close(f.in) + return nil +} + +func (f *taskFifo) run() { + for { + if len(f.fifo) > 0 { + // we have items in slice + // try to put next out or read something in. + // blocks if nothing works. + next := f.fifo[0] + select { + case f.out <- next: + f.fifo = f.fifo[1:] + case elem, ok := <-f.in: + if !ok { + // drain and close out. + for _, elem := range f.fifo { + f.out <- elem + } + close(f.out) + return + } + f.fifo = append(f.fifo, elem) + } + } else { + // no elements in fifo to put out. + // Try to read in and block. + // When done, try to put out or add to fifo. + select { + case elem, ok := <-f.in: + if !ok { + close(f.out) + return + } + select { + case f.out <- elem: + default: + f.fifo = append(f.fifo, elem) + } + } + } + } +} + +type walkSchedulerConfig struct { + numWorkers int + + head *types.TipSet // Tipset to start walking from. + tail *types.TipSet // Tipset to end at. 
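+ +	// Which DAGs to export for each block besides the headers themselves; once the walk reaches the tail tipset's height, only block headers are exported down to genesis: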
+ includeMessages bool + includeReceipts bool + includeState bool +} + +type walkScheduler struct { + ctx context.Context + cancel context.CancelFunc + + store bstore.Blockstore + cfg walkSchedulerConfig + writer io.Writer + + workerTasks *taskFifo + totalTasks atomic.Int64 + results chan taskResult + writeErrorChan chan error + + // tracks number of inflight tasks + //taskWg sync.WaitGroup + // launches workers and collects errors if any occur + workers *errgroup.Group + // set of CIDs already exported + seen sync.Map +} + +func newWalkScheduler(ctx context.Context, store bstore.Blockstore, cfg walkSchedulerConfig, w io.Writer) (*walkScheduler, error) { + ctx, cancel := context.WithCancel(ctx) + workers, ctx := errgroup.WithContext(ctx) + s := &walkScheduler{ + ctx: ctx, + cancel: cancel, + store: store, + cfg: cfg, + writer: w, + results: make(chan taskResult, cfg.numWorkers*64), + workerTasks: newTaskFifo(cfg.numWorkers * 64), + writeErrorChan: make(chan error, 1), + workers: workers, + } + + go func() { + defer close(s.writeErrorChan) + for r := range s.results { + // Write + if err := carutil.LdWrite(s.writer, r.c.Bytes(), r.b.RawData()); err != nil { + // abort operations + cancel() + s.writeErrorChan <- err + } + } + }() + + // workers + for i := 0; i < cfg.numWorkers; i++ { + f := func(n int) func() error { + return func() error { + return s.workerFunc(n) + } + }(i) + s.workers.Go(f) + } + + s.totalTasks.Add(int64(len(cfg.head.Blocks()))) + for _, b := range cfg.head.Blocks() { + select { + case <-ctx.Done(): + log.Errorw("context done while sending root tasks", ctx.Err()) + cancel() // kill workers + return nil, ctx.Err() + case s.workerTasks.in <- walkTask{ + c: b.Cid(), + taskType: blockTask, + }: + } + } + + return s, nil +} + +func (s *walkScheduler) Wait() error { + err := s.workers.Wait() + // all workers done. One would have reached genesis and notified the + // rest to exit. Yet, there might be some pending tasks in the queue, + // so we need to run a "single worker". + if err != nil { + log.Errorw("export workers finished with error", "error", err) + } + + for { + if n := s.totalTasks.Load(); n == 0 { + break // finally fully done + } + select { + case task := <-s.workerTasks.out: + s.totalTasks.Add(-1) + if err != nil { + continue // just drain if errors happened. 
+ } + err = s.processTask(task, 0) + } + } + close(s.results) + errWrite := <-s.writeErrorChan + if errWrite != nil { + log.Errorw("error writing to CAR file", "error", err) + return errWrite + } + s.workerTasks.Close() //nolint:errcheck + return err +} + +func (s *walkScheduler) enqueueIfNew(task walkTask) { + if task.c.Prefix().MhType == mh.IDENTITY { + //log.Infow("ignored", "cid", todo.c.String()) + return + } + if task.c.Prefix().Codec != cid.Raw && task.c.Prefix().Codec != cid.DagCBOR { + //log.Infow("ignored", "cid", todo.c.String()) + return + } + if _, loaded := s.seen.LoadOrStore(task.c, struct{}{}); loaded { + // we already had it on the map + return + } + + log.Debugw("enqueue", "type", task.taskType.String(), "cid", task.c.String()) + s.totalTasks.Add(1) + s.workerTasks.in <- task +} + +func (s *walkScheduler) sendFinish(workerN int) error { + log.Infow("worker finished work", "worker", workerN) + s.totalTasks.Add(1) + s.workerTasks.in <- walkTask{ + taskType: finishTask, + } + return nil +} + +func (s *walkScheduler) workerFunc(workerN int) error { + log.Infow("starting worker", "worker", workerN) + for t := range s.workerTasks.out { + s.totalTasks.Add(-1) + select { + case <-s.ctx.Done(): + return s.ctx.Err() + default: + // A worker reached genesis, so we wind down and let others do + // the same. Exit. + if t.taskType == finishTask { + return s.sendFinish(workerN) + } + } + + err := s.processTask(t, workerN) + if err != nil { + return err + } + // continue + } + return nil +} + +func (s *walkScheduler) processTask(t walkTask, workerN int) error { + if t.taskType == finishTask { + return nil + } + + blk, err := s.store.Get(s.ctx, t.c) + if err != nil { + return xerrors.Errorf("writing object to car, bs.Get: %w", err) + } + + s.results <- taskResult{ + c: t.c, + b: blk, + } + + // extract relevant dags to walk from the block + if t.taskType == blockTask { + blk := t.c + data, err := s.store.Get(s.ctx, blk) + if err != nil { + return err + } + var b types.BlockHeader + if err := b.UnmarshalCBOR(bytes.NewBuffer(data.RawData())); err != nil { + return xerrors.Errorf("unmarshalling block header (cid=%s): %w", blk, err) + } + if b.Height%1_000 == 0 { + log.Infow("block export", "height", b.Height) + } + if b.Height == 0 { + log.Info("exporting genesis block") + for i := range b.Parents { + s.enqueueIfNew(walkTask{ + c: b.Parents[i], + taskType: dagTask, + }) + } + s.enqueueIfNew(walkTask{ + c: b.ParentStateRoot, + taskType: stateTask, + }) + + return s.sendFinish(workerN) + } + // enqueue block parents + for i := range b.Parents { + s.enqueueIfNew(walkTask{ + c: b.Parents[i], + taskType: blockTask, + }) + } + if s.cfg.tail.Height() >= b.Height { + log.Debugw("tail reached: only blocks will be exported from now until genesis", "cid", blk.String()) + return nil + } + + if s.cfg.includeMessages { + // enqueue block messages + s.enqueueIfNew(walkTask{ + c: b.Messages, + taskType: messageTask, + }) + } + if s.cfg.includeReceipts { + // enqueue block receipts + s.enqueueIfNew(walkTask{ + c: b.ParentMessageReceipts, + taskType: receiptTask, + }) + } + if s.cfg.includeState { + s.enqueueIfNew(walkTask{ + c: b.ParentStateRoot, + taskType: stateTask, + }) + } + + return nil + } + + // Not a chain-block: we scan for CIDs in the raw block-data + return cbg.ScanForLinks(bytes.NewReader(blk.RawData()), func(c cid.Cid) { + if t.c.Prefix().Codec != cid.DagCBOR || t.c.Prefix().MhType == mh.IDENTITY { + return + } + + s.enqueueIfNew(walkTask{ + c: c, + taskType: dagTask, + }) + }) +} + +func (cs 
*ChainStore) ExportRange( + ctx context.Context, + w io.Writer, + head, tail *types.TipSet, + messages, receipts, stateroots bool, + workers int) error { + + h := &car.CarHeader{ + Roots: head.Cids(), + Version: 1, + } + + if err := car.WriteHeader(h, w); err != nil { + return xerrors.Errorf("failed to write car header: %s", err) + } + + start := time.Now() + log.Infow("walking snapshot range", + "head", head.Key(), + "tail", tail.Key(), + "messages", messages, + "receipts", receipts, + "stateroots", + stateroots, + "workers", workers) + + cfg := walkSchedulerConfig{ + numWorkers: workers, + head: head, + tail: tail, + includeMessages: messages, + includeState: stateroots, + includeReceipts: receipts, + } + + pw, err := newWalkScheduler(ctx, cs.UnionStore(), cfg, w) + if err != nil { + return err + } + + // wait until all workers are done. + err = pw.Wait() + if err != nil { + log.Errorw("walker scheduler", "error", err) + return err + } + + log.Infow("walking snapshot range complete", "duration", time.Since(start), "success", err == nil) + return nil +} + func (cs *ChainStore) WalkSnapshot(ctx context.Context, ts *types.TipSet, inclRecentRoots abi.ChainEpoch, skipOldMsgs, skipMsgReceipts bool, cb func(cid.Cid) error) error { if ts == nil { ts = cs.GetHeaviestTipSet() diff --git a/chain/store/store.go b/chain/store/store.go index 754e3a123..d7188a7bf 100644 --- a/chain/store/store.go +++ b/chain/store/store.go @@ -11,7 +11,7 @@ import ( "sync" "time" - lru "github.com/hashicorp/golang-lru" + lru "github.com/hashicorp/golang-lru/v2" "github.com/ipfs/go-cid" dstore "github.com/ipfs/go-datastore" "github.com/ipfs/go-datastore/query" @@ -120,8 +120,8 @@ type ChainStore struct { reorgCh chan<- reorg reorgNotifeeCh chan ReorgNotifee - mmCache *lru.ARCCache // msg meta cache (mh.Messages -> secp, bls []cid) - tsCache *lru.ARCCache + mmCache *lru.ARCCache[cid.Cid, mmCids] + tsCache *lru.ARCCache[types.TipSetKey, *types.TipSet] evtTypes [1]journal.EventType journal journal.Journal @@ -133,8 +133,8 @@ type ChainStore struct { } func NewChainStore(chainBs bstore.Blockstore, stateBs bstore.Blockstore, ds dstore.Batching, weight WeightFunc, j journal.Journal) *ChainStore { - c, _ := lru.NewARC(DefaultMsgMetaCacheSize) - tsc, _ := lru.NewARC(DefaultTipSetCacheSize) + c, _ := lru.NewARC[cid.Cid, mmCids](DefaultMsgMetaCacheSize) + tsc, _ := lru.NewARC[types.TipSetKey, *types.TipSet](DefaultTipSetCacheSize) if j == nil { j = journal.NilJournal() } @@ -818,9 +818,8 @@ func (cs *ChainStore) GetBlock(ctx context.Context, c cid.Cid) (*types.BlockHead } func (cs *ChainStore) LoadTipSet(ctx context.Context, tsk types.TipSetKey) (*types.TipSet, error) { - v, ok := cs.tsCache.Get(tsk) - if ok { - return v.(*types.TipSet), nil + if ts, ok := cs.tsCache.Get(tsk); ok { + return ts, nil } // Fetch tipset block headers from blockstore in parallel diff --git a/chain/store/store_test.go b/chain/store/store_test.go index 89d0caa2c..cc72acc95 100644 --- a/chain/store/store_test.go +++ b/chain/store/store_test.go @@ -15,6 +15,7 @@ import ( "github.com/filecoin-project/lotus/blockstore" "github.com/filecoin-project/lotus/chain/actors/policy" + "github.com/filecoin-project/lotus/chain/consensus" "github.com/filecoin-project/lotus/chain/consensus/filcns" "github.com/filecoin-project/lotus/chain/gen" "github.com/filecoin-project/lotus/chain/stmgr" @@ -195,7 +196,8 @@ func TestChainExportImportFull(t *testing.T) { } nbs := blockstore.NewMemorySync() - cs := store.NewChainStore(nbs, nbs, datastore.NewMapDatastore(), 
filcns.Weight, nil) + ds := datastore.NewMapDatastore() + cs := store.NewChainStore(nbs, nbs, ds, filcns.Weight, nil) defer cs.Close() //nolint:errcheck root, err := cs.Import(context.TODO(), buf) @@ -212,7 +214,7 @@ func TestChainExportImportFull(t *testing.T) { t.Fatal("imported chain differed from exported chain") } - sm, err := stmgr.NewStateManager(cs, filcns.NewTipSetExecutor(), nil, filcns.DefaultUpgradeSchedule(), cg.BeaconSchedule()) + sm, err := stmgr.NewStateManager(cs, consensus.NewTipSetExecutor(filcns.RewardFunc), nil, filcns.DefaultUpgradeSchedule(), cg.BeaconSchedule(), ds) if err != nil { t.Fatal(err) } diff --git a/chain/sub/incoming.go b/chain/sub/incoming.go index 4c7d16a9a..f641f0ff9 100644 --- a/chain/sub/incoming.go +++ b/chain/sub/incoming.go @@ -7,11 +7,12 @@ import ( "sync" "time" - lru "github.com/hashicorp/golang-lru" + lru "github.com/hashicorp/golang-lru/v2" bserv "github.com/ipfs/go-blockservice" "github.com/ipfs/go-cid" blocks "github.com/ipfs/go-libipfs/blocks" logging "github.com/ipfs/go-log/v2" + "github.com/ipni/storetheindex/announce/message" pubsub "github.com/libp2p/go-libp2p-pubsub" "github.com/libp2p/go-libp2p/core/connmgr" "github.com/libp2p/go-libp2p/core/peer" @@ -20,7 +21,6 @@ import ( "golang.org/x/xerrors" "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-legs/dtsync" "github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/chain" @@ -217,7 +217,7 @@ func fetchCids( type BlockValidator struct { self peer.ID - peers *lru.TwoQueueCache + peers *lru.TwoQueueCache[peer.ID, int] killThresh int @@ -231,7 +231,7 @@ type BlockValidator struct { } func NewBlockValidator(self peer.ID, chain *store.ChainStore, cns consensus.Consensus, blacklist func(peer.ID)) *BlockValidator { - p, _ := lru.New2Q(4096) + p, _ := lru.New2Q[peer.ID, int](4096) return &BlockValidator{ self: self, peers: p, @@ -244,21 +244,19 @@ func NewBlockValidator(self peer.ID, chain *store.ChainStore, cns consensus.Cons } func (bv *BlockValidator) flagPeer(p peer.ID) { - v, ok := bv.peers.Get(p) + val, ok := bv.peers.Get(p) if !ok { - bv.peers.Add(p, int(1)) + bv.peers.Add(p, 1) return } - val := v.(int) - if val >= bv.killThresh { log.Warnf("blacklisting peer %s", p) bv.blacklist(p) return } - bv.peers.Add(p, v.(int)+1) + bv.peers.Add(p, val+1) } func (bv *BlockValidator) Validate(ctx context.Context, pid peer.ID, msg *pubsub.Message) (res pubsub.ValidationResult) { @@ -273,7 +271,7 @@ func (bv *BlockValidator) Validate(ctx context.Context, pid peer.ID, msg *pubsub }() var what string - res, what = bv.consensus.ValidateBlockPubsub(ctx, pid == bv.self, msg) + res, what = consensus.ValidateBlockPubsub(ctx, bv.consensus, pid == bv.self, msg) if res == pubsub.ValidationAccept { // it's a good block! 
make sure we've only seen it once if count := bv.recvBlocks.add(msg.ValidatorData.(*types.BlockMsg).Cid()); count > 0 { @@ -293,11 +291,11 @@ func (bv *BlockValidator) Validate(ctx context.Context, pid peer.ID, msg *pubsub } type blockReceiptCache struct { - blocks *lru.TwoQueueCache + blocks *lru.TwoQueueCache[cid.Cid, int] } func newBlockReceiptCache() *blockReceiptCache { - c, _ := lru.New2Q(8192) + c, _ := lru.New2Q[cid.Cid, int](8192) return &blockReceiptCache{ blocks: c, @@ -307,12 +305,12 @@ func newBlockReceiptCache() *blockReceiptCache { func (brc *blockReceiptCache) add(bcid cid.Cid) int { val, ok := brc.blocks.Get(bcid) if !ok { - brc.blocks.Add(bcid, int(1)) + brc.blocks.Add(bcid, 1) return 0 } - brc.blocks.Add(bcid, val.(int)+1) - return val.(int) + brc.blocks.Add(bcid, val+1) + return val } type MessageValidator struct { @@ -466,13 +464,13 @@ type peerMsgInfo struct { type IndexerMessageValidator struct { self peer.ID - peerCache *lru.TwoQueueCache + peerCache *lru.TwoQueueCache[address.Address, *peerMsgInfo] chainApi full.ChainModuleAPI stateApi full.StateModuleAPI } func NewIndexerMessageValidator(self peer.ID, chainApi full.ChainModuleAPI, stateApi full.StateModuleAPI) *IndexerMessageValidator { - peerCache, _ := lru.New2Q(8192) + peerCache, _ := lru.New2Q[address.Address, *peerMsgInfo](8192) return &IndexerMessageValidator{ self: self, @@ -497,7 +495,7 @@ func (v *IndexerMessageValidator) Validate(ctx context.Context, pid peer.ID, msg return pubsub.ValidationIgnore } - idxrMsg := dtsync.Message{} + idxrMsg := message.Message{} err := idxrMsg.UnmarshalCBOR(bytes.NewBuffer(msg.Data)) if err != nil { log.Errorw("Could not decode indexer pubsub message", "err", err) @@ -515,15 +513,12 @@ func (v *IndexerMessageValidator) Validate(ctx context.Context, pid peer.ID, msg return pubsub.ValidationReject } - minerID := minerAddr.String() msgCid := idxrMsg.Cid var msgInfo *peerMsgInfo - val, ok := v.peerCache.Get(minerID) + msgInfo, ok := v.peerCache.Get(minerAddr) if !ok { msgInfo = &peerMsgInfo{} - } else { - msgInfo = val.(*peerMsgInfo) } // Lock this peer's message info. @@ -544,7 +539,7 @@ func (v *IndexerMessageValidator) Validate(ctx context.Context, pid peer.ID, msg // Check that the miner ID maps to the peer that sent the message. err = v.authenticateMessage(ctx, minerAddr, originPeer) if err != nil { - log.Warnw("cannot authenticate messsage", "err", err, "peer", originPeer, "minerID", minerID) + log.Warnw("cannot authenticate messsage", "err", err, "peer", originPeer, "minerID", minerAddr) stats.Record(ctx, metrics.IndexerMessageValidationFailure.M(1)) return pubsub.ValidationReject } @@ -554,7 +549,7 @@ func (v *IndexerMessageValidator) Validate(ctx context.Context, pid peer.ID, msg // messages from the same peer are handled concurrently, there is a // small chance that one msgInfo could replace the other here when // the info is first cached. This is OK, so no need to prevent it. 
- v.peerCache.Add(minerID, msgInfo) + v.peerCache.Add(minerAddr, msgInfo) } } diff --git a/chain/sub/incoming_test.go b/chain/sub/incoming_test.go index d17b08825..03f880c58 100644 --- a/chain/sub/incoming_test.go +++ b/chain/sub/incoming_test.go @@ -9,12 +9,12 @@ import ( "github.com/golang/mock/gomock" "github.com/ipfs/go-cid" blocks "github.com/ipfs/go-libipfs/blocks" + "github.com/ipni/storetheindex/announce/message" pubsub "github.com/libp2p/go-libp2p-pubsub" pb "github.com/libp2p/go-libp2p-pubsub/pb" "github.com/libp2p/go-libp2p/core/peer" "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-legs/dtsync" "github.com/filecoin-project/lotus/api/mocks" "github.com/filecoin-project/lotus/chain/types" @@ -105,7 +105,7 @@ func TestIndexerMessageValidator_Validate(t *testing.T) { mc := gomock.NewController(t) node := mocks.NewMockFullNode(mc) subject := NewIndexerMessageValidator(peer.ID(tc.selfPID), node, node) - message := dtsync.Message{ + message := message.Message{ Cid: validCid, Addrs: nil, ExtraData: tc.extraData, diff --git a/chain/types/cbor_gen.go b/chain/types/cbor_gen.go index 314b2be6c..90d1a14c5 100644 --- a/chain/types/cbor_gen.go +++ b/chain/types/cbor_gen.go @@ -7,6 +7,7 @@ import ( "io" "math" "sort" + time "time" cid "github.com/ipfs/go-cid" cbg "github.com/whyrusleeping/cbor-gen" @@ -15,6 +16,7 @@ import ( address "github.com/filecoin-project/go-address" abi "github.com/filecoin-project/go-state-types/abi" crypto "github.com/filecoin-project/go-state-types/crypto" + exitcode "github.com/filecoin-project/go-state-types/exitcode" proof "github.com/filecoin-project/go-state-types/proof" ) @@ -338,7 +340,7 @@ func (t *BlockHeader) UnmarshalCBOR(r io.Reader) (err error) { case cbg.MajNegativeInt: extraI = int64(extra) if extraI < 0 { - return fmt.Errorf("int64 negative oveflow") + return fmt.Errorf("int64 negative overflow") } extraI = -1 - extraI default: @@ -616,7 +618,7 @@ func (t *ElectionProof) UnmarshalCBOR(r io.Reader) (err error) { case cbg.MajNegativeInt: extraI = int64(extra) if extraI < 0 { - return fmt.Errorf("int64 negative oveflow") + return fmt.Errorf("int64 negative overflow") } extraI = -1 - extraI default: @@ -826,7 +828,7 @@ func (t *Message) UnmarshalCBOR(r io.Reader) (err error) { case cbg.MajNegativeInt: extraI = int64(extra) if extraI < 0 { - return fmt.Errorf("int64 negative oveflow") + return fmt.Errorf("int64 negative overflow") } extraI = -1 - extraI default: @@ -1589,7 +1591,7 @@ func (t *ExpTipSet) UnmarshalCBOR(r io.Reader) (err error) { case cbg.MajNegativeInt: extraI = int64(extra) if extraI < 0 { - return fmt.Errorf("int64 negative oveflow") + return fmt.Errorf("int64 negative overflow") } extraI = -1 - extraI default: @@ -2078,3 +2080,659 @@ func (t *EventEntry) UnmarshalCBOR(r io.Reader) (err error) { } return nil } + +var lengthBufGasTrace = []byte{133} + +func (t *GasTrace) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write(lengthBufGasTrace); err != nil { + return err + } + + // t.Name (string) (string) + if len(t.Name) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.Name was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Name))); err != nil { + return err + } + if _, err := io.WriteString(w, string(t.Name)); err != nil { + return err + } + + // t.TotalGas (int64) (int64) + if t.TotalGas >= 0 { + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, 
uint64(t.TotalGas)); err != nil { + return err + } + } else { + if err := cw.WriteMajorTypeHeader(cbg.MajNegativeInt, uint64(-t.TotalGas-1)); err != nil { + return err + } + } + + // t.ComputeGas (int64) (int64) + if t.ComputeGas >= 0 { + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.ComputeGas)); err != nil { + return err + } + } else { + if err := cw.WriteMajorTypeHeader(cbg.MajNegativeInt, uint64(-t.ComputeGas-1)); err != nil { + return err + } + } + + // t.StorageGas (int64) (int64) + if t.StorageGas >= 0 { + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.StorageGas)); err != nil { + return err + } + } else { + if err := cw.WriteMajorTypeHeader(cbg.MajNegativeInt, uint64(-t.StorageGas-1)); err != nil { + return err + } + } + + // t.TimeTaken (time.Duration) (int64) + if t.TimeTaken >= 0 { + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.TimeTaken)); err != nil { + return err + } + } else { + if err := cw.WriteMajorTypeHeader(cbg.MajNegativeInt, uint64(-t.TimeTaken-1)); err != nil { + return err + } + } + return nil +} + +func (t *GasTrace) UnmarshalCBOR(r io.Reader) (err error) { + *t = GasTrace{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 5 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.Name (string) (string) + + { + sval, err := cbg.ReadString(cr) + if err != nil { + return err + } + + t.Name = string(sval) + } + // t.TotalGas (int64) (int64) + { + maj, extra, err := cr.ReadHeader() + var extraI int64 + if err != nil { + return err + } + switch maj { + case cbg.MajUnsignedInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 positive overflow") + } + case cbg.MajNegativeInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 negative overflow") + } + extraI = -1 - extraI + default: + return fmt.Errorf("wrong type for int64 field: %d", maj) + } + + t.TotalGas = int64(extraI) + } + // t.ComputeGas (int64) (int64) + { + maj, extra, err := cr.ReadHeader() + var extraI int64 + if err != nil { + return err + } + switch maj { + case cbg.MajUnsignedInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 positive overflow") + } + case cbg.MajNegativeInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 negative overflow") + } + extraI = -1 - extraI + default: + return fmt.Errorf("wrong type for int64 field: %d", maj) + } + + t.ComputeGas = int64(extraI) + } + // t.StorageGas (int64) (int64) + { + maj, extra, err := cr.ReadHeader() + var extraI int64 + if err != nil { + return err + } + switch maj { + case cbg.MajUnsignedInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 positive overflow") + } + case cbg.MajNegativeInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 negative overflow") + } + extraI = -1 - extraI + default: + return fmt.Errorf("wrong type for int64 field: %d", maj) + } + + t.StorageGas = int64(extraI) + } + // t.TimeTaken (time.Duration) (int64) + { + maj, extra, err := cr.ReadHeader() + var extraI int64 + if err != nil { + return err + } + switch maj { + case cbg.MajUnsignedInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 positive overflow") + } + case cbg.MajNegativeInt: + extraI = int64(extra) + if extraI < 0 { + 
return fmt.Errorf("int64 negative overflow") + } + extraI = -1 - extraI + default: + return fmt.Errorf("wrong type for int64 field: %d", maj) + } + + t.TimeTaken = time.Duration(extraI) + } + return nil +} + +var lengthBufMessageTrace = []byte{134} + +func (t *MessageTrace) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write(lengthBufMessageTrace); err != nil { + return err + } + + // t.From (address.Address) (struct) + if err := t.From.MarshalCBOR(cw); err != nil { + return err + } + + // t.To (address.Address) (struct) + if err := t.To.MarshalCBOR(cw); err != nil { + return err + } + + // t.Value (big.Int) (struct) + if err := t.Value.MarshalCBOR(cw); err != nil { + return err + } + + // t.Method (abi.MethodNum) (uint64) + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.Method)); err != nil { + return err + } + + // t.Params ([]uint8) (slice) + if len(t.Params) > cbg.ByteArrayMaxLen { + return xerrors.Errorf("Byte array in field t.Params was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajByteString, uint64(len(t.Params))); err != nil { + return err + } + + if _, err := cw.Write(t.Params[:]); err != nil { + return err + } + + // t.ParamsCodec (uint64) (uint64) + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.ParamsCodec)); err != nil { + return err + } + + return nil +} + +func (t *MessageTrace) UnmarshalCBOR(r io.Reader) (err error) { + *t = MessageTrace{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 6 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.From (address.Address) (struct) + + { + + if err := t.From.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.From: %w", err) + } + + } + // t.To (address.Address) (struct) + + { + + if err := t.To.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.To: %w", err) + } + + } + // t.Value (big.Int) (struct) + + { + + if err := t.Value.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.Value: %w", err) + } + + } + // t.Method (abi.MethodNum) (uint64) + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.Method = abi.MethodNum(extra) + + } + // t.Params ([]uint8) (slice) + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + + if extra > cbg.ByteArrayMaxLen { + return fmt.Errorf("t.Params: byte array too large (%d)", extra) + } + if maj != cbg.MajByteString { + return fmt.Errorf("expected byte array") + } + + if extra > 0 { + t.Params = make([]uint8, extra) + } + + if _, err := io.ReadFull(cr, t.Params[:]); err != nil { + return err + } + // t.ParamsCodec (uint64) (uint64) + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.ParamsCodec = uint64(extra) + + } + return nil +} + +var lengthBufReturnTrace = []byte{131} + +func (t *ReturnTrace) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write(lengthBufReturnTrace); err 
!= nil { + return err + } + + // t.ExitCode (exitcode.ExitCode) (int64) + if t.ExitCode >= 0 { + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.ExitCode)); err != nil { + return err + } + } else { + if err := cw.WriteMajorTypeHeader(cbg.MajNegativeInt, uint64(-t.ExitCode-1)); err != nil { + return err + } + } + + // t.Return ([]uint8) (slice) + if len(t.Return) > cbg.ByteArrayMaxLen { + return xerrors.Errorf("Byte array in field t.Return was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajByteString, uint64(len(t.Return))); err != nil { + return err + } + + if _, err := cw.Write(t.Return[:]); err != nil { + return err + } + + // t.ReturnCodec (uint64) (uint64) + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.ReturnCodec)); err != nil { + return err + } + + return nil +} + +func (t *ReturnTrace) UnmarshalCBOR(r io.Reader) (err error) { + *t = ReturnTrace{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 3 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.ExitCode (exitcode.ExitCode) (int64) + { + maj, extra, err := cr.ReadHeader() + var extraI int64 + if err != nil { + return err + } + switch maj { + case cbg.MajUnsignedInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 positive overflow") + } + case cbg.MajNegativeInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 negative overflow") + } + extraI = -1 - extraI + default: + return fmt.Errorf("wrong type for int64 field: %d", maj) + } + + t.ExitCode = exitcode.ExitCode(extraI) + } + // t.Return ([]uint8) (slice) + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + + if extra > cbg.ByteArrayMaxLen { + return fmt.Errorf("t.Return: byte array too large (%d)", extra) + } + if maj != cbg.MajByteString { + return fmt.Errorf("expected byte array") + } + + if extra > 0 { + t.Return = make([]uint8, extra) + } + + if _, err := io.ReadFull(cr, t.Return[:]); err != nil { + return err + } + // t.ReturnCodec (uint64) (uint64) + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.ReturnCodec = uint64(extra) + + } + return nil +} + +var lengthBufExecutionTrace = []byte{132} + +func (t *ExecutionTrace) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write(lengthBufExecutionTrace); err != nil { + return err + } + + // t.Msg (types.MessageTrace) (struct) + if err := t.Msg.MarshalCBOR(cw); err != nil { + return err + } + + // t.MsgRct (types.ReturnTrace) (struct) + if err := t.MsgRct.MarshalCBOR(cw); err != nil { + return err + } + + // t.GasCharges ([]*types.GasTrace) (slice) + if len(t.GasCharges) > 1000000000 { + return xerrors.Errorf("Slice value in field t.GasCharges was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajArray, uint64(len(t.GasCharges))); err != nil { + return err + } + for _, v := range t.GasCharges { + if err := v.MarshalCBOR(cw); err != nil { + return err + } + } + + // t.Subcalls ([]types.ExecutionTrace) (slice) + if len(t.Subcalls) > 1000000000 { + return xerrors.Errorf("Slice value in field t.Subcalls was too long") + } + + if err := 
cw.WriteMajorTypeHeader(cbg.MajArray, uint64(len(t.Subcalls))); err != nil { + return err + } + for _, v := range t.Subcalls { + if err := v.MarshalCBOR(cw); err != nil { + return err + } + } + return nil +} + +func (t *ExecutionTrace) UnmarshalCBOR(r io.Reader) (err error) { + *t = ExecutionTrace{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 4 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.Msg (types.MessageTrace) (struct) + + { + + if err := t.Msg.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.Msg: %w", err) + } + + } + // t.MsgRct (types.ReturnTrace) (struct) + + { + + if err := t.MsgRct.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.MsgRct: %w", err) + } + + } + // t.GasCharges ([]*types.GasTrace) (slice) + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + + if extra > 1000000000 { + return fmt.Errorf("t.GasCharges: array too large (%d)", extra) + } + + if maj != cbg.MajArray { + return fmt.Errorf("expected cbor array") + } + + if extra > 0 { + t.GasCharges = make([]*GasTrace, extra) + } + + for i := 0; i < int(extra); i++ { + + var v GasTrace + if err := v.UnmarshalCBOR(cr); err != nil { + return err + } + + t.GasCharges[i] = &v + } + + // t.Subcalls ([]types.ExecutionTrace) (slice) + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + + if extra > 1000000000 { + return fmt.Errorf("t.Subcalls: array too large (%d)", extra) + } + + if maj != cbg.MajArray { + return fmt.Errorf("expected cbor array") + } + + if extra > 0 { + t.Subcalls = make([]ExecutionTrace, extra) + } + + for i := 0; i < int(extra); i++ { + + var v ExecutionTrace + if err := v.UnmarshalCBOR(cr); err != nil { + return err + } + + t.Subcalls[i] = v + } + + return nil +} diff --git a/chain/types/ethtypes/eth_transactions.go b/chain/types/ethtypes/eth_transactions.go index 7afde4bd2..6c13c5bf6 100644 --- a/chain/types/ethtypes/eth_transactions.go +++ b/chain/types/ethtypes/eth_transactions.go @@ -44,14 +44,6 @@ type EthTx struct { S EthBigInt `json:"s"` } -func (tx *EthTx) Reward(blkBaseFee big.Int) EthBigInt { - availablePriorityFee := big.Sub(big.Int(tx.MaxFeePerGas), blkBaseFee) - if big.Cmp(big.Int(tx.MaxPriorityFeePerGas), availablePriorityFee) <= 0 { - return tx.MaxPriorityFeePerGas - } - return EthBigInt(availablePriorityFee) -} - type EthTxArgs struct { ChainID int `json:"chainId"` Nonce int `json:"nonce"` diff --git a/chain/types/ethtypes/eth_types.go b/chain/types/ethtypes/eth_types.go index ddda91ea0..f157c7f94 100644 --- a/chain/types/ethtypes/eth_types.go +++ b/chain/types/ethtypes/eth_types.go @@ -295,17 +295,21 @@ func EthAddressFromPubKey(pubk []byte) ([]byte, error) { return ethAddr, nil } +var maskedIDPrefix = [20 - 8]byte{0xff} + func IsEthAddress(addr address.Address) bool { if addr.Protocol() != address.Delegated { return false } payload := addr.Payload() - namespace, _, err := varint.FromUvarint(payload) + namespace, offset, err := varint.FromUvarint(payload) if err != nil { return false } - return namespace == builtintypes.EthereumAddressManagerActorID + payload = payload[offset:] + + return namespace == builtintypes.EthereumAddressManagerActorID && len(payload) == 20 && !bytes.HasPrefix(payload, maskedIDPrefix[:]) } func 
EthAddressFromFilecoinAddress(addr address.Address) (EthAddress, error) { @@ -326,9 +330,17 @@ func EthAddressFromFilecoinAddress(addr address.Address) (EthAddress, error) { return EthAddress{}, xerrors.Errorf("invalid delegated address namespace in: %s", addr) } payload = payload[n:] - if namespace == builtintypes.EthereumAddressManagerActorID { - return CastEthAddress(payload) + if namespace != builtintypes.EthereumAddressManagerActorID { + return EthAddress{}, ErrInvalidAddress } + ethAddr, err := CastEthAddress(payload) + if err != nil { + return EthAddress{}, err + } + if ethAddr.IsMaskedID() { + return EthAddress{}, xerrors.Errorf("f410f addresses cannot embed masked-ID payloads: %s", ethAddr) + } + return ethAddr, nil } return EthAddress{}, ErrInvalidAddress } @@ -376,8 +388,7 @@ func (ea *EthAddress) UnmarshalJSON(b []byte) error { } func (ea EthAddress) IsMaskedID() bool { - idmask := [12]byte{0xff} - return bytes.Equal(ea[:12], idmask[:]) + return bytes.HasPrefix(ea[:], maskedIDPrefix[:]) } func (ea EthAddress) ToFilecoinAddress() (address.Address, error) { diff --git a/chain/types/ethtypes/eth_types_test.go b/chain/types/ethtypes/eth_types_test.go index 118fbc901..4a73184c2 100644 --- a/chain/types/ethtypes/eth_types_test.go +++ b/chain/types/ethtypes/eth_types_test.go @@ -9,6 +9,7 @@ import ( "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/go-state-types/builtin" ) type TestCase struct { @@ -178,6 +179,20 @@ func TestParseEthAddr(t *testing.T) { } } +func TestMaskedIDInF4(t *testing.T) { + addr, err := address.NewIDAddress(100) + require.NoError(t, err) + + eaddr, err := EthAddressFromFilecoinAddress(addr) + require.NoError(t, err) + + badaddr, err := address.NewDelegatedAddress(builtin.EthereumAddressManagerActorID, eaddr[:]) + require.NoError(t, err) + + _, err = EthAddressFromFilecoinAddress(badaddr) + require.Error(t, err) +} + func TestUnmarshalEthCall(t *testing.T) { data := `{"from":"0x4D6D86b31a112a05A473c4aE84afaF873f632325","to":"0xFe01CC39f5Ae8553D6914DBb9dC27D219fa22D7f","gas":"0x5","gasPrice":"0x6","value":"0x123","data":""}` diff --git a/chain/types/execresult.go b/chain/types/execresult.go index 98d06a390..2a25d22e2 100644 --- a/chain/types/execresult.go +++ b/chain/types/execresult.go @@ -2,44 +2,41 @@ package types import ( "encoding/json" - "fmt" - "regexp" - "runtime" - "strings" "time" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/exitcode" ) -type ExecutionTrace struct { - Msg *Message - MsgRct *MessageReceipt - Error string - Duration time.Duration - GasCharges []*GasTrace - - Subcalls []ExecutionTrace -} - type GasTrace struct { - Name string - - Location []Loc `json:"loc"` - TotalGas int64 `json:"tg"` - ComputeGas int64 `json:"cg"` - StorageGas int64 `json:"sg"` - TotalVirtualGas int64 `json:"vtg"` - VirtualComputeGas int64 `json:"vcg"` - VirtualStorageGas int64 `json:"vsg"` - - TimeTaken time.Duration `json:"tt"` - Extra interface{} `json:"ex,omitempty"` - - Callers []uintptr `json:"-"` + Name string + TotalGas int64 `json:"tg"` + ComputeGas int64 `json:"cg"` + StorageGas int64 `json:"sg"` + TimeTaken time.Duration `json:"tt"` } -type Loc struct { - File string - Line int - Function string +type MessageTrace struct { + From address.Address + To address.Address + Value abi.TokenAmount + Method abi.MethodNum + Params []byte + ParamsCodec uint64 +} + +type ReturnTrace struct { + ExitCode 
exitcode.ExitCode + Return []byte + ReturnCodec uint64 +} + +type ExecutionTrace struct { + Msg MessageTrace + MsgRct ReturnTrace + GasCharges []*GasTrace `cborgen:"maxlen=1000000000"` + Subcalls []ExecutionTrace `cborgen:"maxlen=1000000000"` } func (et ExecutionTrace) SumGas() GasTrace { @@ -52,71 +49,13 @@ func SumGas(charges []*GasTrace) GasTrace { out.TotalGas += gc.TotalGas out.ComputeGas += gc.ComputeGas out.StorageGas += gc.StorageGas - - out.TotalVirtualGas += gc.TotalVirtualGas - out.VirtualComputeGas += gc.VirtualComputeGas - out.VirtualStorageGas += gc.VirtualStorageGas } return out } -func (l Loc) Show() bool { - ignorePrefix := []string{ - "reflect.", - "github.com/filecoin-project/lotus/chain/vm.(*Invoker).transform", - "github.com/filecoin-project/go-amt-ipld/", - } - for _, pre := range ignorePrefix { - if strings.HasPrefix(l.Function, pre) { - return false - } - } - return true -} -func (l Loc) String() string { - file := strings.Split(l.File, "/") - - fn := strings.Split(l.Function, "/") - var fnpkg string - if len(fn) > 2 { - fnpkg = strings.Join(fn[len(fn)-2:], "/") - } else { - fnpkg = l.Function - } - - return fmt.Sprintf("%s@%s:%d", fnpkg, file[len(file)-1], l.Line) -} - -var importantRegex = regexp.MustCompile(`github.com/filecoin-project/specs-actors/(v\d+/)?actors/builtin`) - -func (l Loc) Important() bool { - return importantRegex.MatchString(l.Function) -} - func (gt *GasTrace) MarshalJSON() ([]byte, error) { type GasTraceCopy GasTrace - if len(gt.Location) == 0 { - if len(gt.Callers) != 0 { - frames := runtime.CallersFrames(gt.Callers) - for { - frame, more := frames.Next() - if frame.Function == "github.com/filecoin-project/lotus/chain/vm.(*VM).ApplyMessage" { - break - } - l := Loc{ - File: frame.File, - Line: frame.Line, - Function: frame.Function, - } - gt.Location = append(gt.Location, l) - if !more { - break - } - } - } - } - cpy := (*GasTraceCopy)(gt) return json.Marshal(cpy) } diff --git a/chain/types/message.go b/chain/types/message.go index a25cd05b6..4304ba659 100644 --- a/chain/types/message.go +++ b/chain/types/message.go @@ -207,6 +207,10 @@ func (m *Message) ValidForBlockInclusion(minGas int64, version network.Version) return xerrors.Errorf("'GasLimit' field cannot be greater than a block's gas limit (%d > %d)", m.GasLimit, build.BlockGasLimit) } + if m.GasLimit <= 0 { + return xerrors.Errorf("'GasLimit' field %d must be positive", m.GasLimit) + } + // since prices might vary with time, this is technically semantic validation if m.GasLimit < minGas { return xerrors.Errorf("'GasLimit' field cannot be less than the cost of storing a message on chain %d < %d", m.GasLimit, minGas) @@ -215,4 +219,17 @@ func (m *Message) ValidForBlockInclusion(minGas int64, version network.Version) return nil } +// EffectiveGasPremium returns the effective gas premium claimable by the miner +// given the supplied base fee. +// +// Filecoin clamps the gas premium at GasFeeCap - BaseFee, if lower than the +// specified premium. 
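+// For example (illustrative values only, not from the spec): with GasFeeCap = 100 and
+// GasPremium = 10, a base fee of 95 leaves only 100 - 95 = 5 claimable, so the effective
+// premium is clamped to 5; with a base fee of 80 the full premium of 10 is returned.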
+func (m *Message) EffectiveGasPremium(baseFee abi.TokenAmount) abi.TokenAmount { + available := big.Sub(m.GasFeeCap, baseFee) + if big.Cmp(m.GasPremium, available) <= 0 { + return m.GasPremium + } + return available +} + const TestGasLimit = 100e6 diff --git a/chain/types/tipset.go b/chain/types/tipset.go index c1aa90fc9..047a1c00e 100644 --- a/chain/types/tipset.go +++ b/chain/types/tipset.go @@ -234,6 +234,10 @@ func (ts *TipSet) MinTicketBlock() *BlockHeader { return min } +func (ts *TipSet) ParentMessageReceipts() cid.Cid { + return ts.blks[0].ParentMessageReceipts +} + func (ts *TipSet) ParentState() cid.Cid { return ts.blks[0].ParentStateRoot } diff --git a/chain/vm/cbor_gen.go b/chain/vm/cbor_gen.go deleted file mode 100644 index edcf06560..000000000 --- a/chain/vm/cbor_gen.go +++ /dev/null @@ -1,391 +0,0 @@ -// Code generated by github.com/whyrusleeping/cbor-gen. DO NOT EDIT. - -package vm - -import ( - "fmt" - "io" - "math" - "sort" - - cid "github.com/ipfs/go-cid" - cbg "github.com/whyrusleeping/cbor-gen" - xerrors "golang.org/x/xerrors" - - types "github.com/filecoin-project/lotus/chain/types" -) - -var _ = xerrors.Errorf -var _ = cid.Undef -var _ = math.E -var _ = sort.Sort - -var lengthBufFvmExecutionTrace = []byte{133} - -func (t *FvmExecutionTrace) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - - cw := cbg.NewCborWriter(w) - - if _, err := cw.Write(lengthBufFvmExecutionTrace); err != nil { - return err - } - - // t.Msg (types.Message) (struct) - if err := t.Msg.MarshalCBOR(cw); err != nil { - return err - } - - // t.MsgRct (types.MessageReceipt) (struct) - if err := t.MsgRct.MarshalCBOR(cw); err != nil { - return err - } - - // t.Error (string) (string) - if len(t.Error) > cbg.MaxLength { - return xerrors.Errorf("Value in field t.Error was too long") - } - - if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Error))); err != nil { - return err - } - if _, err := io.WriteString(w, string(t.Error)); err != nil { - return err - } - - // t.GasCharges ([]vm.FvmGasCharge) (slice) - if len(t.GasCharges) > 1000000000 { - return xerrors.Errorf("Slice value in field t.GasCharges was too long") - } - - if err := cw.WriteMajorTypeHeader(cbg.MajArray, uint64(len(t.GasCharges))); err != nil { - return err - } - for _, v := range t.GasCharges { - if err := v.MarshalCBOR(cw); err != nil { - return err - } - } - - // t.Subcalls ([]vm.FvmExecutionTrace) (slice) - if len(t.Subcalls) > 1000000000 { - return xerrors.Errorf("Slice value in field t.Subcalls was too long") - } - - if err := cw.WriteMajorTypeHeader(cbg.MajArray, uint64(len(t.Subcalls))); err != nil { - return err - } - for _, v := range t.Subcalls { - if err := v.MarshalCBOR(cw); err != nil { - return err - } - } - return nil -} - -func (t *FvmExecutionTrace) UnmarshalCBOR(r io.Reader) (err error) { - *t = FvmExecutionTrace{} - - cr := cbg.NewCborReader(r) - - maj, extra, err := cr.ReadHeader() - if err != nil { - return err - } - defer func() { - if err == io.EOF { - err = io.ErrUnexpectedEOF - } - }() - - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 5 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.Msg (types.Message) (struct) - - { - - b, err := cr.ReadByte() - if err != nil { - return err - } - if b != cbg.CborNull[0] { - if err := cr.UnreadByte(); err != nil { - return err - } - t.Msg = new(types.Message) - if err := t.Msg.UnmarshalCBOR(cr); err != nil { - return 
xerrors.Errorf("unmarshaling t.Msg pointer: %w", err) - } - } - - } - // t.MsgRct (types.MessageReceipt) (struct) - - { - - b, err := cr.ReadByte() - if err != nil { - return err - } - if b != cbg.CborNull[0] { - if err := cr.UnreadByte(); err != nil { - return err - } - t.MsgRct = new(types.MessageReceipt) - if err := t.MsgRct.UnmarshalCBOR(cr); err != nil { - return xerrors.Errorf("unmarshaling t.MsgRct pointer: %w", err) - } - } - - } - // t.Error (string) (string) - - { - sval, err := cbg.ReadString(cr) - if err != nil { - return err - } - - t.Error = string(sval) - } - // t.GasCharges ([]vm.FvmGasCharge) (slice) - - maj, extra, err = cr.ReadHeader() - if err != nil { - return err - } - - if extra > 1000000000 { - return fmt.Errorf("t.GasCharges: array too large (%d)", extra) - } - - if maj != cbg.MajArray { - return fmt.Errorf("expected cbor array") - } - - if extra > 0 { - t.GasCharges = make([]FvmGasCharge, extra) - } - - for i := 0; i < int(extra); i++ { - - var v FvmGasCharge - if err := v.UnmarshalCBOR(cr); err != nil { - return err - } - - t.GasCharges[i] = v - } - - // t.Subcalls ([]vm.FvmExecutionTrace) (slice) - - maj, extra, err = cr.ReadHeader() - if err != nil { - return err - } - - if extra > 1000000000 { - return fmt.Errorf("t.Subcalls: array too large (%d)", extra) - } - - if maj != cbg.MajArray { - return fmt.Errorf("expected cbor array") - } - - if extra > 0 { - t.Subcalls = make([]FvmExecutionTrace, extra) - } - - for i := 0; i < int(extra); i++ { - - var v FvmExecutionTrace - if err := v.UnmarshalCBOR(cr); err != nil { - return err - } - - t.Subcalls[i] = v - } - - return nil -} - -var lengthBufFvmGasCharge = []byte{132} - -func (t *FvmGasCharge) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - - cw := cbg.NewCborWriter(w) - - if _, err := cw.Write(lengthBufFvmGasCharge); err != nil { - return err - } - - // t.Name (string) (string) - if len(t.Name) > cbg.MaxLength { - return xerrors.Errorf("Value in field t.Name was too long") - } - - if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Name))); err != nil { - return err - } - if _, err := io.WriteString(w, string(t.Name)); err != nil { - return err - } - - // t.TotalGas (int64) (int64) - if t.TotalGas >= 0 { - if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.TotalGas)); err != nil { - return err - } - } else { - if err := cw.WriteMajorTypeHeader(cbg.MajNegativeInt, uint64(-t.TotalGas-1)); err != nil { - return err - } - } - - // t.ComputeGas (int64) (int64) - if t.ComputeGas >= 0 { - if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.ComputeGas)); err != nil { - return err - } - } else { - if err := cw.WriteMajorTypeHeader(cbg.MajNegativeInt, uint64(-t.ComputeGas-1)); err != nil { - return err - } - } - - // t.StorageGas (int64) (int64) - if t.StorageGas >= 0 { - if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.StorageGas)); err != nil { - return err - } - } else { - if err := cw.WriteMajorTypeHeader(cbg.MajNegativeInt, uint64(-t.StorageGas-1)); err != nil { - return err - } - } - return nil -} - -func (t *FvmGasCharge) UnmarshalCBOR(r io.Reader) (err error) { - *t = FvmGasCharge{} - - cr := cbg.NewCborReader(r) - - maj, extra, err := cr.ReadHeader() - if err != nil { - return err - } - defer func() { - if err == io.EOF { - err = io.ErrUnexpectedEOF - } - }() - - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 4 { - return fmt.Errorf("cbor input had wrong 
number of fields") - } - - // t.Name (string) (string) - - { - sval, err := cbg.ReadString(cr) - if err != nil { - return err - } - - t.Name = string(sval) - } - // t.TotalGas (int64) (int64) - { - maj, extra, err := cr.ReadHeader() - var extraI int64 - if err != nil { - return err - } - switch maj { - case cbg.MajUnsignedInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 positive overflow") - } - case cbg.MajNegativeInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 negative oveflow") - } - extraI = -1 - extraI - default: - return fmt.Errorf("wrong type for int64 field: %d", maj) - } - - t.TotalGas = int64(extraI) - } - // t.ComputeGas (int64) (int64) - { - maj, extra, err := cr.ReadHeader() - var extraI int64 - if err != nil { - return err - } - switch maj { - case cbg.MajUnsignedInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 positive overflow") - } - case cbg.MajNegativeInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 negative oveflow") - } - extraI = -1 - extraI - default: - return fmt.Errorf("wrong type for int64 field: %d", maj) - } - - t.ComputeGas = int64(extraI) - } - // t.StorageGas (int64) (int64) - { - maj, extra, err := cr.ReadHeader() - var extraI int64 - if err != nil { - return err - } - switch maj { - case cbg.MajUnsignedInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 positive overflow") - } - case cbg.MajNegativeInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 negative oveflow") - } - extraI = -1 - extraI - default: - return fmt.Errorf("wrong type for int64 field: %d", maj) - } - - t.StorageGas = int64(extraI) - } - return nil -} diff --git a/chain/vm/fvm.go b/chain/vm/fvm.go index 8e78e58d9..7c79972c7 100644 --- a/chain/vm/fvm.go +++ b/chain/vm/fvm.go @@ -51,57 +51,6 @@ type FvmExtern struct { base cid.Cid } -type FvmGasCharge struct { - Name string - TotalGas int64 - ComputeGas int64 - StorageGas int64 -} - -// This may eventually become identical to ExecutionTrace, but we can make incremental progress towards that -type FvmExecutionTrace struct { - Msg *types.Message - MsgRct *types.MessageReceipt - Error string - GasCharges []FvmGasCharge `cborgen:"maxlen=1000000000"` - Subcalls []FvmExecutionTrace `cborgen:"maxlen=1000000000"` -} - -func (t *FvmExecutionTrace) ToExecutionTrace() types.ExecutionTrace { - if t == nil { - return types.ExecutionTrace{} - } - - ret := types.ExecutionTrace{ - Msg: t.Msg, - MsgRct: t.MsgRct, - Error: t.Error, - Subcalls: nil, // Should be nil when there are no subcalls for backwards compatibility - } - - if len(t.GasCharges) > 0 { - ret.GasCharges = make([]*types.GasTrace, len(t.GasCharges)) - for i, v := range t.GasCharges { - ret.GasCharges[i] = &types.GasTrace{ - Name: v.Name, - TotalGas: v.TotalGas, - ComputeGas: v.ComputeGas, - StorageGas: v.StorageGas, - } - } - } - - if len(t.Subcalls) > 0 { - ret.Subcalls = make([]types.ExecutionTrace, len(t.Subcalls)) - - for i, v := range t.Subcalls { - ret.Subcalls[i] = v.ToExecutionTrace() - } - } - - return ret -} - func (x *FvmExtern) TipsetCid(ctx context.Context, epoch abi.ChainEpoch) (cid.Cid, error) { tsk, err := x.tsGet(ctx, epoch) if err != nil { @@ -487,19 +436,9 @@ func (vm *FVM) ApplyMessage(ctx context.Context, cmsg types.ChainMsg) (*ApplyRet var et types.ExecutionTrace if len(ret.ExecTraceBytes) != 0 { - var fvmEt FvmExecutionTrace - if err = fvmEt.UnmarshalCBOR(bytes.NewReader(ret.ExecTraceBytes)); err != nil { + if err = 
et.UnmarshalCBOR(bytes.NewReader(ret.ExecTraceBytes)); err != nil { return nil, xerrors.Errorf("failed to unmarshal exectrace: %w", err) } - et = fvmEt.ToExecutionTrace() - } - - // Set the top-level exectrace info from the message and receipt for backwards compatibility - et.Msg = vmMsg - et.MsgRct = &receipt - et.Duration = duration - if aerr != nil { - et.Error = aerr.Error() } applyRet := &ApplyRet{ @@ -562,18 +501,9 @@ func (vm *FVM) ApplyImplicitMessage(ctx context.Context, cmsg *types.Message) (* var et types.ExecutionTrace if len(ret.ExecTraceBytes) != 0 { - var fvmEt FvmExecutionTrace - if err = fvmEt.UnmarshalCBOR(bytes.NewReader(ret.ExecTraceBytes)); err != nil { + if err = et.UnmarshalCBOR(bytes.NewReader(ret.ExecTraceBytes)); err != nil { return nil, xerrors.Errorf("failed to unmarshal exectrace: %w", err) } - et = fvmEt.ToExecutionTrace() - } else { - et.Msg = vmMsg - et.MsgRct = &receipt - et.Duration = duration - if aerr != nil { - et.Error = aerr.Error() - } } applyRet := &ApplyRet{ diff --git a/chain/vm/runtime.go b/chain/vm/runtime.go index daa55e4f4..a5b108238 100644 --- a/chain/vm/runtime.go +++ b/chain/vm/runtime.go @@ -6,7 +6,6 @@ import ( "encoding/binary" "fmt" "os" - gruntime "runtime" "time" "github.com/ipfs/go-cid" @@ -571,35 +570,18 @@ func (rt *Runtime) chargeGasFunc(skip int) func(GasCharge) { func (rt *Runtime) chargeGasInternal(gas GasCharge, skip int) aerrors.ActorError { toUse := gas.Total() if EnableDetailedTracing { - var callers [10]uintptr - - cout := gruntime.Callers(2+skip, callers[:]) - now := build.Clock.Now() if rt.lastGasCharge != nil { rt.lastGasCharge.TimeTaken = now.Sub(rt.lastGasChargeTime) } gasTrace := types.GasTrace{ - Name: gas.Name, - Extra: gas.Extra, + Name: gas.Name, TotalGas: toUse, ComputeGas: gas.ComputeGas, StorageGas: gas.StorageGas, - - VirtualComputeGas: gas.VirtualCompute, - VirtualStorageGas: gas.VirtualStorage, - - Callers: callers[:cout], } - if gasTrace.VirtualStorageGas == 0 { - gasTrace.VirtualStorageGas = gasTrace.StorageGas - } - if gasTrace.VirtualComputeGas == 0 { - gasTrace.VirtualComputeGas = gasTrace.ComputeGas - } - gasTrace.TotalVirtualGas = gasTrace.VirtualComputeGas + gasTrace.VirtualStorageGas rt.executionTrace.GasCharges = append(rt.executionTrace.GasCharges, &gasTrace) rt.lastGasChargeTime = now diff --git a/chain/vm/vm.go b/chain/vm/vm.go index f09864c2d..c8e3f2519 100644 --- a/chain/vm/vm.go +++ b/chain/vm/vm.go @@ -38,6 +38,7 @@ import ( ) const MaxCallDepth = 4096 +const CborCodec = 0x51 var ( log = logging.Logger("vm") @@ -125,6 +126,10 @@ func (bs *gasChargingBlocks) Put(ctx context.Context, blk block.Block) error { } func (vm *LegacyVM) makeRuntime(ctx context.Context, msg *types.Message, parent *Runtime) *Runtime { + paramsCodec := uint64(0) + if len(msg.Params) > 0 { + paramsCodec = CborCodec + } rt := &Runtime{ ctx: ctx, vm: vm, @@ -140,7 +145,14 @@ func (vm *LegacyVM) makeRuntime(ctx context.Context, msg *types.Message, parent pricelist: PricelistByEpoch(vm.blockHeight), allowInternal: true, callerValidated: false, - executionTrace: types.ExecutionTrace{Msg: msg}, + executionTrace: types.ExecutionTrace{Msg: types.MessageTrace{ + From: msg.From, + To: msg.To, + Value: msg.Value, + Method: msg.Method, + Params: msg.Params, + ParamsCodec: paramsCodec, + }}, } if parent != nil { @@ -369,15 +381,14 @@ func (vm *LegacyVM) send(ctx context.Context, msg *types.Message, parent *Runtim return nil, nil }() - mr := types.MessageReceipt{ - ExitCode: aerrors.RetCode(err), - Return: ret, - GasUsed: rt.gasUsed, 
+ retCodec := uint64(0) + if len(ret) > 0 { + retCodec = CborCodec } - rt.executionTrace.MsgRct = &mr - rt.executionTrace.Duration = time.Since(start) - if err != nil { - rt.executionTrace.Error = err.Error() + rt.executionTrace.MsgRct = types.ReturnTrace{ + ExitCode: aerrors.RetCode(err), + Return: ret, + ReturnCodec: retCodec, } return ret, err, rt diff --git a/cli/backup.go b/cli/backup.go index 83234a423..d2d8f25ff 100644 --- a/cli/backup.go +++ b/cli/backup.go @@ -61,6 +61,10 @@ func BackupCmd(repoFlag string, rt repo.RepoType, getApi BackupApiFn) *cli.Comma return xerrors.Errorf("expanding file path: %w", err) } + if _, err := os.Stat(fpath); !os.IsNotExist(err) { + return xerrors.Errorf("backup file %s already exists. Overwriting it will corrupt the file, please specify another file name", fpath) + } + out, err := os.OpenFile(fpath, os.O_CREATE|os.O_WRONLY, 0644) if err != nil { return xerrors.Errorf("opening backup file %s: %w", fpath, err) @@ -87,7 +91,12 @@ func BackupCmd(repoFlag string, rt repo.RepoType, getApi BackupApiFn) *cli.Comma } defer closer() - err = api.CreateBackup(ReqContext(cctx), cctx.Args().First()) + backupPath := cctx.Args().First() + if _, err := os.Stat(backupPath); !os.IsNotExist(err) { + return xerrors.Errorf("backup file %s already exists. Overwriting it will corrupt the file, please specify another file name", backupPath) + } + + err = api.CreateBackup(ReqContext(cctx), backupPath) if err != nil { return err } diff --git a/cli/chain.go b/cli/chain.go index 57316d22d..4344b0773 100644 --- a/cli/chain.go +++ b/cli/chain.go @@ -6,6 +6,7 @@ import ( "encoding/base64" "encoding/hex" "encoding/json" + "errors" "fmt" "io" "os" @@ -37,7 +38,7 @@ import ( "github.com/filecoin-project/lotus/api/v0api" "github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/chain/actors" - "github.com/filecoin-project/lotus/chain/consensus/filcns" + "github.com/filecoin-project/lotus/chain/consensus" "github.com/filecoin-project/lotus/chain/types" ) @@ -56,6 +57,7 @@ var ChainCmd = &cli.Command{ ChainGetCmd, ChainBisectCmd, ChainExportCmd, + ChainExportRangeCmd, SlashConsensusFault, ChainGasPriceCmd, ChainInspectUsage, @@ -112,8 +114,8 @@ var ChainGetBlock = &cli.Command{ defer closer() ctx := ReqContext(cctx) - if !cctx.Args().Present() { - return fmt.Errorf("must pass cid of block to print") + if cctx.NArg() != 1 { + return IncorrectNumArgs(cctx) } bcid, err := cid.Decode(cctx.Args().First()) @@ -198,6 +200,10 @@ var ChainReadObjCmd = &cli.Command{ defer closer() ctx := ReqContext(cctx) + if cctx.NArg() != 1 { + return IncorrectNumArgs(cctx) + } + c, err := cid.Decode(cctx.Args().First()) if err != nil { return fmt.Errorf("failed to parse cid input: %s", err) @@ -233,6 +239,10 @@ var ChainDeleteObjCmd = &cli.Command{ defer closer() ctx := ReqContext(cctx) + if cctx.NArg() != 1 { + return IncorrectNumArgs(cctx) + } + c, err := cid.Decode(cctx.Args().First()) if err != nil { return fmt.Errorf("failed to parse cid input: %s", err) @@ -276,6 +286,10 @@ var ChainStatObjCmd = &cli.Command{ defer closer() ctx := ReqContext(cctx) + if cctx.NArg() != 1 { + return IncorrectNumArgs(cctx) + } + obj, err := cid.Decode(cctx.Args().First()) if err != nil { return fmt.Errorf("failed to parse cid input: %s", err) @@ -308,8 +322,8 @@ var ChainGetMsgCmd = &cli.Command{ Action: func(cctx *cli.Context) error { afmt := NewAppFmt(cctx.App) - if !cctx.Args().Present() { - return fmt.Errorf("must pass a cid of a message to get") + if cctx.NArg() != 1 { + return IncorrectNumArgs(cctx) 
}
api, closer, err := GetFullNodeAPI(cctx)
@@ -374,6 +388,10 @@ var ChainSetHeadCmd = &cli.Command{
defer closer()
ctx := ReqContext(cctx)
+ if cctx.NArg() != 1 {
+ return IncorrectNumArgs(cctx)
+ }
+
var ts *types.TipSet
if cctx.Bool("genesis") {
@@ -493,7 +511,7 @@ var ChainInspectUsage = &cli.Command{
return err
}
- mm := filcns.NewActorRegistry().Methods[code][m.Message.Method] // TODO: use remote map
+ mm := consensus.NewActorRegistry().Methods[code][m.Message.Method] // TODO: use remote map
byMethod[mm.Name] += m.Message.GasLimit
byMethodC[mm.Name]++
@@ -734,6 +752,10 @@ var ChainGetCmd = &cli.Command{
defer closer()
ctx := ReqContext(cctx)
+ if cctx.NArg() != 1 {
+ return IncorrectNumArgs(cctx)
+ }
+
p := path.Clean(cctx.Args().First())
if strings.HasPrefix(p, "/pstate") {
p = p[len("/pstate"):]
@@ -1071,8 +1093,8 @@ var ChainExportCmd = &cli.Command{
defer closer()
ctx := ReqContext(cctx)
- if !cctx.Args().Present() {
- return fmt.Errorf("must specify filename to export chain to")
+ if cctx.NArg() != 1 {
+ return IncorrectNumArgs(cctx)
}
rsrs := abi.ChainEpoch(cctx.Int64("recent-stateroots"))
@@ -1125,6 +1147,109 @@ var ChainExportCmd = &cli.Command{
},
}
+var ChainExportRangeCmd = &cli.Command{
+ Name: "export-range",
+ Usage: "export a range of the chain to a car file",
+ ArgsUsage: "",
+ Flags: []cli.Flag{
+ &cli.StringFlag{
+ Name: "head",
+ Usage: "specify tipset to start the export from (higher epoch)",
+ Value: "@head",
+ },
+ &cli.StringFlag{
+ Name: "tail",
+ Usage: "specify tipset to end the export at (lower epoch)",
+ Value: "@tail",
+ },
+ &cli.BoolFlag{
+ Name: "messages",
+ Usage: "specify if messages should be included",
+ Value: false,
+ },
+ &cli.BoolFlag{
+ Name: "receipts",
+ Usage: "specify if receipts should be included",
+ Value: false,
+ },
+ &cli.BoolFlag{
+ Name: "stateroots",
+ Usage: "specify if stateroots should be included",
+ Value: false,
+ },
+ &cli.IntFlag{
+ Name: "workers",
+ Usage: "specify the number of workers",
+ Value: 1,
+ },
+ &cli.IntFlag{
+ Name: "write-buffer",
+ Usage: "specify write buffer size",
+ Value: 1 << 20,
+ },
+ &cli.BoolFlag{
+ Name: "internal",
+ Usage: "write the file locally to disk",
+ Value: true,
+ Hidden: true, // currently, non-internal export is not implemented. 
+ }, + }, + Action: func(cctx *cli.Context) error { + api, closer, err := GetFullNodeAPIV1(cctx) + if err != nil { + return err + } + defer closer() + ctx := ReqContext(cctx) + + var head, tail *types.TipSet + headstr := cctx.String("head") + if headstr == "@head" { + head, err = api.ChainHead(ctx) + if err != nil { + return err + } + } else { + head, err = ParseTipSetRef(ctx, api, headstr) + if err != nil { + return fmt.Errorf("parsing head: %w", err) + } + } + tailstr := cctx.String("tail") + if tailstr == "@tail" { + tail, err = api.ChainGetGenesis(ctx) + if err != nil { + return err + } + } else { + tail, err = ParseTipSetRef(ctx, api, tailstr) + if err != nil { + return fmt.Errorf("parsing tail: %w", err) + } + } + + if head.Height() < tail.Height() { + return errors.New("Height of --head tipset must be greater or equal to the height of the --tail tipset") + } + + if !cctx.Bool("internal") { + return errors.New("Non-internal exports are not implemented") + } + + err = api.ChainExportRangeInternal(ctx, head.Key(), tail.Key(), lapi.ChainExportConfig{ + WriteBufferSize: cctx.Int("write-buffer"), + NumWorkers: cctx.Int("workers"), + IncludeMessages: cctx.Bool("messages"), + IncludeReceipts: cctx.Bool("receipts"), + IncludeStateRoots: cctx.Bool("stateroots"), + }) + if err != nil { + return err + } + return nil + }, +} + var SlashConsensusFault = &cli.Command{ Name: "slash-consensus", Usage: "Report consensus fault", @@ -1466,7 +1591,64 @@ func createExportFile(app *cli.App, path string) (io.WriteCloser, error) { var ChainPruneCmd = &cli.Command{ Name: "prune", - Usage: "prune the stored chain state and perform garbage collection", + Usage: "splitstore gc", + Subcommands: []*cli.Command{ + chainPruneColdCmd, + chainPruneHotGCCmd, + chainPruneHotMovingGCCmd, + }, +} + +var chainPruneHotGCCmd = &cli.Command{ + Name: "hot", + Usage: "run online (badger vlog) garbage collection on hotstore", + Flags: []cli.Flag{ + &cli.Float64Flag{Name: "threshold", Value: 0.01, Usage: "Threshold of vlog garbage for gc"}, + &cli.BoolFlag{Name: "periodic", Value: false, Usage: "Run periodic gc over multiple vlogs. 
Otherwise run gc once"}, + }, + Action: func(cctx *cli.Context) error { + api, closer, err := GetFullNodeAPIV1(cctx) + if err != nil { + return err + } + defer closer() + ctx := ReqContext(cctx) + opts := lapi.HotGCOpts{} + opts.Periodic = cctx.Bool("periodic") + opts.Threshold = cctx.Float64("threshold") + + gcStart := time.Now() + err = api.ChainHotGC(ctx, opts) + gcTime := time.Since(gcStart) + fmt.Printf("Online GC took %v (periodic <%t> threshold <%f>)", gcTime, opts.Periodic, opts.Threshold) + return err + }, +} + +var chainPruneHotMovingGCCmd = &cli.Command{ + Name: "hot-moving", + Usage: "run moving gc on hotstore", + Action: func(cctx *cli.Context) error { + api, closer, err := GetFullNodeAPIV1(cctx) + if err != nil { + return err + } + defer closer() + ctx := ReqContext(cctx) + opts := lapi.HotGCOpts{} + opts.Moving = true + + gcStart := time.Now() + err = api.ChainHotGC(ctx, opts) + gcTime := time.Since(gcStart) + fmt.Printf("Moving GC took %v", gcTime) + return err + }, +} + +var chainPruneColdCmd = &cli.Command{ + Name: "compact-cold", + Usage: "force splitstore compaction on cold store state and run gc", Flags: []cli.Flag{ &cli.BoolFlag{ Name: "online-gc", diff --git a/cli/client.go b/cli/client.go index a8355f9a1..88f7ed208 100644 --- a/cli/client.go +++ b/cli/client.go @@ -31,7 +31,7 @@ import ( "golang.org/x/xerrors" "github.com/filecoin-project/go-address" - datatransfer "github.com/filecoin-project/go-data-transfer" + datatransfer "github.com/filecoin-project/go-data-transfer/v2" "github.com/filecoin-project/go-fil-markets/retrievalmarket" "github.com/filecoin-project/go-fil-markets/storagemarket" "github.com/filecoin-project/go-state-types/abi" @@ -166,8 +166,8 @@ var clientDropCmd = &cli.Command{ Usage: "Remove import", ArgsUsage: "[import ID...]", Action: func(cctx *cli.Context) error { - if !cctx.Args().Present() { - return xerrors.Errorf("no imports specified") + if cctx.NArg() != 1 { + return IncorrectNumArgs(cctx) } api, closer, err := GetFullNodeAPI(cctx) @@ -996,9 +996,8 @@ var clientFindCmd = &cli.Command{ }, }, Action: func(cctx *cli.Context) error { - if !cctx.Args().Present() { - fmt.Println("Usage: find [CID]") - return nil + if cctx.NArg() != 1 { + return IncorrectNumArgs(cctx) } file, err := cid.Parse(cctx.Args().First()) @@ -1063,8 +1062,7 @@ var clientQueryRetrievalAskCmd = &cli.Command{ Action: func(cctx *cli.Context) error { afmt := NewAppFmt(cctx.App) if cctx.NArg() != 2 { - afmt.Println("Usage: retrieval-ask [minerAddress] [data CID]") - return nil + return IncorrectNumArgs(cctx) } maddr, err := address.NewFromString(cctx.Args().First()) @@ -1120,11 +1118,6 @@ var clientListRetrievalsCmd = &cli.Command{ Aliases: []string{"v"}, Usage: "print verbose deal details", }, - &cli.BoolFlag{ - Name: "color", - Usage: "use color in display output", - DefaultText: "depends on output being a TTY", - }, &cli.BoolFlag{ Name: "show-failed", Usage: "show failed/failing deals", @@ -1140,10 +1133,6 @@ var clientListRetrievalsCmd = &cli.Command{ }, }, Action: func(cctx *cli.Context) error { - if cctx.IsSet("color") { - color.NoColor = !cctx.Bool("color") - } - api, closer, err := GetFullNodeAPI(cctx) if err != nil { return err @@ -1639,8 +1628,7 @@ var clientQueryAskCmd = &cli.Command{ Action: func(cctx *cli.Context) error { afmt := NewAppFmt(cctx.App) if cctx.NArg() != 1 { - afmt.Println("Usage: query-ask [minerAddress]") - return nil + return IncorrectNumArgs(cctx) } maddr, err := address.NewFromString(cctx.Args().First()) @@ -1712,11 +1700,6 @@ var clientListDeals 
= &cli.Command{ Aliases: []string{"v"}, Usage: "print verbose deal details", }, - &cli.BoolFlag{ - Name: "color", - Usage: "use color in display output", - DefaultText: "depends on output being a TTY", - }, &cli.BoolFlag{ Name: "show-failed", Usage: "show failed/failing deals", @@ -1727,10 +1710,6 @@ var clientListDeals = &cli.Command{ }, }, Action: func(cctx *cli.Context) error { - if cctx.IsSet("color") { - color.NoColor = !cctx.Bool("color") - } - api, closer, err := GetFullNodeAPI(cctx) if err != nil { return err @@ -1944,8 +1923,8 @@ var clientGetDealCmd = &cli.Command{ Usage: "Print detailed deal information", ArgsUsage: "[proposalCID]", Action: func(cctx *cli.Context) error { - if !cctx.Args().Present() { - return cli.ShowCommandHelp(cctx, cctx.Command.Name) + if cctx.NArg() != 1 { + return IncorrectNumArgs(cctx) } api, closer, err := GetFullNodeAPI(cctx) @@ -2058,8 +2037,8 @@ var clientStat = &cli.Command{ defer closer() ctx := ReqContext(cctx) - if !cctx.Args().Present() || cctx.NArg() != 1 { - return fmt.Errorf("must specify cid of data") + if cctx.NArg() != 1 { + return IncorrectNumArgs(cctx) } dataCid, err := cid.Parse(cctx.Args().First()) @@ -2080,8 +2059,9 @@ var clientStat = &cli.Command{ } var clientRestartTransfer = &cli.Command{ - Name: "restart-transfer", - Usage: "Force restart a stalled data transfer", + Name: "restart-transfer", + Usage: "Force restart a stalled data transfer", + ArgsUsage: "[transferID]", Flags: []cli.Flag{ &cli.StringFlag{ Name: "peerid", @@ -2094,8 +2074,8 @@ var clientRestartTransfer = &cli.Command{ }, }, Action: func(cctx *cli.Context) error { - if !cctx.Args().Present() { - return cli.ShowCommandHelp(cctx, cctx.Command.Name) + if cctx.NArg() != 1 { + return IncorrectNumArgs(cctx) } api, closer, err := GetFullNodeAPI(cctx) if err != nil { @@ -2140,8 +2120,9 @@ var clientRestartTransfer = &cli.Command{ } var clientCancelTransfer = &cli.Command{ - Name: "cancel-transfer", - Usage: "Force cancel a data transfer", + Name: "cancel-transfer", + Usage: "Force cancel a data transfer", + ArgsUsage: "[transferID]", Flags: []cli.Flag{ &cli.StringFlag{ Name: "peerid", @@ -2159,8 +2140,8 @@ var clientCancelTransfer = &cli.Command{ }, }, Action: func(cctx *cli.Context) error { - if !cctx.Args().Present() { - return cli.ShowCommandHelp(cctx, cctx.Command.Name) + if cctx.NArg() != 1 { + return IncorrectNumArgs(cctx) } api, closer, err := GetFullNodeAPI(cctx) if err != nil { @@ -2242,11 +2223,6 @@ var clientListTransfers = &cli.Command{ Aliases: []string{"v"}, Usage: "print verbose transfer details", }, - &cli.BoolFlag{ - Name: "color", - Usage: "use color in display output", - DefaultText: "depends on output being a TTY", - }, &cli.BoolFlag{ Name: "completed", Usage: "show completed data transfers", @@ -2261,10 +2237,6 @@ var clientListTransfers = &cli.Command{ }, }, Action: func(cctx *cli.Context) error { - if cctx.IsSet("color") { - color.NoColor = !cctx.Bool("color") - } - api, closer, err := GetFullNodeAPI(cctx) if err != nil { return err diff --git a/cli/disputer.go b/cli/disputer.go index adbdd919e..de3f50324 100644 --- a/cli/disputer.go +++ b/cli/disputer.go @@ -58,8 +58,7 @@ var disputerMsgCmd = &cli.Command{ Flags: []cli.Flag{}, Action: func(cctx *cli.Context) error { if cctx.NArg() != 3 { - fmt.Println("Usage: dispute [minerAddress index postIndex]") - return nil + return IncorrectNumArgs(cctx) } ctx := ReqContext(cctx) diff --git a/cli/evm.go b/cli/evm.go index d153e7212..84cbf8c61 100644 --- a/cli/evm.go +++ b/cli/evm.go @@ -35,6 +35,7 @@ var EvmCmd 
= &cli.Command{ EvmGetInfoCmd, EvmCallSimulateCmd, EvmGetContractAddress, + EvmGetBytecode, }, } @@ -486,3 +487,51 @@ func ethAddrFromFilecoinAddress(ctx context.Context, addr address.Address, fnapi return ethAddr, faddr, nil } + +var EvmGetBytecode = &cli.Command{ + Name: "bytecode", + Usage: "Write the bytecode of a smart contract to a file", + ArgsUsage: "[contract-address] [file-name]", + Flags: []cli.Flag{ + &cli.BoolFlag{ + Name: "bin", + Usage: "write the bytecode as raw binary and don't hex-encode", + }, + }, + Action: func(cctx *cli.Context) error { + + if cctx.NArg() != 2 { + return IncorrectNumArgs(cctx) + } + + contractAddr, err := ethtypes.ParseEthAddress(cctx.Args().Get(0)) + if err != nil { + return err + } + + fileName := cctx.Args().Get(1) + + api, closer, err := GetFullNodeAPIV1(cctx) + if err != nil { + return err + } + defer closer() + ctx := ReqContext(cctx) + + code, err := api.EthGetCode(ctx, contractAddr, "latest") + if err != nil { + return err + } + if !cctx.Bool("bin") { + newCode := make([]byte, hex.EncodedLen(len(code))) + hex.Encode(newCode, code) + code = newCode + } + if err := os.WriteFile(fileName, code, 0o666); err != nil { + return xerrors.Errorf("failed to write bytecode to file %s: %w", fileName, err) + } + + fmt.Printf("Code for %s written to %s\n", contractAddr, fileName) + return nil + }, +} diff --git a/cli/filplus.go b/cli/filplus.go index 0e81ccd3e..9fbd2a489 100644 --- a/cli/filplus.go +++ b/cli/filplus.go @@ -43,7 +43,9 @@ var filplusCmd = &cli.Command{ filplusCheckNotaryCmd, filplusSignRemoveDataCapProposal, filplusListAllocationsCmd, + filplusListClaimsCmd, filplusRemoveExpiredAllocationsCmd, + filplusRemoveExpiredClaimsCmd, }, } @@ -310,6 +312,91 @@ var filplusListAllocationsCmd = &cli.Command{ }, } +var filplusListClaimsCmd = &cli.Command{ + Name: "list-claims", + Usage: "List claims made by provider", + ArgsUsage: "providerAddress", + Flags: []cli.Flag{ + &cli.BoolFlag{ + Name: "expired", + Usage: "list only expired claims", + }, + }, + Action: func(cctx *cli.Context) error { + if cctx.NArg() != 1 { + return IncorrectNumArgs(cctx) + } + + api, closer, err := GetFullNodeAPI(cctx) + if err != nil { + return err + } + defer closer() + ctx := ReqContext(cctx) + + providerAddr, err := address.NewFromString(cctx.Args().Get(0)) + if err != nil { + return err + } + + providerIdAddr, err := api.StateLookupID(ctx, providerAddr, types.EmptyTSK) + if err != nil { + return err + } + + store := adt.WrapStore(ctx, cbor.NewCborStore(blockstore.NewAPIBlockstore(api))) + + verifregActor, err := api.StateGetActor(ctx, verifreg.Address, types.EmptyTSK) + if err != nil { + return err + } + + verifregState, err := verifreg.Load(store, verifregActor) + if err != nil { + return err + } + + ts, err := api.ChainHead(ctx) + if err != nil { + return err + } + + claimsMap, err := verifregState.GetClaims(providerIdAddr) + if err != nil { + return err + } + + tw := tablewriter.New( + tablewriter.Col("ID"), + tablewriter.Col("Provider"), + tablewriter.Col("Client"), + tablewriter.Col("Data"), + tablewriter.Col("Size"), + tablewriter.Col("TermMin"), + tablewriter.Col("TermMax"), + tablewriter.Col("TermStart"), + tablewriter.Col("Sector"), + ) + + for claimId, claim := range claimsMap { + if ts.Height() > claim.TermMax || !cctx.IsSet("expired") { + tw.Write(map[string]interface{}{ + "ID": claimId, + "Provider": claim.Provider, + "Client": claim.Client, + "Data": claim.Data, + "Size": claim.Size, + "TermMin": claim.TermMin, + "TermMax": claim.TermMax, + "TermStart": 
claim.TermStart, + "Sector": claim.Sector, + }) + } + } + return tw.Flush(os.Stdout) + }, +} + var filplusRemoveExpiredAllocationsCmd = &cli.Command{ Name: "remove-expired-allocations", Usage: "remove expired allocations (if no allocations are specified all eligible allocations are removed)", @@ -403,6 +490,99 @@ var filplusRemoveExpiredAllocationsCmd = &cli.Command{ }, } +var filplusRemoveExpiredClaimsCmd = &cli.Command{ + Name: "remove-expired-claims", + Usage: "remove expired claims (if no claims are specified all eligible claims are removed)", + ArgsUsage: "providerAddress Optional[...claimId]", + Flags: []cli.Flag{ + &cli.StringFlag{ + Name: "from", + Usage: "optionally specify the account to send the message from", + }, + }, + Action: func(cctx *cli.Context) error { + if cctx.NArg() < 1 { + return IncorrectNumArgs(cctx) + } + + api, closer, err := GetFullNodeAPI(cctx) + if err != nil { + return err + } + defer closer() + ctx := ReqContext(cctx) + + args := cctx.Args().Slice() + + providerAddr, err := address.NewFromString(args[0]) + if err != nil { + return err + } + + providerIdAddr, err := api.StateLookupID(ctx, providerAddr, types.EmptyTSK) + if err != nil { + return err + } + + providerId, err := address.IDFromAddress(providerIdAddr) + if err != nil { + return err + } + + fromAddr := providerIdAddr + if from := cctx.String("from"); from != "" { + addr, err := address.NewFromString(from) + if err != nil { + return err + } + + fromAddr = addr + } + + claimIDs := make([]verifregtypes9.ClaimId, len(args)-1) + for i, claimStr := range args[1:] { + id, err := strconv.ParseUint(claimStr, 10, 64) + if err != nil { + return err + } + claimIDs[i] = verifregtypes9.ClaimId(id) + } + + params, err := actors.SerializeParams(&verifregtypes9.RemoveExpiredClaimsParams{ + Provider: abi.ActorID(providerId), + ClaimIds: claimIDs, + }) + if err != nil { + return err + } + + msg := &types.Message{ + To: verifreg.Address, + From: fromAddr, + Method: verifreg.Methods.RemoveExpiredClaims, + Params: params, + } + + smsg, err := api.MpoolPushMessage(ctx, msg, nil) + if err != nil { + return err + } + + fmt.Printf("message sent, now waiting on cid: %s\n", smsg.Cid()) + + mwait, err := api.StateWaitMsg(ctx, smsg.Cid(), build.MessageConfidence) + if err != nil { + return err + } + + if mwait.Receipt.ExitCode.IsError() { + return fmt.Errorf("failed to remove expired claims: %d", mwait.Receipt.ExitCode) + } + + return nil + }, +} + var filplusCheckClientCmd = &cli.Command{ Name: "check-client-datacap", Usage: "check verified client remaining bytes", diff --git a/cli/multisig.go b/cli/multisig.go index 38923a04f..1af2a4c9e 100644 --- a/cli/multisig.go +++ b/cli/multisig.go @@ -28,7 +28,7 @@ import ( "github.com/filecoin-project/lotus/chain/actors/adt" "github.com/filecoin-project/lotus/chain/actors/builtin" "github.com/filecoin-project/lotus/chain/actors/builtin/multisig" - "github.com/filecoin-project/lotus/chain/consensus/filcns" + "github.com/filecoin-project/lotus/chain/consensus" "github.com/filecoin-project/lotus/chain/types" ) @@ -325,7 +325,7 @@ var msigInspectCmd = &cli.Command{ fmt.Fprintf(w, "%d\t%s\t%d\t%s\t%s\t%s(%d)\t%s\n", txid, "pending", len(tx.Approved), target, types.FIL(tx.Value), "new account, unknown method", tx.Method, paramStr) } } else { - method := filcns.NewActorRegistry().Methods[targAct.Code][tx.Method] // TODO: use remote map + method := consensus.NewActorRegistry().Methods[targAct.Code][tx.Method] // TODO: use remote map if decParams && tx.Method != 0 { ptyp := 
reflect.New(method.Params.Elem()).Interface().(cbg.CBORUnmarshaler) diff --git a/cli/params.go b/cli/params.go index 4dcbe67e2..e79eb8e30 100644 --- a/cli/params.go +++ b/cli/params.go @@ -15,8 +15,8 @@ var FetchParamCmd = &cli.Command{ Usage: "Fetch proving parameters", ArgsUsage: "[sectorSize]", Action: func(cctx *cli.Context) error { - if !cctx.Args().Present() { - return xerrors.Errorf("must pass sector size to fetch params for (specify as \"32GiB\", for instance)") + if cctx.NArg() != 1 { + return IncorrectNumArgs(cctx) } sectorSizeInt, err := units.RAMInBytes(cctx.Args().First()) if err != nil { diff --git a/cli/services.go b/cli/services.go index d90da419c..ef257693f 100644 --- a/cli/services.go +++ b/cli/services.go @@ -17,7 +17,7 @@ import ( "github.com/filecoin-project/go-state-types/big" "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/chain/consensus/filcns" + "github.com/filecoin-project/lotus/chain/consensus" "github.com/filecoin-project/lotus/chain/types" ) @@ -88,7 +88,7 @@ func (s *ServicesImpl) DecodeTypedParamsFromJSON(ctx context.Context, to address return nil, err } - methodMeta, found := filcns.NewActorRegistry().Methods[act.Code][method] // TODO: use remote map + methodMeta, found := consensus.NewActorRegistry().Methods[act.Code][method] // TODO: use remote map if !found { return nil, fmt.Errorf("method %d not found on actor %s", method, act.Code) } diff --git a/cli/state.go b/cli/state.go index 3d629bb0b..3099bff17 100644 --- a/cli/state.go +++ b/cli/state.go @@ -9,7 +9,6 @@ import ( "fmt" "html/template" "io" - "io/ioutil" "os" "reflect" "sort" @@ -22,7 +21,6 @@ import ( "github.com/ipfs/go-cid" cbor "github.com/ipfs/go-ipld-cbor" "github.com/multiformats/go-multiaddr" - "github.com/multiformats/go-multihash" "github.com/urfave/cli/v2" cbg "github.com/whyrusleeping/cbor-gen" "golang.org/x/xerrors" @@ -41,7 +39,7 @@ import ( "github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/chain/actors" "github.com/filecoin-project/lotus/chain/actors/builtin" - "github.com/filecoin-project/lotus/chain/consensus/filcns" + "github.com/filecoin-project/lotus/chain/consensus" "github.com/filecoin-project/lotus/chain/state" "github.com/filecoin-project/lotus/chain/stmgr" "github.com/filecoin-project/lotus/chain/store" @@ -99,8 +97,8 @@ var StateMinerProvingDeadlineCmd = &cli.Command{ ctx := ReqContext(cctx) - if !cctx.Args().Present() { - return fmt.Errorf("must specify miner to get information for") + if cctx.NArg() != 1 { + return IncorrectNumArgs(cctx) } addr, err := address.NewFromString(cctx.Args().First()) @@ -142,8 +140,8 @@ var StateMinerInfo = &cli.Command{ ctx := ReqContext(cctx) - if !cctx.Args().Present() { - return fmt.Errorf("must specify miner to get information for") + if cctx.NArg() != 1 { + return IncorrectNumArgs(cctx) } addr, err := address.NewFromString(cctx.Args().First()) @@ -252,10 +250,16 @@ func ParseTipSetString(ts string) ([]cid.Cid, error) { return cids, nil } +type TipSetResolver interface { + ChainHead(context.Context) (*types.TipSet, error) + ChainGetTipSetByHeight(context.Context, abi.ChainEpoch, types.TipSetKey) (*types.TipSet, error) + ChainGetTipSet(context.Context, types.TipSetKey) (*types.TipSet, error) +} + // LoadTipSet gets the tipset from the context, or the head from the API. // // It always gets the head from the API so commands use a consistent tipset even if time pases. 
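
The recurring change in the hunks above is argument validation: commands that previously checked cctx.Args().Present() (or printed ad-hoc usage strings) now require an exact count via cctx.NArg() and return the shared IncorrectNumArgs helper, so usage errors look the same across the CLI. A minimal urfave/cli v2 sketch of the pattern follows; the incorrectNumArgs helper below is an illustrative stand-in, not lotus's implementation.

    package main

    import (
        "fmt"
        "os"

        "github.com/urfave/cli/v2"
    )

    // incorrectNumArgs is a stand-in for the shared helper the diff adopts:
    // one place that formats "wrong number of arguments" errors consistently.
    func incorrectNumArgs(cctx *cli.Context) error {
        return fmt.Errorf("incorrect number of arguments, got %d, usage: %s",
            cctx.NArg(), cctx.Command.ArgsUsage)
    }

    func main() {
        app := &cli.App{
            Name: "example",
            Commands: []*cli.Command{{
                Name:      "get-deal",
                Usage:     "Print detailed deal information",
                ArgsUsage: "[proposalCID]",
                Action: func(cctx *cli.Context) error {
                    // Exactly one positional argument is required.
                    if cctx.NArg() != 1 {
                        return incorrectNumArgs(cctx)
                    }
                    fmt.Println("looking up deal", cctx.Args().First())
                    return nil
                },
            }},
        }
        if err := app.Run(os.Args); err != nil {
            fmt.Fprintln(os.Stderr, err)
            os.Exit(1)
        }
    }

Compared with Args().Present(), NArg() also rejects surplus arguments, which is why checks such as "!cctx.Args().Present() || cctx.NArg() != 2" collapse to a single NArg comparison above.
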
-func LoadTipSet(ctx context.Context, cctx *cli.Context, api v0api.FullNode) (*types.TipSet, error) { +func LoadTipSet(ctx context.Context, cctx *cli.Context, api TipSetResolver) (*types.TipSet, error) { tss := cctx.String("tipset") if tss == "" { return api.ChainHead(ctx) @@ -264,7 +268,7 @@ func LoadTipSet(ctx context.Context, cctx *cli.Context, api v0api.FullNode) (*ty return ParseTipSetRef(ctx, api, tss) } -func ParseTipSetRef(ctx context.Context, api v0api.FullNode, tss string) (*types.TipSet, error) { +func ParseTipSetRef(ctx context.Context, api TipSetResolver, tss string) (*types.TipSet, error) { if tss[0] == '@' { if tss == "@head" { return api.ChainHead(ctx) @@ -391,8 +395,8 @@ var StateSectorsCmd = &cli.Command{ ctx := ReqContext(cctx) - if !cctx.Args().Present() { - return fmt.Errorf("must specify miner to list sectors for") + if cctx.NArg() != 1 { + return IncorrectNumArgs(cctx) } maddr, err := address.NewFromString(cctx.Args().First()) @@ -431,8 +435,8 @@ var StateActiveSectorsCmd = &cli.Command{ ctx := ReqContext(cctx) - if !cctx.Args().Present() { - return fmt.Errorf("must specify miner to list sectors for") + if cctx.NArg() != 1 { + return IncorrectNumArgs(cctx) } maddr, err := address.NewFromString(cctx.Args().First()) @@ -463,8 +467,8 @@ var StateExecTraceCmd = &cli.Command{ Usage: "Get the execution trace of a given message", ArgsUsage: "", Action: func(cctx *cli.Context) error { - if !cctx.Args().Present() { - return ShowHelp(cctx, fmt.Errorf("must pass message cid")) + if cctx.NArg() != 1 { + return IncorrectNumArgs(cctx) } mcid, err := cid.Decode(cctx.Args().First()) @@ -606,8 +610,8 @@ var StateGetDealSetCmd = &cli.Command{ ctx := ReqContext(cctx) - if !cctx.Args().Present() { - return fmt.Errorf("must specify deal ID") + if cctx.NArg() != 1 { + return IncorrectNumArgs(cctx) } dealid, err := strconv.ParseUint(cctx.Args().First(), 10, 64) @@ -750,8 +754,8 @@ var StateGetActorCmd = &cli.Command{ ctx := ReqContext(cctx) - if !cctx.Args().Present() { - return fmt.Errorf("must pass address of actor to get") + if cctx.NArg() != 1 { + return IncorrectNumArgs(cctx) } addr, err := address.NewFromString(cctx.Args().First()) @@ -804,8 +808,8 @@ var StateLookupIDCmd = &cli.Command{ ctx := ReqContext(cctx) - if !cctx.Args().Present() { - return fmt.Errorf("must pass address of actor to get") + if cctx.NArg() != 1 { + return IncorrectNumArgs(cctx) } addr, err := address.NewFromString(cctx.Args().First()) @@ -848,8 +852,8 @@ var StateSectorSizeCmd = &cli.Command{ ctx := ReqContext(cctx) - if !cctx.Args().Present() { - return fmt.Errorf("must pass miner's address") + if cctx.NArg() != 1 { + return IncorrectNumArgs(cctx) } addr, err := address.NewFromString(cctx.Args().First()) @@ -885,8 +889,8 @@ var StateReadStateCmd = &cli.Command{ ctx := ReqContext(cctx) - if !cctx.Args().Present() { - return fmt.Errorf("must pass address of actor to get") + if cctx.NArg() != 1 { + return IncorrectNumArgs(cctx) } addr, err := address.NewFromString(cctx.Args().First()) @@ -1085,7 +1089,7 @@ var StateComputeStateCmd = &cli.Command{ var stout *lapi.ComputeStateOutput if csofile := cctx.String("compute-state-output"); csofile != "" { - data, err := ioutil.ReadFile(csofile) + data, err := os.ReadFile(csofile) if err != nil { return err } @@ -1228,7 +1232,7 @@ var compStateTemplate = `
State CID: {{.Comp.Root}}
Calls
{{range .Comp.Trace}} - {{template "message" (Call .ExecutionTrace false .Msg.Cid.String)}} + {{template "message" (Call .ExecutionTrace false .MsgCid.String)}} {{end}} @@ -1253,16 +1257,12 @@ var compStateMsg = ` -
{{.Msg.From}} -> {{.Msg.To}} ({{ToFil .Msg.Value}} FIL), M{{.Msg.Method}}
- {{if not .Subcall}}
Msg CID: {{.Msg.Cid}}
{{end}} +
{{.Msg.From}} -> {{.Msg.To}} ({{ToFil .Msg.Value}}), M{{.Msg.Method}}
+ {{if not .Subcall}}
Msg CID: {{.Hash}}
{{end}} {{if gt (len .Msg.Params) 0}}
{{JsonParams ($code) (.Msg.Method) (.Msg.Params) | html}}
{{end}} - {{if PrintTiming}} -
Took {{.Duration}}, Exit: {{.MsgRct.ExitCode}}{{if gt (len .MsgRct.Return) 0}}, Return{{end}}
- {{else}}
Exit: {{.MsgRct.ExitCode}}{{if gt (len .MsgRct.Return) 0}}, Return{{end}}
- {{end}} {{if gt (len .MsgRct.Return) 0}}
{{JsonReturn ($code) (.Msg.Method) (.MsgRct.Return) | html}}
{{end}} @@ -1274,62 +1274,26 @@ var compStateMsg = `
Gas Trace - - {{define "virt" -}} - {{- if . -}} - +({{.}}) - {{- end -}} - {{- end}} + {{define "gasC" -}} - + {{- end}} {{range .GasCharges}} - - {{template "gasC" .}} - - - {{end}} - {{with sumGas .GasCharges}} - - {{template "gasC" .}} - - - {{end}} + + + {{template "gasC" .}} + + + {{end}} + {{with sumGas .GasCharges}} + + + {{template "gasC" .}} + + + {{end}}
NameTotal/Compute/StorageTime TakenLocation
NameTotal/Compute/StorageTime Taken
{{.TotalGas}}{{template "virt" .TotalVirtualGas }}/{{.ComputeGas}}{{template "virt" .VirtualComputeGas}}/{{.StorageGas}}{{template "virt" .VirtualStorageGas}}{{.TotalGas}}/{{.ComputeGas}}/{{.StorageGas}}
{{.Name}}{{if .Extra}}:{{.Extra}}{{end}}{{if PrintTiming}}{{.TimeTaken}}{{end}} - {{ $fImp := FirstImportant .Location }} - {{ if $fImp }} -
- {{ $fImp }}
- {{ $elipOn := false }} - {{ range $index, $ele := .Location -}} - {{- if $index }}
{{end -}} - {{- if .Show -}} - {{ if $elipOn }} - {{ $elipOn = false }} - - {{end}} - - {{- if .Important }}{{end -}} - {{- . -}} - {{if .Important }}{{end}} - {{else}} - {{ if not $elipOn }} - {{ $elipOn = true }} - - {{end}} -
- {{end}} -
Sum{{if PrintTiming}}{{.TimeTaken}}{{end}}
{{.Name}}{{if PrintTiming}}{{.TimeTaken}}{{end}}
Sum{{if PrintTiming}}{{.TimeTaken}}{{end}}
@@ -1337,8 +1301,8 @@ var compStateMsg = ` {{if gt (len .Subcalls) 0}}
Subcalls:
{{$hash := .Hash}} - {{range .Subcalls}} - {{template "message" (Call . true (printf "%s-%s" $hash .Msg.Cid.String))}} + {{range $i, $call := .Subcalls}} + {{template "message" (Call $call true (printf "%s-%d" $hash $i))}} {{end}} {{end}} ` @@ -1359,20 +1323,9 @@ func ComputeStateHTMLTempl(w io.Writer, ts *types.TipSet, o *api.ComputeStateOut "IsVerySlow": isVerySlow, "IntExit": func(i exitcode.ExitCode) int64 { return int64(i) }, "sumGas": types.SumGas, - "CodeStr": codeStr, + "CodeStr": builtin.ActorNameByCode, "Call": call, "PrintTiming": func() bool { return printTiming }, - "FirstImportant": func(locs []types.Loc) *types.Loc { - if len(locs) != 0 { - for _, l := range locs { - if l.Important() { - return &l - } - } - return &locs[0] - } - return nil - }, }).Parse(compStateTemplate) if err != nil { return err @@ -1402,16 +1355,8 @@ func call(e types.ExecutionTrace, subcall bool, hash string) callMeta { } } -func codeStr(c cid.Cid) string { - cmh, err := multihash.Decode(c.Hash()) - if err != nil { - panic(err) - } - return string(cmh.Digest) -} - func getMethod(code cid.Cid, method abi.MethodNum) string { - return filcns.NewActorRegistry().Methods[code][method].Name // todo: use remote + return consensus.NewActorRegistry().Methods[code][method].Name // todo: use remote } func toFil(f types.BigInt) types.FIL { @@ -1427,7 +1372,7 @@ func isVerySlow(t time.Duration) bool { } func JsonParams(code cid.Cid, method abi.MethodNum, params []byte) (string, error) { - p, err := stmgr.GetParamType(filcns.NewActorRegistry(), code, method) // todo use api for correct actor registry + p, err := stmgr.GetParamType(consensus.NewActorRegistry(), code, method) // todo use api for correct actor registry if err != nil { return "", err } @@ -1441,7 +1386,7 @@ func JsonParams(code cid.Cid, method abi.MethodNum, params []byte) (string, erro } func JsonReturn(code cid.Cid, method abi.MethodNum, ret []byte) (string, error) { - methodMeta, found := filcns.NewActorRegistry().Methods[code][method] // TODO: use remote + methodMeta, found := consensus.NewActorRegistry().Methods[code][method] // TODO: use remote if !found { return "", fmt.Errorf("method %d not found on actor %s", method, code) } @@ -1467,8 +1412,8 @@ var StateWaitMsgCmd = &cli.Command{ }, }, Action: func(cctx *cli.Context) error { - if !cctx.Args().Present() { - return fmt.Errorf("must specify message cid to wait for") + if cctx.NArg() != 1 { + return IncorrectNumArgs(cctx) } api, closer, err := GetFullNodeAPI(cctx) @@ -1504,8 +1449,8 @@ var StateSearchMsgCmd = &cli.Command{ Usage: "Search to see whether a message has appeared on chain", ArgsUsage: "[messageCid]", Action: func(cctx *cli.Context) error { - if !cctx.Args().Present() { - return fmt.Errorf("must specify message cid to search for") + if cctx.NArg() != 1 { + return IncorrectNumArgs(cctx) } api, closer, err := GetFullNodeAPI(cctx) @@ -1836,11 +1781,12 @@ var StateMarketCmd = &cli.Command{ } var stateMarketBalanceCmd = &cli.Command{ - Name: "balance", - Usage: "Get the market balance (locked and escrowed) for a given account", + Name: "balance", + Usage: "Get the market balance (locked and escrowed) for a given account", + ArgsUsage: "[address]", Action: func(cctx *cli.Context) error { - if !cctx.Args().Present() { - return ShowHelp(cctx, fmt.Errorf("must specify address to print market balance for")) + if cctx.NArg() != 1 { + return IncorrectNumArgs(cctx) } api, closer, err := GetFullNodeAPI(cctx) diff --git a/cli/util/retrieval.go b/cli/util/retrieval.go index 3a2ef6077..ac34fcf3a 100644 
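
The compute-state hunks above edit compStateMsg, a Go html/template that ComputeStateHTMLTempl parses with a FuncMap of helpers (sumGas, Call, JsonParams, and now builtin.ActorNameByCode in place of the multihash-based codeStr); the rewritten gas table drops the virtual-gas and per-charge location columns. A much smaller sketch of that template-plus-FuncMap mechanism, with an illustrative GasCharge type and a single sumGas helper rather than lotus's real types:

    package main

    import (
        "html/template"
        "os"
    )

    type GasCharge struct {
        Name                             string
        TotalGas, ComputeGas, StorageGas int64
    }

    // A pared-down gas table: one row per charge plus a summary row,
    // with the summary computed by a helper injected through Funcs.
    const gasTable = `<table>
    <tr><td>Name</td><td>Total/Compute/Storage</td></tr>
    {{range .}}<tr><td>{{.Name}}</td><td>{{.TotalGas}}/{{.ComputeGas}}/{{.StorageGas}}</td></tr>
    {{end}}{{with sumGas .}}<tr><td>{{.Name}}</td><td>{{.TotalGas}}/{{.ComputeGas}}/{{.StorageGas}}</td></tr>{{end}}
    </table>
    `

    func sumGas(charges []GasCharge) GasCharge {
        out := GasCharge{Name: "Sum"}
        for _, c := range charges {
            out.TotalGas += c.TotalGas
            out.ComputeGas += c.ComputeGas
            out.StorageGas += c.StorageGas
        }
        return out
    }

    func main() {
        t := template.Must(template.New("gas").
            Funcs(template.FuncMap{"sumGas": sumGas}).
            Parse(gasTable))
        charges := []GasCharge{
            {Name: "OnMethodInvocation", TotalGas: 100, ComputeGas: 100},
            {Name: "OnIpldGet", TotalGas: 40, ComputeGas: 30, StorageGas: 10},
        }
        if err := t.Execute(os.Stdout, charges); err != nil {
            panic(err)
        }
    }

Funcs must be registered before Parse so the template compiler can resolve the helper names, which is why ComputeStateHTMLTempl builds its FuncMap up front.
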
--- a/cli/util/retrieval.go +++ b/cli/util/retrieval.go @@ -4,7 +4,6 @@ import ( "encoding/json" "fmt" "io" - "io/ioutil" "net/http" "net/url" "path" @@ -65,7 +64,7 @@ func ClientExportStream(apiAddr string, apiAuth http.Header, eref api.ExportRef, } if resp.StatusCode != http.StatusOK { - em, err := ioutil.ReadAll(resp.Body) + em, err := io.ReadAll(resp.Body) if err != nil { return nil, xerrors.Errorf("reading error body: %w", err) } diff --git a/cli/wallet.go b/cli/wallet.go index 3a21cdaba..c66275cdd 100644 --- a/cli/wallet.go +++ b/cli/wallet.go @@ -6,7 +6,6 @@ import ( "encoding/hex" "encoding/json" "fmt" - "io/ioutil" "os" "strings" @@ -251,8 +250,8 @@ var walletSetDefault = &cli.Command{ defer closer() ctx := ReqContext(cctx) - if !cctx.Args().Present() { - return fmt.Errorf("must pass address to set as default") + if cctx.NArg() != 1 { + return IncorrectNumArgs(cctx) } addr, err := address.NewFromString(cctx.Args().First()) @@ -279,8 +278,8 @@ var walletExport = &cli.Command{ afmt := NewAppFmt(cctx.App) - if !cctx.Args().Present() { - return fmt.Errorf("must specify key to export") + if cctx.NArg() != 1 { + return IncorrectNumArgs(cctx) } addr, err := address.NewFromString(cctx.Args().First()) @@ -337,7 +336,7 @@ var walletImport = &cli.Command{ inpdata = indata } else { - fdata, err := ioutil.ReadFile(cctx.Args().First()) + fdata, err := os.ReadFile(cctx.Args().First()) if err != nil { return err } @@ -414,8 +413,8 @@ var walletSign = &cli.Command{ afmt := NewAppFmt(cctx.App) - if !cctx.Args().Present() || cctx.NArg() != 2 { - return fmt.Errorf("must specify signing address and message to sign") + if cctx.NArg() != 2 { + return IncorrectNumArgs(cctx) } addr, err := address.NewFromString(cctx.Args().First()) @@ -457,8 +456,8 @@ var walletVerify = &cli.Command{ afmt := NewAppFmt(cctx.App) - if !cctx.Args().Present() || cctx.NArg() != 3 { - return fmt.Errorf("must specify signing address, message, and signature to verify") + if cctx.NArg() != 3 { + return IncorrectNumArgs(cctx) } addr, err := address.NewFromString(cctx.Args().First()) @@ -509,8 +508,8 @@ var walletDelete = &cli.Command{ defer closer() ctx := ReqContext(cctx) - if !cctx.Args().Present() || cctx.NArg() != 1 { - return fmt.Errorf("must specify address to delete") + if cctx.NArg() != 1 { + return IncorrectNumArgs(cctx) } addr, err := address.NewFromString(cctx.Args().First()) @@ -701,8 +700,8 @@ var walletMarketAdd = &cli.Command{ afmt := NewAppFmt(cctx.App) // Get amount param - if !cctx.Args().Present() { - return fmt.Errorf("must pass amount to add") + if cctx.NArg() < 1 { + return IncorrectNumArgs(cctx) } f, err := types.ParseFIL(cctx.Args().First()) if err != nil { diff --git a/cmd/lotus-bench/import.go b/cmd/lotus-bench/import.go index 13824d07d..babf9342b 100644 --- a/cmd/lotus-bench/import.go +++ b/cmd/lotus-bench/import.go @@ -6,7 +6,6 @@ import ( "encoding/json" "fmt" "io" - "io/ioutil" "math" "net/http" _ "net/http/pprof" @@ -34,6 +33,7 @@ import ( "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/blockstore" badgerbs "github.com/filecoin-project/lotus/blockstore/badger" + "github.com/filecoin-project/lotus/chain/consensus" "github.com/filecoin-project/lotus/chain/consensus/filcns" "github.com/filecoin-project/lotus/chain/stmgr" "github.com/filecoin-project/lotus/chain/store" @@ -157,7 +157,7 @@ var importBenchCmd = &cli.Command{ if rdir := cctx.String("repodir"); rdir != "" { tdir = rdir } else { - tmp, err := ioutil.TempDir("", "lotus-import-bench") + tmp, err := os.MkdirTemp("", 
"lotus-import-bench") if err != nil { return err } @@ -228,7 +228,7 @@ var importBenchCmd = &cli.Command{ defer cs.Close() //nolint:errcheck // TODO: We need to supply the actual beacon after v14 - stm, err := stmgr.NewStateManager(cs, filcns.NewTipSetExecutor(), vm.Syscalls(verifier), filcns.DefaultUpgradeSchedule(), nil) + stm, err := stmgr.NewStateManager(cs, consensus.NewTipSetExecutor(filcns.RewardFunc), vm.Syscalls(verifier), filcns.DefaultUpgradeSchedule(), nil, metadataDs) if err != nil { return err } @@ -466,14 +466,11 @@ var importBenchCmd = &cli.Command{ Duration: time.Since(start), } if enc != nil { - stripCallers(tse.Trace) - if err := enc.Encode(tse); err != nil { return xerrors.Errorf("failed to write out tipsetexec: %w", err) } } if inverseChain[i-1].ParentState() != st { - stripCallers(tse.Trace) lastTrace := tse.Trace d, err := json.MarshalIndent(lastTrace, "", " ") if err != nil { @@ -492,21 +489,6 @@ var importBenchCmd = &cli.Command{ }, } -func walkExecutionTrace(et *types.ExecutionTrace) { - for _, gc := range et.GasCharges { - gc.Callers = nil - } - for _, sub := range et.Subcalls { - walkExecutionTrace(&sub) //nolint:scopelint,gosec - } -} - -func stripCallers(trace []*api.InvocResult) { - for _, t := range trace { - walkExecutionTrace(&t.ExecutionTrace) - } -} - type Invocation struct { TipSet types.TipSetKey Invoc *api.InvocResult @@ -514,28 +496,24 @@ type Invocation struct { const GasPerNs = 10 -func countGasCosts(et *types.ExecutionTrace) (int64, int64) { - var cgas, vgas int64 +func countGasCosts(et *types.ExecutionTrace) int64 { + var cgas int64 for _, gc := range et.GasCharges { cgas += gc.ComputeGas - vgas += gc.VirtualComputeGas } for _, sub := range et.Subcalls { - c, v := countGasCosts(&sub) //nolint + c := countGasCosts(&sub) //nolint cgas += c - vgas += v } - return cgas, vgas + return cgas } type stats struct { timeTaken meanVar gasRatio meanVar - - extraCovar *covar } type covar struct { @@ -680,32 +658,8 @@ func (v1 *meanVar) Combine(v2 *meanVar) { v1.m2 = m2 } -func getExtras(ex interface{}) (*string, *float64) { - if t, ok := ex.(string); ok { - return &t, nil - } - if size, ok := ex.(float64); ok { - return nil, &size - } - if exMap, ok := ex.(map[string]interface{}); ok { - t, tok := exMap["type"].(string) - size, sok := exMap["size"].(float64) - if tok && sok { - return &t, &size - } - if tok { - return &t, nil - } - if sok { - return nil, &size - } - return nil, nil - } - return nil, nil -} - func tallyGasCharges(charges map[string]*stats, et types.ExecutionTrace) { - for i, gc := range et.GasCharges { + for _, gc := range et.GasCharges { name := gc.Name if name == "OnIpldGetEnd" { continue @@ -716,45 +670,18 @@ func tallyGasCharges(charges map[string]*stats, et types.ExecutionTrace) { // discard initial very long OnVerifyPost continue } - eType, eSize := getExtras(gc.Extra) - - if name == "OnIpldGet" { - next := &types.GasTrace{} - if i+1 < len(et.GasCharges) { - next = et.GasCharges[i+1] - } - if next.Name != "OnIpldGetEnd" { - log.Warn("OnIpldGet without OnIpldGetEnd") - } else { - _, size := getExtras(next.Extra) - eSize = size - } - } - if eType != nil { - name += "-" + *eType - } compGas := gc.ComputeGas - if compGas == 0 { - compGas = gc.VirtualComputeGas - } - if compGas == 0 { - compGas = 1 - } s := charges[name] if s == nil { s = new(stats) charges[name] = s } - if eSize != nil { - if s.extraCovar == nil { - s.extraCovar = &covar{} - } - s.extraCovar.AddPoint(*eSize, tt) - } - s.timeTaken.AddPoint(tt) + if compGas == 0 { + compGas = 
1 + } ratio := tt / float64(compGas) * GasPerNs s.gasRatio.AddPoint(ratio) } @@ -880,13 +807,6 @@ var importAnalyzeCmd = &cli.Command{ } s.timeTaken.Combine(&v.timeTaken) s.gasRatio.Combine(&v.gasRatio) - - if v.extraCovar != nil { - if s.extraCovar == nil { - s.extraCovar = &covar{} - } - s.extraCovar.Combine(v.extraCovar) - } } totalTime += res.totalTime } @@ -902,13 +822,6 @@ var importAnalyzeCmd = &cli.Command{ s := charges[k] fmt.Printf("%s: incr by %.4f~%.4f; tt %.4f~%.4f\n", k, s.gasRatio.Mean(), s.gasRatio.Stddev(), s.timeTaken.Mean(), s.timeTaken.Stddev()) - if s.extraCovar != nil { - fmt.Printf("\t correll: %.2f, tt = %.2f * extra + %.2f\n", s.extraCovar.Correl(), - s.extraCovar.A(), s.extraCovar.B()) - fmt.Printf("\t covar: %.2f, extra: %.2f~%.2f, tt2: %.2f~%.2f, count %.0f\n", - s.extraCovar.Covariance(), s.extraCovar.meanX, s.extraCovar.StddevX(), - s.extraCovar.meanY, s.extraCovar.StddevY(), s.extraCovar.n) - } } sort.Slice(invocs, func(i, j int) bool { diff --git a/cmd/lotus-bench/main.go b/cmd/lotus-bench/main.go index 46a720e37..12d310b65 100644 --- a/cmd/lotus-bench/main.go +++ b/cmd/lotus-bench/main.go @@ -4,7 +4,6 @@ import ( "context" "encoding/json" "fmt" - "io/ioutil" "math/big" "math/rand" "os" @@ -197,7 +196,7 @@ var sealBenchCmd = &cli.Command{ return xerrors.Errorf("creating sectorbuilder dir: %w", err) } - tsdir, err := ioutil.TempDir(sdir, "bench") + tsdir, err := os.MkdirTemp(sdir, "bench") if err != nil { return err } @@ -287,7 +286,7 @@ var sealBenchCmd = &cli.Command{ // sectorbuilder directory... we need a better way to handle // this in other cases - fdata, err := ioutil.ReadFile(filepath.Join(sbdir, "pre-seal-"+maddr.String()+".json")) + fdata, err := os.ReadFile(filepath.Join(sbdir, "pre-seal-"+maddr.String()+".json")) if err != nil { return err } @@ -647,7 +646,7 @@ func runSeals(sb *ffiwrapper.Sealer, sbfs *basicfs.Provider, numSectors int, par return err } - if err := ioutil.WriteFile(saveC2inp, b, 0664); err != nil { + if err := os.WriteFile(saveC2inp, b, 0664); err != nil { log.Warnf("%+v", err) } } @@ -761,7 +760,7 @@ var proveCmd = &cli.Command{ return xerrors.Errorf("Usage: lotus-bench prove [input.json]") } - inb, err := ioutil.ReadFile(c.Args().First()) + inb, err := os.ReadFile(c.Args().First()) if err != nil { return xerrors.Errorf("reading input file: %w", err) } diff --git a/cmd/lotus-bench/simple.go b/cmd/lotus-bench/simple.go index 87e2c3bc0..a742b0fb3 100644 --- a/cmd/lotus-bench/simple.go +++ b/cmd/lotus-bench/simple.go @@ -5,7 +5,6 @@ import ( "encoding/base64" "encoding/json" "fmt" - "io/ioutil" "os" "strconv" "time" @@ -444,7 +443,7 @@ var simpleCommit1 = &cli.Command{ return err } - if err := ioutil.WriteFile(cctx.Args().Get(4), b, 0664); err != nil { + if err := os.WriteFile(cctx.Args().Get(4), b, 0664); err != nil { log.Warnf("%+v", err) } @@ -478,7 +477,7 @@ var simpleCommit2 = &cli.Command{ return xerrors.Errorf("Usage: lotus-bench prove [input.json]") } - inb, err := ioutil.ReadFile(c.Args().First()) + inb, err := os.ReadFile(c.Args().First()) if err != nil { return xerrors.Errorf("reading input file: %w", err) } @@ -861,7 +860,7 @@ var simpleProveReplicaUpdate1 = &cli.Command{ return xerrors.Errorf("json marshal vanilla proofs: %w", err) } - if err := ioutil.WriteFile(cctx.Args().Get(7), vpjb, 0666); err != nil { + if err := os.WriteFile(cctx.Args().Get(7), vpjb, 0666); err != nil { return xerrors.Errorf("writing vanilla proofs file: %w", err) } @@ -934,7 +933,7 @@ var simpleProveReplicaUpdate2 = &cli.Command{ return 
xerrors.Errorf("parse commr: %w", err) } - vpb, err := ioutil.ReadFile(cctx.Args().Get(3)) + vpb, err := os.ReadFile(cctx.Args().Get(3)) if err != nil { return xerrors.Errorf("reading valilla proof file: %w", err) } diff --git a/cmd/lotus-fountain/recaptcha.go b/cmd/lotus-fountain/recaptcha.go index 69359faa3..6b2327a03 100644 --- a/cmd/lotus-fountain/recaptcha.go +++ b/cmd/lotus-fountain/recaptcha.go @@ -6,7 +6,7 @@ package main import ( "encoding/json" - "io/ioutil" + "io" "net/http" "net/url" "os" @@ -63,7 +63,7 @@ func VerifyToken(token, remoteIP string) (Response, error) { return resp, err } - b, err := ioutil.ReadAll(r.Body) + b, err := io.ReadAll(r.Body) _ = r.Body.Close() // close immediately after reading finished if err != nil { return resp, err diff --git a/cmd/lotus-miner/actor.go b/cmd/lotus-miner/actor.go index dd78adfef..bec2202b4 100644 --- a/cmd/lotus-miner/actor.go +++ b/cmd/lotus-miner/actor.go @@ -55,9 +55,10 @@ var actorCmd = &cli.Command{ } var actorSetAddrsCmd = &cli.Command{ - Name: "set-addresses", - Aliases: []string{"set-addrs"}, - Usage: "set addresses that your miner can be publicly dialed on", + Name: "set-addresses", + Aliases: []string{"set-addrs"}, + Usage: "set addresses that your miner can be publicly dialed on", + ArgsUsage: "", Flags: []cli.Flag{ &cli.StringFlag{ Name: "from", @@ -170,8 +171,9 @@ var actorSetAddrsCmd = &cli.Command{ } var actorSetPeeridCmd = &cli.Command{ - Name: "set-peer-id", - Usage: "set the peer id of your miner", + Name: "set-peer-id", + Usage: "set the peer id of your miner", + ArgsUsage: "", Flags: []cli.Flag{ &cli.Int64Flag{ Name: "gas-limit", @@ -194,6 +196,10 @@ var actorSetPeeridCmd = &cli.Command{ ctx := lcli.ReqContext(cctx) + if cctx.NArg() != 1 { + return lcli.IncorrectNumArgs(cctx) + } + pid, err := peer.Decode(cctx.Args().Get(0)) if err != nil { return fmt.Errorf("failed to parse input as a peerId: %w", err) @@ -433,17 +439,8 @@ var actorControlList = &cli.Command{ &cli.BoolFlag{ Name: "verbose", }, - &cli.BoolFlag{ - Name: "color", - Usage: "use color in display output", - DefaultText: "depends on output being a TTY", - }, }, Action: func(cctx *cli.Context) error { - if cctx.IsSet("color") { - color.NoColor = !cctx.Bool("color") - } - minerApi, closer, err := lcli.GetStorageMinerAPI(cctx) if err != nil { return err @@ -729,15 +726,15 @@ var actorSetOwnerCmd = &cli.Command{ }, }, Action: func(cctx *cli.Context) error { + if cctx.NArg() != 2 { + return lcli.IncorrectNumArgs(cctx) + } + if !cctx.Bool("really-do-it") { fmt.Println("Pass --really-do-it to actually execute this action") return nil } - if cctx.NArg() != 2 { - return lcli.IncorrectNumArgs(cctx) - } - api, acloser, err := lcli.GetFullNodeAPI(cctx) if err != nil { return err diff --git a/cmd/lotus-miner/actor_test.go b/cmd/lotus-miner/actor_test.go index 791298ffa..dfb452213 100644 --- a/cmd/lotus-miner/actor_test.go +++ b/cmd/lotus-miner/actor_test.go @@ -34,8 +34,8 @@ func TestWorkerKeyChange(t *testing.T) { kit.QuietMiningLogs() - blocktime := 1 * time.Millisecond - client1, client2, miner, ens := kit.EnsembleTwoOne(t, kit.MockProofs()) + blocktime := 5 * time.Millisecond + client1, miner, ens := kit.EnsembleMinimal(t, kit.MockProofs()) ens.InterconnectAll().BeginMining(blocktime) output := bytes.NewBuffer(nil) @@ -96,7 +96,4 @@ func TestWorkerKeyChange(t *testing.T) { // Wait for finality (worker key switch). 
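
Many of the lotus-bench and lotus-fountain hunks above are mechanical io/ioutil deprecation cleanups: ioutil.ReadFile and ioutil.WriteFile become os.ReadFile and os.WriteFile, ioutil.TempDir becomes os.MkdirTemp, and ioutil.ReadAll becomes io.ReadAll. A self-contained sketch of the replacement calls (the paths and contents here are placeholders):

    package main

    import (
        "fmt"
        "io"
        "os"
        "path/filepath"
        "strings"
    )

    func main() {
        // os.MkdirTemp replaces ioutil.TempDir.
        dir, err := os.MkdirTemp("", "lotus-bench-example")
        if err != nil {
            panic(err)
        }
        defer os.RemoveAll(dir)

        // os.WriteFile / os.ReadFile replace ioutil.WriteFile / ioutil.ReadFile.
        path := filepath.Join(dir, "c2.json")
        if err := os.WriteFile(path, []byte(`{"ok":true}`), 0o664); err != nil {
            panic(err)
        }
        data, err := os.ReadFile(path)
        if err != nil {
            panic(err)
        }

        // io.ReadAll replaces ioutil.ReadAll for arbitrary readers (e.g. HTTP bodies).
        body, err := io.ReadAll(strings.NewReader(string(data)))
        if err != nil {
            panic(err)
        }
        fmt.Println(string(body))
    }
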
targetHeight := head.Height() + policy.ChainFinality client1.WaitTillChain(ctx, kit.HeightAtLeast(targetHeight)) - - // Make sure the other node can catch up. - client2.WaitTillChain(ctx, kit.HeightAtLeast(targetHeight)) } diff --git a/cmd/lotus-miner/allinfo_test.go b/cmd/lotus-miner/allinfo_test.go index 144bdff70..2388f2f7a 100644 --- a/cmd/lotus-miner/allinfo_test.go +++ b/cmd/lotus-miner/allinfo_test.go @@ -25,7 +25,7 @@ func TestMinerAllInfo(t *testing.T) { kit.QuietMiningLogs() client, miner, ens := kit.EnsembleMinimal(t) - ens.InterconnectAll().BeginMining(time.Second) + ens.InterconnectAll().BeginMiningMustPost(5 * time.Millisecond) run := func(t *testing.T) { app := cli.NewApp() diff --git a/cmd/lotus-miner/config.go b/cmd/lotus-miner/config.go index 652426583..b7af1b2e5 100644 --- a/cmd/lotus-miner/config.go +++ b/cmd/lotus-miner/config.go @@ -31,7 +31,7 @@ var configDefaultCmd = &cli.Command{ Action: func(cctx *cli.Context) error { c := config.DefaultStorageMiner() - cb, err := config.ConfigUpdate(c, nil, !cctx.Bool("no-comment")) + cb, err := config.ConfigUpdate(c, nil, config.Commented(!cctx.Bool("no-comment"))) if err != nil { return err } @@ -83,7 +83,7 @@ var configUpdateCmd = &cli.Command{ cfgDef := config.DefaultStorageMiner() - updated, err := config.ConfigUpdate(cfgNode, cfgDef, !cctx.Bool("no-comment")) + updated, err := config.ConfigUpdate(cfgNode, cfgDef, config.Commented(!cctx.Bool("no-comment"))) if err != nil { return err } diff --git a/cmd/lotus-miner/dagstore.go b/cmd/lotus-miner/dagstore.go index 519b43cc7..c0e37f63b 100644 --- a/cmd/lotus-miner/dagstore.go +++ b/cmd/lotus-miner/dagstore.go @@ -31,18 +31,7 @@ var dagstoreCmd = &cli.Command{ var dagstoreListShardsCmd = &cli.Command{ Name: "list-shards", Usage: "List all shards known to the dagstore, with their current status", - Flags: []cli.Flag{ - &cli.BoolFlag{ - Name: "color", - Usage: "use color in display output", - DefaultText: "depends on output being a TTY", - }, - }, Action: func(cctx *cli.Context) error { - if cctx.IsSet("color") { - color.NoColor = !cctx.Bool("color") - } - marketsApi, closer, err := lcli.GetMarketsAPI(cctx) if err != nil { return err @@ -64,18 +53,7 @@ var dagstoreRegisterShardCmd = &cli.Command{ Name: "register-shard", ArgsUsage: "[key]", Usage: "Register a shard", - Flags: []cli.Flag{ - &cli.BoolFlag{ - Name: "color", - Usage: "use color in display output", - DefaultText: "depends on output being a TTY", - }, - }, Action: func(cctx *cli.Context) error { - if cctx.IsSet("color") { - color.NoColor = !cctx.Bool("color") - } - if cctx.NArg() != 1 { return lcli.IncorrectNumArgs(cctx) } @@ -103,18 +81,7 @@ var dagstoreInitializeShardCmd = &cli.Command{ Name: "initialize-shard", ArgsUsage: "[key]", Usage: "Initialize the specified shard", - Flags: []cli.Flag{ - &cli.BoolFlag{ - Name: "color", - Usage: "use color in display output", - DefaultText: "depends on output being a TTY", - }, - }, Action: func(cctx *cli.Context) error { - if cctx.IsSet("color") { - color.NoColor = !cctx.Bool("color") - } - if cctx.NArg() != 1 { return lcli.IncorrectNumArgs(cctx) } @@ -135,18 +102,7 @@ var dagstoreRecoverShardCmd = &cli.Command{ Name: "recover-shard", ArgsUsage: "[key]", Usage: "Attempt to recover a shard in errored state", - Flags: []cli.Flag{ - &cli.BoolFlag{ - Name: "color", - Usage: "use color in display output", - DefaultText: "depends on output being a TTY", - }, - }, Action: func(cctx *cli.Context) error { - if cctx.IsSet("color") { - color.NoColor = !cctx.Bool("color") - } - if 
cctx.NArg() != 1 { return lcli.IncorrectNumArgs(cctx) } @@ -176,17 +132,8 @@ var dagstoreInitializeAllCmd = &cli.Command{ Name: "include-sealed", Usage: "initialize sealed pieces as well", }, - &cli.BoolFlag{ - Name: "color", - Usage: "use color in display output", - DefaultText: "depends on output being a TTY", - }, }, Action: func(cctx *cli.Context) error { - if cctx.IsSet("color") { - color.NoColor = !cctx.Bool("color") - } - concurrency := cctx.Uint("concurrency") sealed := cctx.Bool("sealed") @@ -236,18 +183,7 @@ var dagstoreInitializeAllCmd = &cli.Command{ var dagstoreGcCmd = &cli.Command{ Name: "gc", Usage: "Garbage collect the dagstore", - Flags: []cli.Flag{ - &cli.BoolFlag{ - Name: "color", - Usage: "use color in display output", - DefaultText: "depends on output being a TTY", - }, - }, Action: func(cctx *cli.Context) error { - if cctx.IsSet("color") { - color.NoColor = !cctx.Bool("color") - } - marketsApi, closer, err := lcli.GetMarketsAPI(cctx) if err != nil { return err @@ -317,18 +253,7 @@ var dagstoreLookupPiecesCmd = &cli.Command{ Name: "lookup-pieces", Usage: "Lookup pieces that a given CID belongs to", ArgsUsage: "", - Flags: []cli.Flag{ - &cli.BoolFlag{ - Name: "color", - Usage: "use color in display output", - DefaultText: "depends on output being a TTY", - }, - }, Action: func(cctx *cli.Context) error { - if cctx.IsSet("color") { - color.NoColor = !cctx.Bool("color") - } - if cctx.NArg() != 1 { return lcli.IncorrectNumArgs(cctx) } diff --git a/cmd/lotus-miner/index_provider.go b/cmd/lotus-miner/index_provider.go index 4ed14549d..2b6838a4b 100644 --- a/cmd/lotus-miner/index_provider.go +++ b/cmd/lotus-miner/index_provider.go @@ -3,7 +3,6 @@ package main import ( "fmt" - "github.com/fatih/color" "github.com/ipfs/go-cid" "github.com/urfave/cli/v2" @@ -23,18 +22,7 @@ var indexProvAnnounceCmd = &cli.Command{ Name: "announce", ArgsUsage: "", Usage: "Announce a deal to indexers so they can download its index", - Flags: []cli.Flag{ - &cli.BoolFlag{ - Name: "color", - Usage: "use color in display output", - DefaultText: "depends on output being a TTY", - }, - }, Action: func(cctx *cli.Context) error { - if cctx.IsSet("color") { - color.NoColor = !cctx.Bool("color") - } - if cctx.NArg() != 1 { return lcli.IncorrectNumArgs(cctx) } @@ -60,18 +48,7 @@ var indexProvAnnounceCmd = &cli.Command{ var indexProvAnnounceAllCmd = &cli.Command{ Name: "announce-all", Usage: "Announce all active deals to indexers so they can download the indices", - Flags: []cli.Flag{ - &cli.BoolFlag{ - Name: "color", - Usage: "use color in display output", - DefaultText: "depends on output being a TTY", - }, - }, Action: func(cctx *cli.Context) error { - if cctx.IsSet("color") { - color.NoColor = !cctx.Bool("color") - } - marketsApi, closer, err := lcli.GetMarketsAPI(cctx) if err != nil { return err diff --git a/cmd/lotus-miner/info.go b/cmd/lotus-miner/info.go index d791b0760..974635eb2 100644 --- a/cmd/lotus-miner/info.go +++ b/cmd/lotus-miner/info.go @@ -7,8 +7,6 @@ import ( corebig "math/big" "os" "sort" - "strings" - "text/tabwriter" "time" "github.com/fatih/color" @@ -18,8 +16,6 @@ import ( "golang.org/x/xerrors" "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-fil-markets/retrievalmarket" - "github.com/filecoin-project/go-fil-markets/storagemarket" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/big" "github.com/filecoin-project/specs-actors/actors/builtin" @@ -65,12 +61,6 @@ func infoCmdAct(cctx *cli.Context) error { } defer 
closer() - marketsApi, closer, err := lcli.GetMarketsAPI(cctx) - if err != nil { - return err - } - defer closer() - fullapi, acloser, err := lcli.GetFullNodeAPIV1(cctx) if err != nil { return err @@ -84,14 +74,7 @@ func infoCmdAct(cctx *cli.Context) error { return err } - fmt.Println("Enabled subsystems (from miner API):", subsystems) - - subsystems, err = marketsApi.RuntimeSubsystems(ctx) - if err != nil { - return err - } - - fmt.Println("Enabled subsystems (from markets API):", subsystems) + fmt.Println("Enabled subsystems:", subsystems) start, err := minerApi.StartTime(ctx) if err != nil { @@ -128,11 +111,6 @@ func infoCmdAct(cctx *cli.Context) error { return err } - err = handleMarketsInfo(ctx, marketsApi) - if err != nil { - return err - } - return nil } @@ -393,105 +371,6 @@ func handleMiningInfo(ctx context.Context, cctx *cli.Context, fullapi v1api.Full return nil } -func handleMarketsInfo(ctx context.Context, nodeApi api.StorageMiner) error { - deals, err := nodeApi.MarketListIncompleteDeals(ctx) - if err != nil { - return err - } - - type dealStat struct { - count, verifCount int - bytes, verifBytes uint64 - } - dsAdd := func(ds *dealStat, deal storagemarket.MinerDeal) { - ds.count++ - ds.bytes += uint64(deal.Proposal.PieceSize) - if deal.Proposal.VerifiedDeal { - ds.verifCount++ - ds.verifBytes += uint64(deal.Proposal.PieceSize) - } - } - - showDealStates := map[storagemarket.StorageDealStatus]struct{}{ - storagemarket.StorageDealActive: {}, - storagemarket.StorageDealAcceptWait: {}, - storagemarket.StorageDealReserveProviderFunds: {}, - storagemarket.StorageDealProviderFunding: {}, - storagemarket.StorageDealTransferring: {}, - storagemarket.StorageDealValidating: {}, - storagemarket.StorageDealStaged: {}, - storagemarket.StorageDealAwaitingPreCommit: {}, - storagemarket.StorageDealSealing: {}, - storagemarket.StorageDealPublish: {}, - storagemarket.StorageDealCheckForAcceptance: {}, - storagemarket.StorageDealPublishing: {}, - } - - var total dealStat - perState := map[storagemarket.StorageDealStatus]*dealStat{} - for _, deal := range deals { - if _, ok := showDealStates[deal.State]; !ok { - continue - } - if perState[deal.State] == nil { - perState[deal.State] = new(dealStat) - } - - dsAdd(&total, deal) - dsAdd(perState[deal.State], deal) - } - - type wstr struct { - str string - status storagemarket.StorageDealStatus - } - sorted := make([]wstr, 0, len(perState)) - for status, stat := range perState { - st := strings.TrimPrefix(storagemarket.DealStates[status], "StorageDeal") - sorted = append(sorted, wstr{ - str: fmt.Sprintf(" %s:\t%d\t\t%s\t(Verified: %d\t%s)\n", st, stat.count, types.SizeStr(types.NewInt(stat.bytes)), stat.verifCount, types.SizeStr(types.NewInt(stat.verifBytes))), - status: status, - }, - ) - } - sort.Slice(sorted, func(i, j int) bool { - if sorted[i].status == storagemarket.StorageDealActive || sorted[j].status == storagemarket.StorageDealActive { - return sorted[i].status == storagemarket.StorageDealActive - } - return sorted[i].status > sorted[j].status - }) - - fmt.Println() - fmt.Printf("Storage Deals: %d, %s\n", total.count, types.SizeStr(types.NewInt(total.bytes))) - - tw := tabwriter.NewWriter(os.Stdout, 1, 1, 1, ' ', 0) - for _, e := range sorted { - _, _ = tw.Write([]byte(e.str)) - } - - _ = tw.Flush() - fmt.Println() - - retrievals, err := nodeApi.MarketListRetrievalDeals(ctx) - if err != nil { - return xerrors.Errorf("getting retrieval deal list: %w", err) - } - - var retrComplete dealStat - for _, retrieval := range retrievals { - if 
retrieval.Status == retrievalmarket.DealStatusCompleted { - retrComplete.count++ - retrComplete.bytes += retrieval.TotalSent - } - } - - fmt.Printf("Retrieval Deals (complete): %d, %s\n", retrComplete.count, types.SizeStr(types.NewInt(retrComplete.bytes))) - - fmt.Println() - - return nil -} - type stateMeta struct { i int col color.Attribute diff --git a/cmd/lotus-miner/info_all.go b/cmd/lotus-miner/info_all.go index bf6d7e4b9..2cf07385c 100644 --- a/cmd/lotus-miner/info_all.go +++ b/cmd/lotus-miner/info_all.go @@ -150,11 +150,6 @@ var infoAllCmd = &cli.Command{ } } - fmt.Println("\n#: Retrieval Deals") - if err := retrievalDealsListCmd.Action(cctx); err != nil { - fmt.Println("ERROR: ", err) - } - fmt.Println("\n#: Data Transfers") { fs := &flag.FlagSet{} @@ -198,8 +193,17 @@ var infoAllCmd = &cli.Command{ } fmt.Println("\n#: Sector List") - if err := sectorsListCmd.Action(cctx); err != nil { - fmt.Println("ERROR: ", err) + { + fs := &flag.FlagSet{} + for _, f := range sectorsListCmd.Flags { + if err := f.Apply(fs); err != nil { + fmt.Println("ERROR: ", err) + } + } + + if err := sectorsListCmd.Action(cli.NewContext(cctx.App, fs, cctx)); err != nil { + fmt.Println("ERROR: ", err) + } } fmt.Println("\n#: Storage Sector List") diff --git a/cmd/lotus-miner/init.go b/cmd/lotus-miner/init.go index 0bdfac79e..c109e85b9 100644 --- a/cmd/lotus-miner/init.go +++ b/cmd/lotus-miner/init.go @@ -7,7 +7,6 @@ import ( "encoding/binary" "encoding/json" "fmt" - "io/ioutil" "net/http" "os" "path/filepath" @@ -246,7 +245,7 @@ var initCmd = &cli.Command{ return xerrors.Errorf("marshaling storage config: %w", err) } - if err := ioutil.WriteFile(filepath.Join(lr.Path(), "sectorstore.json"), b, 0644); err != nil { + if err := os.WriteFile(filepath.Join(lr.Path(), "sectorstore.json"), b, 0644); err != nil { return xerrors.Errorf("persisting storage metadata (%s): %w", filepath.Join(lr.Path(), "sectorstore.json"), err) } @@ -292,7 +291,7 @@ func migratePreSealMeta(ctx context.Context, api v1api.FullNode, metadata string return xerrors.Errorf("expanding preseal dir: %w", err) } - b, err := ioutil.ReadFile(metadata) + b, err := os.ReadFile(metadata) if err != nil { return xerrors.Errorf("reading preseal metadata: %w", err) } diff --git a/cmd/lotus-miner/init_restore.go b/cmd/lotus-miner/init_restore.go index 618825a27..7e28729bb 100644 --- a/cmd/lotus-miner/init_restore.go +++ b/cmd/lotus-miner/init_restore.go @@ -3,7 +3,6 @@ package main import ( "context" "encoding/json" - "io/ioutil" "os" "github.com/docker/go-units" @@ -59,7 +58,7 @@ var restoreCmd = &cli.Command{ return xerrors.Errorf("expanding storage config path: %w", err) } - cfb, err := ioutil.ReadFile(cf) + cfb, err := os.ReadFile(cf) if err != nil { return xerrors.Errorf("reading storage config: %w", err) } @@ -189,7 +188,7 @@ func restore(ctx context.Context, cctx *cli.Context, targetPath string, strConfi return } - ff, err := config.FromFile(cf, rcfg) + ff, err := config.FromFile(cf, config.SetDefault(func() (interface{}, error) { return rcfg, nil })) if err != nil { cerr = xerrors.Errorf("loading config: %w", err) return diff --git a/cmd/lotus-miner/market.go b/cmd/lotus-miner/market.go index 706e49236..29eb662a7 100644 --- a/cmd/lotus-miner/market.go +++ b/cmd/lotus-miner/market.go @@ -16,7 +16,6 @@ import ( tm "github.com/buger/goterm" "github.com/docker/go-units" - "github.com/fatih/color" "github.com/ipfs/go-cid" "github.com/ipfs/go-cidutil/cidenc" "github.com/libp2p/go-libp2p/core/peer" @@ -25,7 +24,7 @@ import ( "golang.org/x/xerrors" cborutil 
"github.com/filecoin-project/go-cbor-util" - datatransfer "github.com/filecoin-project/go-data-transfer" + datatransfer "github.com/filecoin-project/go-data-transfer/v2" "github.com/filecoin-project/go-fil-markets/storagemarket" "github.com/filecoin-project/go-state-types/abi" @@ -779,11 +778,6 @@ var transfersListCmd = &cli.Command{ Aliases: []string{"v"}, Usage: "print verbose transfer details", }, - &cli.BoolFlag{ - Name: "color", - Usage: "use color in display output", - DefaultText: "depends on output being a TTY", - }, &cli.BoolFlag{ Name: "completed", Usage: "show completed data transfers", @@ -798,10 +792,6 @@ var transfersListCmd = &cli.Command{ }, }, Action: func(cctx *cli.Context) error { - if cctx.IsSet("color") { - color.NoColor = !cctx.Bool("color") - } - api, closer, err := lcli.GetMarketsAPI(cctx) if err != nil { return err diff --git a/cmd/lotus-miner/proving.go b/cmd/lotus-miner/proving.go index 9ae8bdd48..3ecc58ba7 100644 --- a/cmd/lotus-miner/proving.go +++ b/cmd/lotus-miner/proving.go @@ -19,6 +19,7 @@ import ( "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-bitfield" "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/proof" "github.com/filecoin-project/lotus/blockstore" "github.com/filecoin-project/lotus/build" @@ -640,7 +641,46 @@ It will not send any messages to the chain.`, if err != nil { return err } - jr, err := json.Marshal(res) + + //convert sector information into easily readable information + type PoStPartition struct { + Index uint64 + Skipped []uint64 + } + type SubmitWindowedPoStParams struct { + Deadline uint64 + Partitions []PoStPartition + Proofs []proof.PoStProof + ChainCommitEpoch abi.ChainEpoch + ChainCommitRand abi.Randomness + } + var postParams []SubmitWindowedPoStParams + for _, i := range res { + var postParam SubmitWindowedPoStParams + postParam.Deadline = i.Deadline + + // Initialize the postParam.Partitions slice with the same length as i.Partitions + postParam.Partitions = make([]PoStPartition, len(i.Partitions)) + + for id, part := range i.Partitions { + postParam.Partitions[id].Index = part.Index + count, err := part.Skipped.Count() + if err != nil { + return err + } + sectors, err := part.Skipped.All(count) + if err != nil { + return err + } + postParam.Partitions[id].Skipped = sectors + } + postParam.Proofs = i.Proofs + postParam.ChainCommitEpoch = i.ChainCommitEpoch + postParam.ChainCommitRand = i.ChainCommitRand + postParams = append(postParams, postParam) + } + + jr, err := json.MarshalIndent(postParams, "", " ") if err != nil { return err } diff --git a/cmd/lotus-miner/retrieval-deals.go b/cmd/lotus-miner/retrieval-deals.go index 9fa943f4f..42b0fa1f6 100644 --- a/cmd/lotus-miner/retrieval-deals.go +++ b/cmd/lotus-miner/retrieval-deals.go @@ -3,13 +3,11 @@ package main import ( "fmt" "os" - "sort" "text/tabwriter" "github.com/docker/go-units" "github.com/urfave/cli/v2" - "github.com/filecoin-project/go-fil-markets/retrievalmarket" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/lotus/chain/types" @@ -21,7 +19,6 @@ var retrievalDealsCmd = &cli.Command{ Usage: "Manage retrieval deals and related configuration", Subcommands: []*cli.Command{ retrievalDealSelectionCmd, - retrievalDealsListCmd, retrievalSetAskCmd, retrievalGetAskCmd, }, @@ -124,48 +121,6 @@ var retrievalDealSelectionRejectCmd = &cli.Command{ }, } -var retrievalDealsListCmd = &cli.Command{ - Name: "list", - Usage: "List all active retrieval deals for this miner", - Action: 
func(cctx *cli.Context) error { - api, closer, err := lcli.GetMarketsAPI(cctx) - if err != nil { - return err - } - defer closer() - - deals, err := api.MarketListRetrievalDeals(lcli.DaemonContext(cctx)) - if err != nil { - return err - } - - sort.Slice(deals, func(i, j int) bool { - return deals[i].ID < deals[j].ID - }) - - w := tabwriter.NewWriter(os.Stdout, 2, 4, 2, ' ', 0) - - _, _ = fmt.Fprintf(w, "Receiver\tDealID\tPayload\tState\tPricePerByte\tBytesSent\tMessage\n") - - for _, deal := range deals { - payloadCid := deal.PayloadCID.String() - - _, _ = fmt.Fprintf(w, - "%s\t%d\t%s\t%s\t%s\t%d\t%s\n", - deal.Receiver.String(), - deal.ID, - "..."+payloadCid[len(payloadCid)-8:], - retrievalmarket.DealStatuses[deal.Status], - deal.PricePerByte.String(), - deal.TotalSent, - deal.Message, - ) - } - - return w.Flush() - }, -} - var retrievalSetAskCmd = &cli.Command{ Name: "set-ask", Usage: "Configure the provider's retrieval ask", diff --git a/cmd/lotus-miner/sealing.go b/cmd/lotus-miner/sealing.go index 4810b9ab9..b2f4dcab9 100644 --- a/cmd/lotus-miner/sealing.go +++ b/cmd/lotus-miner/sealing.go @@ -45,18 +45,7 @@ func workersCmd(sealing bool) *cli.Command { return &cli.Command{ Name: "workers", Usage: "list workers", - Flags: []cli.Flag{ - &cli.BoolFlag{ - Name: "color", - Usage: "use color in display output", - DefaultText: "depends on output being a TTY", - }, - }, Action: func(cctx *cli.Context) error { - if cctx.IsSet("color") { - color.NoColor = !cctx.Bool("color") - } - minerApi, closer, err := lcli.GetStorageMinerAPI(cctx) if err != nil { return err @@ -218,21 +207,12 @@ var sealingJobsCmd = &cli.Command{ Name: "jobs", Usage: "list running jobs", Flags: []cli.Flag{ - &cli.BoolFlag{ - Name: "color", - Usage: "use color in display output", - DefaultText: "depends on output being a TTY", - }, &cli.BoolFlag{ Name: "show-ret-done", Usage: "show returned but not consumed calls", }, }, Action: func(cctx *cli.Context) error { - if cctx.IsSet("color") { - color.NoColor = !cctx.Bool("color") - } - minerApi, closer, err := lcli.GetStorageMinerAPI(cctx) if err != nil { return err diff --git a/cmd/lotus-miner/sectors.go b/cmd/lotus-miner/sectors.go index fc5fdcef6..8d3a4c884 100644 --- a/cmd/lotus-miner/sectors.go +++ b/cmd/lotus-miner/sectors.go @@ -9,6 +9,7 @@ import ( "sort" "strconv" "strings" + "sync" "time" "github.com/docker/go-units" @@ -29,15 +30,19 @@ import ( "github.com/filecoin-project/lotus/chain/actors" "github.com/filecoin-project/lotus/chain/actors/adt" lminer "github.com/filecoin-project/lotus/chain/actors/builtin/miner" + "github.com/filecoin-project/lotus/chain/actors/builtin/verifreg" "github.com/filecoin-project/lotus/chain/actors/policy" "github.com/filecoin-project/lotus/chain/types" lcli "github.com/filecoin-project/lotus/cli" cliutil "github.com/filecoin-project/lotus/cli/util" + "github.com/filecoin-project/lotus/lib/result" "github.com/filecoin-project/lotus/lib/strle" "github.com/filecoin-project/lotus/lib/tablewriter" sealing "github.com/filecoin-project/lotus/storage/pipeline" ) +const parallelSectorChecks = 300 + var sectorsCmd = &cli.Command{ Name: "sectors", Usage: "interact with sector store", @@ -51,7 +56,6 @@ var sectorsCmd = &cli.Command{ sectorPreCommitsCmd, sectorsCheckExpireCmd, sectorsExpiredCmd, - sectorsRenewCmd, sectorsExtendCmd, sectorsTerminateCmd, sectorsRemoveCmd, @@ -121,8 +125,8 @@ var sectorsStatusCmd = &cli.Command{ defer closer() ctx := lcli.ReqContext(cctx) - if !cctx.Args().Present() { - return fmt.Errorf("must specify sector number to get 
status of") + if cctx.NArg() != 1 { + return lcli.IncorrectNumArgs(cctx) } id, err := strconv.ParseUint(cctx.Args().First(), 10, 64) @@ -278,12 +282,6 @@ var sectorsListCmd = &cli.Command{ Usage: "show removed sectors", Aliases: []string{"r"}, }, - &cli.BoolFlag{ - Name: "color", - Usage: "use color in display output", - DefaultText: "depends on output being a TTY", - Aliases: []string{"c"}, - }, &cli.BoolFlag{ Name: "fast", Usage: "don't show on-chain info for better performance", @@ -313,13 +311,15 @@ var sectorsListCmd = &cli.Command{ Usage: "only show sectors which aren't in the 'Proving' state", Aliases: []string{"u"}, }, + &cli.Int64Flag{ + Name: "check-parallelism", + Usage: "number of parallel requests to make for checking sector states", + Value: parallelSectorChecks, + }, }, Action: func(cctx *cli.Context) error { - if cctx.IsSet("color") { - color.NoColor = !cctx.Bool("color") - } - - minerApi, closer, err := lcli.GetStorageMinerAPI(cctx) + // http mode allows for parallel json decoding/encoding, which was a bottleneck here + minerApi, closer, err := lcli.GetStorageMinerAPI(cctx, cliutil.StorageMinerUseHttp) if err != nil { return err } @@ -418,16 +418,37 @@ var sectorsListCmd = &cli.Command{ fast := cctx.Bool("fast") - for _, s := range list { - st, err := minerApi.SectorsStatus(ctx, s, !fast) - if err != nil { + throttle := make(chan struct{}, cctx.Int64("check-parallelism")) + + slist := make([]result.Result[api.SectorInfo], len(list)) + var wg sync.WaitGroup + for i, s := range list { + throttle <- struct{}{} + wg.Add(1) + go func(i int, s abi.SectorNumber) { + defer wg.Done() + defer func() { <-throttle }() + r := result.Wrap(minerApi.SectorsStatus(ctx, s, !fast)) + if r.Error != nil { + r.Value.SectorID = s + } + slist[i] = r + }(i, s) + } + wg.Wait() + + for _, rsn := range slist { + if rsn.Error != nil { tw.Write(map[string]interface{}{ - "ID": s, + "ID": rsn.Value.SectorID, "Error": err, }) continue } + st := rsn.Value + s := st.SectorID + if !showRemoved && st.State == api.SectorState(sealing.Removed) { continue } @@ -689,7 +710,7 @@ type PseudoExtendSectorExpirationParams struct { Extensions []PseudoExpirationExtension } -func NewPseudoExtendParams(p *miner.ExtendSectorExpirationParams) (*PseudoExtendSectorExpirationParams, error) { +func NewPseudoExtendParams(p *miner.ExtendSectorExpiration2Params) (*PseudoExtendSectorExpirationParams, error) { res := PseudoExtendSectorExpirationParams{} for _, ext := range p.Extensions { scount, err := ext.Sectors.Count() @@ -743,14 +764,14 @@ func ArrayToString(array []uint64) string { return strings.Join(sarray, ",") } -func getSectorsFromFile(filePath string) ([]uint64, error) { +func getSectorsFromFile(filePath string) ([]abi.SectorNumber, error) { file, err := os.Open(filePath) if err != nil { return nil, err } scanner := bufio.NewScanner(file) - sectors := make([]uint64, 0) + sectors := make([]abi.SectorNumber, 0) for scanner.Scan() { line := scanner.Text() @@ -760,7 +781,7 @@ func getSectorsFromFile(filePath string) ([]uint64, error) { return nil, xerrors.Errorf("could not parse %s as sector id: %s", line, err) } - sectors = append(sectors, id) + sectors = append(sectors, abi.SectorNumber(id)) } if err = file.Close(); err != nil { @@ -770,9 +791,19 @@ func getSectorsFromFile(filePath string) ([]uint64, error) { return sectors, nil } -var sectorsRenewCmd = &cli.Command{ - Name: "renew", - Usage: "Renew expiring sectors while not exceeding each sector's max life", +func SectorNumsToBitfield(sectors []abi.SectorNumber) 
bitfield.BitField { + var numbers []uint64 + for _, sector := range sectors { + numbers = append(numbers, uint64(sector)) + } + + return bitfield.NewFromSet(numbers) +} + +var sectorsExtendCmd = &cli.Command{ + Name: "extend", + Usage: "Extend expiring sectors while not exceeding each sector's max life", + ArgsUsage: "", Flags: []cli.Flag{ &cli.Int64Flag{ Name: "from", @@ -803,6 +834,10 @@ var sectorsRenewCmd = &cli.Command{ Name: "only-cc", Usage: "only extend CC sectors (useful for making sector ready for snap upgrade)", }, + &cli.BoolFlag{ + Name: "drop-claims", + Usage: "drop claims for sectors that can be extended, but only by dropping some of their verified power claims", + }, &cli.Int64Flag{ Name: "tolerance", Usage: "don't try to extend sectors by fewer than this number of epochs, defaults to 7 days", @@ -815,11 +850,11 @@ var sectorsRenewCmd = &cli.Command{ }, &cli.Int64Flag{ Name: "max-sectors", - Usage: "the maximum number of sectors contained in each message message", + Usage: "the maximum number of sectors contained in each message", }, &cli.BoolFlag{ Name: "really-do-it", - Usage: "pass this flag to really renew sectors, otherwise will only print out json representation of parameters", + Usage: "pass this flag to really extend sectors, otherwise will only print out json representation of parameters", }, }, Action: func(cctx *cli.Context) error { @@ -870,7 +905,8 @@ var sectorsRenewCmd = &cli.Command{ } tbs := blockstore.NewTieredBstore(blockstore.NewAPIBlockstore(fullApi), blockstore.NewMemory()) - mas, err := lminer.Load(adt.WrapStore(ctx, cbor.NewCborStore(tbs)), mact) + adtStore := adt.WrapStore(ctx, cbor.NewCborStore(tbs)) + mas, err := lminer.Load(adtStore, mact) if err != nil { return err } @@ -896,8 +932,7 @@ var sectorsRenewCmd = &cli.Command{ return err } - excludeSet := make(map[uint64]struct{}) - + excludeSet := make(map[abi.SectorNumber]struct{}) if cctx.IsSet("exclude") { excludeSectors, err := getSectorsFromFile(cctx.String("exclude")) if err != nil { @@ -909,28 +944,24 @@ var sectorsRenewCmd = &cli.Command{ } } - var sis []*miner.SectorOnChainInfo - - if cctx.IsSet("sector-file") { - sectors, err := getSectorsFromFile(cctx.String("sector-file")) - if err != nil { - return err + var sectors []abi.SectorNumber + if cctx.Args().Present() { + if cctx.IsSet("sector-file") { + return xerrors.Errorf("sector-file specified along with command line params") } - for _, id := range sectors { - if _, exclude := excludeSet[id]; exclude { - continue + for i, s := range cctx.Args().Slice() { + id, err := strconv.ParseUint(s, 10, 64) + if err != nil { + return xerrors.Errorf("could not parse sector %d: %w", i, err) } - si, found := activeSectorsInfo[abi.SectorNumber(id)] - if !found { - return xerrors.Errorf("sector %d is not active", id) - } - if len(si.DealIDs) > 0 && cctx.Bool("only-cc") { - continue - } - - sis = append(sis, si) + sectors = append(sectors, abi.SectorNumber(id)) + } + } else if cctx.IsSet("sector-file") { + sectors, err = getSectorsFromFile(cctx.String("sector-file")) + if err != nil { + return err } } else { from := currEpoch + 120 @@ -945,19 +976,28 @@ var sectorsRenewCmd = &cli.Command{ } for _, si := range activeSet { - if len(si.DealIDs) > 0 && cctx.Bool("only-cc") { - continue - } - if si.Expiration >= from && si.Expiration <= to { - if _, exclude := excludeSet[uint64(si.SectorNumber)]; !exclude { - sis = append(sis, si) - } + sectors = append(sectors, si.SectorNumber) } } } - extensions := map[lminer.SectorLocation]map[abi.ChainEpoch][]uint64{} + var 
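SectorNumsToBitfield, introduced just above, converts a slice of abi.SectorNumber values into a go-bitfield set, which is the representation the extension declarations carry on chain. A small usage sketch against the go-bitfield API (the example sector numbers are made up):

package main

import (
	"fmt"

	"github.com/filecoin-project/go-bitfield"
	"github.com/filecoin-project/go-state-types/abi"
)

// same shape as the helper introduced in the patch
func SectorNumsToBitfield(sectors []abi.SectorNumber) bitfield.BitField {
	var numbers []uint64
	for _, sector := range sectors {
		numbers = append(numbers, uint64(sector))
	}
	return bitfield.NewFromSet(numbers)
}

func main() {
	bf := SectorNumsToBitfield([]abi.SectorNumber{10, 11, 42})

	// Count returns how many bits are set; the patch uses the same call to
	// size the sectors-without-claims portion of each extension declaration.
	n, err := bf.Count()
	if err != nil {
		panic(err)
	}
	fmt.Println("sectors in bitfield:", n) // 3

	// membership checks work on the same structure
	has, err := bf.IsSet(42)
	if err != nil {
		panic(err)
	}
	fmt.Println("contains 42:", has) // true
}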
sis []*miner.SectorOnChainInfo + for _, id := range sectors { + if _, exclude := excludeSet[id]; exclude { + continue + } + + si, found := activeSectorsInfo[id] + if !found { + return xerrors.Errorf("sector %d is not active", id) + } + if len(si.DealIDs) > 0 && cctx.Bool("only-cc") { + continue + } + + sis = append(sis, si) + } withinTolerance := func(a, b abi.ChainEpoch) bool { diff := a - b @@ -968,6 +1008,7 @@ var sectorsRenewCmd = &cli.Command{ return diff <= abi.ChainEpoch(cctx.Int64("tolerance")) } + extensions := map[lminer.SectorLocation]map[abi.ChainEpoch][]abi.SectorNumber{} for _, si := range sis { extension := abi.ChainEpoch(cctx.Int64("extension")) newExp := si.Expiration + extension @@ -997,63 +1038,144 @@ var sectorsRenewCmd = &cli.Command{ es, found := extensions[*l] if !found { - ne := make(map[abi.ChainEpoch][]uint64) - ne[newExp] = []uint64{uint64(si.SectorNumber)} + ne := make(map[abi.ChainEpoch][]abi.SectorNumber) + ne[newExp] = []abi.SectorNumber{si.SectorNumber} extensions[*l] = ne } else { added := false for exp := range es { if withinTolerance(newExp, exp) { - es[exp] = append(es[exp], uint64(si.SectorNumber)) + es[exp] = append(es[exp], si.SectorNumber) added = true break } } if !added { - es[newExp] = []uint64{uint64(si.SectorNumber)} + es[newExp] = []abi.SectorNumber{si.SectorNumber} } } } - var params []miner.ExtendSectorExpirationParams + verifregAct, err := fullApi.StateGetActor(ctx, builtin.VerifiedRegistryActorAddr, types.EmptyTSK) + if err != nil { + return xerrors.Errorf("failed to lookup verifreg actor: %w", err) + } - p := miner.ExtendSectorExpirationParams{} + verifregSt, err := verifreg.Load(adtStore, verifregAct) + if err != nil { + return xerrors.Errorf("failed to load verifreg state: %w", err) + } + + claimsMap, err := verifregSt.GetClaims(maddr) + if err != nil { + return xerrors.Errorf("failed to lookup claims for miner: %w", err) + } + + claimIdsBySector, err := verifregSt.GetClaimIdsBySector(maddr) + if err != nil { + return xerrors.Errorf("failed to lookup claim IDs by sector: %w", err) + } + + sectorsMax, err := policy.GetAddressedSectorsMax(nv) + if err != nil { + return err + } + + declMax, err := policy.GetDeclarationsMax(nv) + if err != nil { + return err + } + + addrSectors := sectorsMax + if cctx.Int("max-sectors") != 0 { + addrSectors = cctx.Int("max-sectors") + if addrSectors > sectorsMax { + return xerrors.Errorf("the specified max-sectors exceeds the maximum limit") + } + } + + var params []miner.ExtendSectorExpiration2Params + + p := miner.ExtendSectorExpiration2Params{} scount := 0 for l, exts := range extensions { for newExp, numbers := range exts { - scount += len(numbers) - var addrSectors int - sectorsMax, err := policy.GetAddressedSectorsMax(nv) - if err != nil { - return err - } - if cctx.Int("max-sectors") == 0 { - addrSectors = sectorsMax - } else { - addrSectors = cctx.Int("max-sectors") - if addrSectors > sectorsMax { - return xerrors.Errorf("the specified max-sectors exceeds the maximum limit") + sectorsWithoutClaimsToExtend := bitfield.New() + var sectorsWithClaims []miner.SectorClaim + for _, sectorNumber := range numbers { + claimIdsToMaintain := make([]verifreg.ClaimId, 0) + claimIdsToDrop := make([]verifreg.ClaimId, 0) + cannotExtendSector := false + claimIds, ok := claimIdsBySector[sectorNumber] + // Nothing to check, add to ccSectors + if !ok { + sectorsWithoutClaimsToExtend.Set(uint64(sectorNumber)) + } else { + for _, claimId := range claimIds { + claim, ok := claimsMap[claimId] + if !ok { + return 
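The extensions map assembled above groups candidate sectors by deadline/partition and then buckets their target expirations, so sectors whose desired new expirations sit within --tolerance of each other share a single extension entry instead of producing one per sector. A self-contained sketch of that bucketing step over plain epochs (sector numbers and epoch values are illustrative):

package main

import "fmt"

type epoch int64

func withinTolerance(tolerance epoch) func(a, b epoch) bool {
	return func(a, b epoch) bool {
		diff := a - b
		if diff < 0 {
			diff = -diff
		}
		return diff <= tolerance
	}
}

func main() {
	// desired new expirations per sector (sector number -> epoch); values are made up
	newExps := map[uint64]epoch{1: 100000, 2: 100050, 3: 250000, 4: 100120}
	within := withinTolerance(200) // the patch defaults tolerance to 7 days of 30-second epochs (20160)

	// bucket sectors whose new expirations are within tolerance of an existing bucket,
	// mirroring the handling of the es map in the patch
	buckets := map[epoch][]uint64{}
	for sector, exp := range newExps {
		added := false
		for b := range buckets {
			if within(exp, b) {
				buckets[b] = append(buckets[b], sector)
				added = true
				break
			}
		}
		if !added {
			buckets[exp] = []uint64{sector}
		}
	}

	for exp, sectors := range buckets {
		fmt.Printf("extend to %d: sectors %v\n", exp, sectors)
	}
}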
xerrors.Errorf("failed to find claim for claimId %d", claimId) + } + claimExpiration := claim.TermStart + claim.TermMax + // can be maintained in the extended sector + if claimExpiration > newExp { + claimIdsToMaintain = append(claimIdsToMaintain, claimId) + } else { + sectorInfo, ok := activeSectorsInfo[sectorNumber] + if !ok { + return xerrors.Errorf("failed to find sector in active sector set: %w", err) + } + if !cctx.Bool("drop-claims") || + // FIP-0045 requires the claim minimum duration to have passed + currEpoch <= (claim.TermStart+claim.TermMin) || + // FIP-0045 requires the sector to be in its last 30 days of life + (currEpoch <= sectorInfo.Expiration-builtin.EndOfLifeClaimDropPeriod) { + fmt.Printf("skipping sector %d because claim %d does not live long enough \n", sectorNumber, claimId) + cannotExtendSector = true + break + } + + claimIdsToDrop = append(claimIdsToDrop, claimId) + } + } + if cannotExtendSector { + continue + } + + if len(claimIdsToMaintain)+len(claimIdsToDrop) != 0 { + sectorsWithClaims = append(sectorsWithClaims, miner.SectorClaim{ + SectorNumber: sectorNumber, + MaintainClaims: claimIdsToMaintain, + DropClaims: claimIdsToDrop, + }) + } } } - declMax, err := policy.GetDeclarationsMax(nv) + sectorsWithoutClaimsCount, err := sectorsWithoutClaimsToExtend.Count() if err != nil { - return err - } - if scount > addrSectors || len(p.Extensions) == declMax { - params = append(params, p) - p = miner.ExtendSectorExpirationParams{} - scount = len(numbers) + return xerrors.Errorf("failed to count cc sectors: %w", err) } - p.Extensions = append(p.Extensions, miner.ExpirationExtension{ - Deadline: l.Deadline, - Partition: l.Partition, - Sectors: bitfield.NewFromSet(numbers), - NewExpiration: newExp, + sectorsInDecl := int(sectorsWithoutClaimsCount) + len(sectorsWithClaims) + scount += sectorsInDecl + + if scount > addrSectors || len(p.Extensions) >= declMax { + params = append(params, p) + p = miner.ExtendSectorExpiration2Params{} + scount = sectorsInDecl + } + + p.Extensions = append(p.Extensions, miner.ExpirationExtension2{ + Deadline: l.Deadline, + Partition: l.Partition, + Sectors: SectorNumsToBitfield(numbers), + SectorsWithClaims: sectorsWithClaims, + NewExpiration: newExp, }) + } } @@ -1083,7 +1205,7 @@ var sectorsRenewCmd = &cli.Command{ } scount += int(count) } - fmt.Printf("Renewing %d sectors: ", scount) + fmt.Printf("Extending %d sectors: ", scount) stotal += scount if !cctx.Bool("really-do-it") { @@ -1097,8 +1219,7 @@ var sectorsRenewCmd = &cli.Command{ return err } - fmt.Println() - fmt.Println(string(data)) + fmt.Println("\n", string(data)) continue } @@ -1110,7 +1231,7 @@ var sectorsRenewCmd = &cli.Command{ smsg, err := fullApi.MpoolPushMessage(ctx, &types.Message{ From: mi.Worker, To: maddr, - Method: builtin.MethodsMiner.ExtendSectorExpiration, + Method: builtin.MethodsMiner.ExtendSectorExpiration2, Value: big.Zero(), Params: sp, }, spec) @@ -1121,252 +1242,7 @@ var sectorsRenewCmd = &cli.Command{ fmt.Println(smsg.Cid()) } - fmt.Printf("%d sectors renewed\n", stotal) - - return nil - }, -} - -var sectorsExtendCmd = &cli.Command{ - Name: "extend", - Usage: "Extend sector expiration", - ArgsUsage: "", - Flags: []cli.Flag{ - &cli.Int64Flag{ - Name: "new-expiration", - Usage: "new expiration epoch", - Required: false, - }, - &cli.BoolFlag{ - Name: "v1-sectors", - Usage: "renews all v1 sectors up to the maximum possible lifetime", - Required: false, - }, - &cli.Int64Flag{ - Name: "tolerance", - Value: 20160, - Usage: "when extending v1 sectors, don't try to 
extend sectors by fewer than this number of epochs", - Required: false, - }, - &cli.Int64Flag{ - Name: "expiration-ignore", - Value: 120, - Usage: "when extending v1 sectors, skip sectors whose current expiration is less than epochs from now", - Required: false, - }, - &cli.Int64Flag{ - Name: "expiration-cutoff", - Usage: "when extending v1 sectors, skip sectors whose current expiration is more than epochs from now (infinity if unspecified)", - Required: false, - }, - &cli.StringFlag{}, - }, - Action: func(cctx *cli.Context) error { - - api, nCloser, err := lcli.GetFullNodeAPI(cctx) - if err != nil { - return err - } - defer nCloser() - - ctx := lcli.ReqContext(cctx) - - maddr, err := getActorAddress(ctx, cctx) - if err != nil { - return err - } - - var params []miner.ExtendSectorExpirationParams - - if cctx.Bool("v1-sectors") { - - head, err := api.ChainHead(ctx) - if err != nil { - return err - } - - nv, err := api.StateNetworkVersion(ctx, types.EmptyTSK) - if err != nil { - return err - } - - extensions := map[lminer.SectorLocation]map[abi.ChainEpoch][]uint64{} - - // are given durations within tolerance epochs - withinTolerance := func(a, b abi.ChainEpoch) bool { - diff := a - b - if diff < 0 { - diff = b - a - } - - return diff <= abi.ChainEpoch(cctx.Int64("tolerance")) - } - - sis, err := api.StateMinerActiveSectors(ctx, maddr, types.EmptyTSK) - if err != nil { - return xerrors.Errorf("getting miner sector infos: %w", err) - } - - for _, si := range sis { - if si.SealProof >= abi.RegisteredSealProof_StackedDrg2KiBV1_1 { - continue - } - - if si.Expiration < (head.Height() + abi.ChainEpoch(cctx.Int64("expiration-ignore"))) { - continue - } - - if cctx.IsSet("expiration-cutoff") { - if si.Expiration > (head.Height() + abi.ChainEpoch(cctx.Int64("expiration-cutoff"))) { - continue - } - } - - ml := policy.GetSectorMaxLifetime(si.SealProof, nv) - // if the sector's missing less than "tolerance" of its maximum possible lifetime, don't bother extending it - if withinTolerance(si.Expiration-si.Activation, ml) { - continue - } - - // Set the new expiration to 48 hours less than the theoretical maximum lifetime - newExp := ml - (miner.WPoStProvingPeriod * 2) + si.Activation - if withinTolerance(si.Expiration, newExp) || si.Expiration >= newExp { - continue - } - - p, err := api.StateSectorPartition(ctx, maddr, si.SectorNumber, types.EmptyTSK) - if err != nil { - return xerrors.Errorf("getting sector location for sector %d: %w", si.SectorNumber, err) - } - - if p == nil { - return xerrors.Errorf("sector %d not found in any partition", si.SectorNumber) - } - - es, found := extensions[*p] - if !found { - ne := make(map[abi.ChainEpoch][]uint64) - ne[newExp] = []uint64{uint64(si.SectorNumber)} - extensions[*p] = ne - } else { - added := false - for exp := range es { - if withinTolerance(exp, newExp) && newExp >= exp && exp > si.Expiration { - es[exp] = append(es[exp], uint64(si.SectorNumber)) - added = true - break - } - } - - if !added { - es[newExp] = []uint64{uint64(si.SectorNumber)} - } - } - } - - p := miner.ExtendSectorExpirationParams{} - scount := 0 - - for l, exts := range extensions { - for newExp, numbers := range exts { - scount += len(numbers) - addressedMax, err := policy.GetAddressedSectorsMax(nv) - if err != nil { - return xerrors.Errorf("failed to get addressed sectors max") - } - declMax, err := policy.GetDeclarationsMax(nv) - if err != nil { - return xerrors.Errorf("failed to get declarations max") - } - if scount > addressedMax || len(p.Extensions) == declMax { - params = 
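Both the new ExtendSectorExpiration2 path above and the removed v1 path in this hunk cap each message by the network's addressed-sectors and declarations limits, flushing a partially filled params value and starting a new one when the caps are hit. A stripped-down sketch of the same batching rule with plain counters (this version checks before appending, whereas the patch checks after bumping its running count; the limits are illustrative, the patch reads them from policy.GetAddressedSectorsMax and policy.GetDeclarationsMax):

package main

import "fmt"

// batch mirrors one ExtendSectorExpiration(2)Params message: a list of
// per-deadline/partition declarations plus a running sector count.
type batch struct {
	decls        []int // sectors per declaration; the real code stores ExpirationExtension2 values
	sectorsTotal int
}

func main() {
	addressedSectorsMax := 100
	declarationsMax := 3

	declarations := []int{40, 35, 30, 20, 90, 5, 5, 5, 5} // sectors in each pending declaration

	var batches []batch
	cur := batch{}
	for _, n := range declarations {
		// flush the current message when adding this declaration would exceed
		// either the sector cap or the declaration cap
		if cur.sectorsTotal+n > addressedSectorsMax || len(cur.decls) >= declarationsMax {
			if len(cur.decls) > 0 {
				batches = append(batches, cur)
			}
			cur = batch{}
		}
		cur.decls = append(cur.decls, n)
		cur.sectorsTotal += n
	}
	if len(cur.decls) > 0 { // one last append for the trailing batch, as in the patch
		batches = append(batches, cur)
	}

	for i, b := range batches {
		fmt.Printf("message %d: %d declarations, %d sectors\n", i+1, len(b.decls), b.sectorsTotal)
	}
}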
append(params, p) - p = miner.ExtendSectorExpirationParams{} - scount = len(numbers) - } - - p.Extensions = append(p.Extensions, miner.ExpirationExtension{ - Deadline: l.Deadline, - Partition: l.Partition, - Sectors: bitfield.NewFromSet(numbers), - NewExpiration: newExp, - }) - } - } - - // if we have any sectors, then one last append is needed here - if scount != 0 { - params = append(params, p) - } - - } else { - if !cctx.Args().Present() || !cctx.IsSet("new-expiration") { - return xerrors.Errorf("must pass at least one sector number and new expiration") - } - sectors := map[lminer.SectorLocation][]uint64{} - - for i, s := range cctx.Args().Slice() { - id, err := strconv.ParseUint(s, 10, 64) - if err != nil { - return xerrors.Errorf("could not parse sector %d: %w", i, err) - } - - p, err := api.StateSectorPartition(ctx, maddr, abi.SectorNumber(id), types.EmptyTSK) - if err != nil { - return xerrors.Errorf("getting sector location for sector %d: %w", id, err) - } - - if p == nil { - return xerrors.Errorf("sector %d not found in any partition", id) - } - - sectors[*p] = append(sectors[*p], id) - } - - p := miner.ExtendSectorExpirationParams{} - for l, numbers := range sectors { - - // TODO: Dedup with above loop - p.Extensions = append(p.Extensions, miner.ExpirationExtension{ - Deadline: l.Deadline, - Partition: l.Partition, - Sectors: bitfield.NewFromSet(numbers), - NewExpiration: abi.ChainEpoch(cctx.Int64("new-expiration")), - }) - } - - params = append(params, p) - } - - if len(params) == 0 { - fmt.Println("nothing to extend") - return nil - } - - mi, err := api.StateMinerInfo(ctx, maddr, types.EmptyTSK) - if err != nil { - return xerrors.Errorf("getting miner info: %w", err) - } - - for i := range params { - sp, aerr := actors.SerializeParams(¶ms[i]) - if aerr != nil { - return xerrors.Errorf("serializing params: %w", err) - } - - smsg, err := api.MpoolPushMessage(ctx, &types.Message{ - From: mi.Worker, - To: maddr, - Method: builtin.MethodsMiner.ExtendSectorExpiration, - - Value: big.Zero(), - Params: sp, - }, nil) - if err != nil { - return xerrors.Errorf("mpool push message: %w", err) - } - - fmt.Println(smsg.Cid()) - } + fmt.Printf("%d sectors extended\n", stotal) return nil }, @@ -1387,9 +1263,6 @@ var sectorsTerminateCmd = &cli.Command{ sectorsTerminatePendingCmd, }, Action: func(cctx *cli.Context) error { - if !cctx.Bool("really-do-it") { - return xerrors.Errorf("pass --really-do-it to confirm this action") - } minerApi, closer, err := lcli.GetStorageMinerAPI(cctx) if err != nil { return err @@ -1400,6 +1273,10 @@ var sectorsTerminateCmd = &cli.Command{ return lcli.IncorrectNumArgs(cctx) } + if !cctx.Bool("really-do-it") { + return xerrors.Errorf("pass --really-do-it to confirm this action") + } + id, err := strconv.ParseUint(cctx.Args().Get(0), 10, 64) if err != nil { return xerrors.Errorf("could not parse sector number: %w", err) @@ -1620,8 +1497,14 @@ var sectorsStartSealCmd = &cli.Command{ var sectorsSealDelayCmd = &cli.Command{ Name: "set-seal-delay", - Usage: "Set the time, in minutes, that a new sector waits for deals before sealing starts", - ArgsUsage: "", + Usage: "Set the time (in minutes) that a new sector waits for deals before sealing starts", + ArgsUsage: "