Merge pull request #10727 from filecoin-project/asr/release-1230

feat: release 1.23.0
Aayush Rajasekaran 2023-04-23 14:40:18 -04:00 committed by GitHub
commit d1d4b35adf
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
318 changed files with 10519 additions and 6320 deletions

.circleci/config.yml

@@ -7,14 +7,19 @@ executors:
golang:
docker:
# Must match GO_VERSION_MIN in project root
- image: cimg/go:1.18.8
- image: cimg/go:1.19.7
resource_class: medium+
golang-2xl:
docker:
# Must match GO_VERSION_MIN in project root
- image: cimg/go:1.19.7
resource_class: 2xlarge
ubuntu:
docker:
- image: ubuntu:20.04
commands:
prepare:
build-platform-specific:
parameters:
linux:
default: true
@@ -31,22 +36,13 @@ commands:
steps:
- checkout
- git_fetch_all_tags
- run: git submodule sync
- run: git submodule update --init
- when:
condition: <<parameters.linux>>
steps:
- run:
name: Check Go Version
command: |
v=`go version | { read _ _ v _; echo ${v#go}; }`
if [[ $v != `cat GO_VERSION_MIN` ]]; then
echo "GO_VERSION_MIN file does not match the go version being used."
echo "Please update image to cimg/go:`cat GO_VERSION_MIN` or update GO_VERSION_MIN to $v."
exit 1
fi
- run: sudo apt-get update
- run: sudo apt-get install ocl-icd-opencl-dev libhwloc-dev
- run: sudo apt-get install python-is-python3
- install-ubuntu-deps
- check-go-version
- when:
condition: <<parameters.darwin>>
steps:
@@ -67,8 +63,7 @@ commands:
name: Install Rust
command: |
curl https://sh.rustup.rs -sSf | sh -s -- -y
- run: git submodule sync
- run: git submodule update --init
- run: make deps lotus
download-params:
steps:
- restore_cache:
@@ -99,12 +94,43 @@ commands:
name: fetch all tags
command: |
git fetch --all
install-ubuntu-deps:
steps:
- run: sudo apt-get update
- run: sudo apt-get install ocl-icd-opencl-dev libhwloc-dev
check-go-version:
steps:
- run: |
v=`go version | { read _ _ v _; echo ${v#go}; }`
if [[ $v != `cat GO_VERSION_MIN` ]]; then
echo "GO_VERSION_MIN file does not match the go version being used."
echo "Please update image to cimg/go:`cat GO_VERSION_MIN` or update GO_VERSION_MIN to $v."
exit 1
fi
jobs:
build:
executor: golang
working_directory: ~/lotus
steps:
- checkout
- git_fetch_all_tags
- run: git submodule sync
- run: git submodule update --init
- install-ubuntu-deps
- check-go-version
- run: make deps lotus
- persist_to_workspace:
root: ~/
paths:
- "lotus"
mod-tidy-check:
executor: golang
working_directory: ~/lotus
steps:
- prepare
- install-ubuntu-deps
- attach_workspace:
at: ~/
- run: go mod tidy -v
- run:
name: Check git diff
@@ -115,13 +141,14 @@ jobs:
test:
description: |
Run tests with gotestsum.
working_directory: ~/lotus
parameters: &test-params
executor:
type: executor
default: golang
go-test-flags:
type: string
default: "-timeout 30m"
default: "-timeout 20m"
description: Flags passed to go test.
target:
type: string
@@ -130,20 +157,21 @@ jobs:
proofs-log-test:
type: string
default: "0"
get-params:
type: boolean
default: false
suite:
type: string
default: unit
description: Test suite name to report to CircleCI.
gotestsum-format:
type: string
default: standard-verbose
description: gotestsum format. https://github.com/gotestyourself/gotestsum#format
executor: << parameters.executor >>
steps:
- prepare
- run:
command: make deps lotus
no_output_timeout: 30m
- install-ubuntu-deps
- attach_workspace:
at: ~/
- when:
condition: << parameters.get-params >>
steps:
- download-params
- run:
name: go test
@@ -155,12 +183,11 @@ jobs:
mkdir -p /tmp/test-reports/<< parameters.suite >>
mkdir -p /tmp/test-artifacts
gotestsum \
--format << parameters.gotestsum-format >> \
--format standard-verbose \
--junitfile /tmp/test-reports/<< parameters.suite >>/junit.xml \
--jsonfile /tmp/test-artifacts/<< parameters.suite >>.json \
-- \
<< parameters.go-test-flags >> \
<< parameters.target >>
--packages="<< parameters.target >>" \
-- << parameters.go-test-flags >>
no_output_timeout: 30m
- store_test_results:
path: /tmp/test-reports
@@ -168,6 +195,7 @@ jobs:
path: /tmp/test-artifacts/<< parameters.suite >>.json
test-conformance:
working_directory: ~/lotus
description: |
Run tests using a corpus of interoperable test vectors for Filecoin
implementations to test their correctness and compliance with the Filecoin
@@ -183,10 +211,9 @@ jobs:
submodule is used.
executor: << parameters.executor >>
steps:
- prepare
- run:
command: make deps lotus
no_output_timeout: 30m
- install-ubuntu-deps
- attach_workspace:
at: ~/
- download-params
- when:
condition:
@@ -229,7 +256,7 @@ jobs:
build-linux-amd64:
executor: golang
steps:
- prepare
- build-platform-specific
- run: make lotus lotus-miner lotus-worker
- run:
name: check tag and version output match
@@ -248,7 +275,7 @@ jobs:
macos:
xcode: "13.4.1"
steps:
- prepare:
- build-platform-specific:
linux: false
darwin: true
darwin-architecture: amd64
@@ -272,7 +299,7 @@ jobs:
resource_class: filecoin-project/self-hosted-m1
steps:
- run: echo 'export PATH=/opt/homebrew/bin:"$PATH"' >> "$BASH_ENV"
- prepare:
- build-platform-specific:
linux: false
darwin: true
darwin-architecture: arm64
@@ -330,16 +357,18 @@ jobs:
gofmt:
executor: golang
working_directory: ~/lotus
steps:
- prepare
- run:
command: "! go fmt ./... 2>&1 | read"
gen-check:
executor: golang
working_directory: ~/lotus
steps:
- prepare
- run: make deps
- install-ubuntu-deps
- attach_workspace:
at: ~/
- run: go install golang.org/x/tools/cmd/goimports
- run: go install github.com/hannahhoward/cbor-gen-for
- run: make gen
@@ -349,32 +378,29 @@ jobs:
docs-check:
executor: golang
working_directory: ~/lotus
steps:
- prepare
- install-ubuntu-deps
- attach_workspace:
at: ~/
- run: go install golang.org/x/tools/cmd/goimports
- run: zcat build/openrpc/full.json.gz | jq > ../pre-openrpc-full
- run: zcat build/openrpc/miner.json.gz | jq > ../pre-openrpc-miner
- run: zcat build/openrpc/worker.json.gz | jq > ../pre-openrpc-worker
- run: make deps
- run: make docsgen
- run: zcat build/openrpc/full.json.gz | jq > ../post-openrpc-full
- run: zcat build/openrpc/miner.json.gz | jq > ../post-openrpc-miner
- run: zcat build/openrpc/worker.json.gz | jq > ../post-openrpc-worker
- run: diff ../pre-openrpc-full ../post-openrpc-full && diff ../pre-openrpc-miner ../post-openrpc-miner && diff ../pre-openrpc-worker ../post-openrpc-worker && git --no-pager diff && git --no-pager diff --quiet
lint: &lint
lint-all:
description: |
Run golangci-lint.
working_directory: ~/lotus
parameters:
executor:
type: executor
default: golang
concurrency:
type: string
default: '2'
description: |
Concurrency used to run linters. Defaults to 2 because NumCPU is not
aware of container CPU limits.
args:
type: string
default: ''
@@ -382,17 +408,14 @@ jobs:
Arguments to pass to golangci-lint
executor: << parameters.executor >>
steps:
- prepare
- run:
command: make deps
no_output_timeout: 30m
- install-ubuntu-deps
- attach_workspace:
at: ~/
- run:
name: Lint
command: |
golangci-lint run -v --timeout 2m \
--concurrency << parameters.concurrency >> << parameters.args >>
lint-all:
<<: *lint
golangci-lint run -v --timeout 10m \
--concurrency 4 << parameters.args >>
build-docker:
description: >
@@ -494,432 +517,541 @@ jobs:
extra_build_args: --target <<parameters.image>> --build-arg GOFLAGS=-tags=<<parameters.network>>
workflows:
version: 2.1
ci:
jobs:
- build
- lint-all:
concurrency: "16" # expend all docker 2xlarge CPUs.
- mod-tidy-check
- gofmt
- gen-check
- docs-check
requires:
- build
- mod-tidy-check:
requires:
- build
- gofmt:
requires:
- build
- gen-check:
requires:
- build
- docs-check:
requires:
- build
- test:
name: test-itest-api
requires:
- build
suite: itest-api
target: "./itests/api_test.go"
- test:
name: test-itest-batch_deal
requires:
- build
suite: itest-batch_deal
target: "./itests/batch_deal_test.go"
- test:
name: test-itest-ccupgrade
requires:
- build
suite: itest-ccupgrade
target: "./itests/ccupgrade_test.go"
- test:
name: test-itest-cli
requires:
- build
suite: itest-cli
target: "./itests/cli_test.go"
- test:
name: test-itest-deadlines
requires:
- build
suite: itest-deadlines
target: "./itests/deadlines_test.go"
- test:
name: test-itest-deals_512mb
requires:
- build
suite: itest-deals_512mb
target: "./itests/deals_512mb_test.go"
- test:
name: test-itest-deals_anycid
requires:
- build
suite: itest-deals_anycid
target: "./itests/deals_anycid_test.go"
- test:
name: test-itest-deals_concurrent
requires:
- build
suite: itest-deals_concurrent
target: "./itests/deals_concurrent_test.go"
executor: golang-2xl
- test:
name: test-itest-deals_invalid_utf8_label
requires:
- build
suite: itest-deals_invalid_utf8_label
target: "./itests/deals_invalid_utf8_label_test.go"
- test:
name: test-itest-deals_max_staging_deals
requires:
- build
suite: itest-deals_max_staging_deals
target: "./itests/deals_max_staging_deals_test.go"
- test:
name: test-itest-deals_offline
requires:
- build
suite: itest-deals_offline
target: "./itests/deals_offline_test.go"
- test:
name: test-itest-deals_padding
requires:
- build
suite: itest-deals_padding
target: "./itests/deals_padding_test.go"
- test:
name: test-itest-deals_partial_retrieval_dm-level
requires:
- build
suite: itest-deals_partial_retrieval_dm-level
target: "./itests/deals_partial_retrieval_dm-level_test.go"
- test:
name: test-itest-deals_partial_retrieval
requires:
- build
suite: itest-deals_partial_retrieval
target: "./itests/deals_partial_retrieval_test.go"
- test:
name: test-itest-deals_power
requires:
- build
suite: itest-deals_power
target: "./itests/deals_power_test.go"
- test:
name: test-itest-deals_pricing
requires:
- build
suite: itest-deals_pricing
target: "./itests/deals_pricing_test.go"
- test:
name: test-itest-deals_publish
requires:
- build
suite: itest-deals_publish
target: "./itests/deals_publish_test.go"
- test:
name: test-itest-deals_remote_retrieval
requires:
- build
suite: itest-deals_remote_retrieval
target: "./itests/deals_remote_retrieval_test.go"
- test:
name: test-itest-deals_retry_deal_no_funds
requires:
- build
suite: itest-deals_retry_deal_no_funds
target: "./itests/deals_retry_deal_no_funds_test.go"
- test:
name: test-itest-deals
requires:
- build
suite: itest-deals
target: "./itests/deals_test.go"
- test:
name: test-itest-decode_params
requires:
- build
suite: itest-decode_params
target: "./itests/decode_params_test.go"
- test:
name: test-itest-dup_mpool_messages
requires:
- build
suite: itest-dup_mpool_messages
target: "./itests/dup_mpool_messages_test.go"
- test:
name: test-itest-eth_account_abstraction
requires:
- build
suite: itest-eth_account_abstraction
target: "./itests/eth_account_abstraction_test.go"
- test:
name: test-itest-eth_api
requires:
- build
suite: itest-eth_api
target: "./itests/eth_api_test.go"
- test:
name: test-itest-eth_balance
requires:
- build
suite: itest-eth_balance
target: "./itests/eth_balance_test.go"
- test:
name: test-itest-eth_block_hash
requires:
- build
suite: itest-eth_block_hash
target: "./itests/eth_block_hash_test.go"
- test:
name: test-itest-eth_bytecode
requires:
- build
suite: itest-eth_bytecode
target: "./itests/eth_bytecode_test.go"
- test:
name: test-itest-eth_config
requires:
- build
suite: itest-eth_config
target: "./itests/eth_config_test.go"
- test:
name: test-itest-eth_conformance
requires:
- build
suite: itest-eth_conformance
target: "./itests/eth_conformance_test.go"
- test:
name: test-itest-eth_deploy
requires:
- build
suite: itest-eth_deploy
target: "./itests/eth_deploy_test.go"
- test:
name: test-itest-eth_fee_history
requires:
- build
suite: itest-eth_fee_history
target: "./itests/eth_fee_history_test.go"
- test:
name: test-itest-eth_filter
requires:
- build
suite: itest-eth_filter
target: "./itests/eth_filter_test.go"
- test:
name: test-itest-eth_hash_lookup
requires:
- build
suite: itest-eth_hash_lookup
target: "./itests/eth_hash_lookup_test.go"
- test:
name: test-itest-eth_transactions
requires:
- build
suite: itest-eth_transactions
target: "./itests/eth_transactions_test.go"
- test:
name: test-itest-fevm_address
requires:
- build
suite: itest-fevm_address
target: "./itests/fevm_address_test.go"
- test:
name: test-itest-fevm_events
requires:
- build
suite: itest-fevm_events
target: "./itests/fevm_events_test.go"
- test:
name: test-itest-fevm
requires:
- build
suite: itest-fevm
target: "./itests/fevm_test.go"
- test:
name: test-itest-gas_estimation
requires:
- build
suite: itest-gas_estimation
target: "./itests/gas_estimation_test.go"
- test:
name: test-itest-gateway
requires:
- build
suite: itest-gateway
target: "./itests/gateway_test.go"
- test:
name: test-itest-get_messages_in_ts
requires:
- build
suite: itest-get_messages_in_ts
target: "./itests/get_messages_in_ts_test.go"
- test:
name: test-itest-lite_migration
requires:
- build
suite: itest-lite_migration
target: "./itests/lite_migration_test.go"
- test:
name: test-itest-lookup_robust_address
requires:
- build
suite: itest-lookup_robust_address
target: "./itests/lookup_robust_address_test.go"
- test:
name: test-itest-mempool
requires:
- build
suite: itest-mempool
target: "./itests/mempool_test.go"
- test:
name: test-itest-migration
requires:
- build
suite: itest-migration
target: "./itests/migration_test.go"
- test:
name: test-itest-mpool_msg_uuid
requires:
- build
suite: itest-mpool_msg_uuid
target: "./itests/mpool_msg_uuid_test.go"
- test:
name: test-itest-mpool_push_with_uuid
requires:
- build
suite: itest-mpool_push_with_uuid
target: "./itests/mpool_push_with_uuid_test.go"
- test:
name: test-itest-multisig
requires:
- build
suite: itest-multisig
target: "./itests/multisig_test.go"
- test:
name: test-itest-net
requires:
- build
suite: itest-net
target: "./itests/net_test.go"
- test:
name: test-itest-nonce
requires:
- build
suite: itest-nonce
target: "./itests/nonce_test.go"
- test:
name: test-itest-path_detach_redeclare
requires:
- build
suite: itest-path_detach_redeclare
target: "./itests/path_detach_redeclare_test.go"
- test:
name: test-itest-path_type_filters
requires:
- build
suite: itest-path_type_filters
target: "./itests/path_type_filters_test.go"
- test:
name: test-itest-paych_api
requires:
- build
suite: itest-paych_api
target: "./itests/paych_api_test.go"
- test:
name: test-itest-paych_cli
requires:
- build
suite: itest-paych_cli
target: "./itests/paych_cli_test.go"
- test:
name: test-itest-pending_deal_allocation
requires:
- build
suite: itest-pending_deal_allocation
target: "./itests/pending_deal_allocation_test.go"
- test:
name: test-itest-raft_messagesigner
requires:
- build
suite: itest-raft_messagesigner
target: "./itests/raft_messagesigner_test.go"
- test:
name: test-itest-remove_verifreg_datacap
requires:
- build
suite: itest-remove_verifreg_datacap
target: "./itests/remove_verifreg_datacap_test.go"
- test:
name: test-itest-sdr_upgrade
requires:
- build
suite: itest-sdr_upgrade
target: "./itests/sdr_upgrade_test.go"
- test:
name: test-itest-sector_finalize_early
requires:
- build
suite: itest-sector_finalize_early
target: "./itests/sector_finalize_early_test.go"
- test:
name: test-itest-sector_import_full
requires:
- build
suite: itest-sector_import_full
target: "./itests/sector_import_full_test.go"
- test:
name: test-itest-sector_import_simple
requires:
- build
suite: itest-sector_import_simple
target: "./itests/sector_import_simple_test.go"
- test:
name: test-itest-sector_make_cc_avail
requires:
- build
suite: itest-sector_make_cc_avail
target: "./itests/sector_make_cc_avail_test.go"
- test:
name: test-itest-sector_miner_collateral
requires:
- build
suite: itest-sector_miner_collateral
target: "./itests/sector_miner_collateral_test.go"
- test:
name: test-itest-sector_numassign
requires:
- build
suite: itest-sector_numassign
target: "./itests/sector_numassign_test.go"
- test:
name: test-itest-sector_pledge
requires:
- build
suite: itest-sector_pledge
target: "./itests/sector_pledge_test.go"
- test:
name: test-itest-sector_prefer_no_upgrade
requires:
- build
suite: itest-sector_prefer_no_upgrade
target: "./itests/sector_prefer_no_upgrade_test.go"
- test:
name: test-itest-sector_revert_available
requires:
- build
suite: itest-sector_revert_available
target: "./itests/sector_revert_available_test.go"
- test:
name: test-itest-sector_terminate
requires:
- build
suite: itest-sector_terminate
target: "./itests/sector_terminate_test.go"
- test:
name: test-itest-sector_unseal
requires:
- build
suite: itest-sector_unseal
target: "./itests/sector_unseal_test.go"
- test:
name: test-itest-self_sent_txn
requires:
- build
suite: itest-self_sent_txn
target: "./itests/self_sent_txn_test.go"
- test:
name: test-itest-splitstore
requires:
- build
suite: itest-splitstore
target: "./itests/splitstore_test.go"
- test:
name: test-itest-tape
requires:
- build
suite: itest-tape
target: "./itests/tape_test.go"
- test:
name: test-itest-verifreg
requires:
- build
suite: itest-verifreg
target: "./itests/verifreg_test.go"
- test:
name: test-itest-wdpost_config
requires:
- build
suite: itest-wdpost_config
target: "./itests/wdpost_config_test.go"
- test:
name: test-itest-wdpost_dispute
requires:
- build
suite: itest-wdpost_dispute
target: "./itests/wdpost_dispute_test.go"
- test:
name: test-itest-wdpost_no_miner_storage
requires:
- build
suite: itest-wdpost_no_miner_storage
target: "./itests/wdpost_no_miner_storage_test.go"
- test:
name: test-itest-wdpost
requires:
- build
suite: itest-wdpost
target: "./itests/wdpost_test.go"
get-params: true
- test:
name: test-itest-wdpost_worker_config
requires:
- build
suite: itest-wdpost_worker_config
target: "./itests/wdpost_worker_config_test.go"
executor: golang-2xl
- test:
name: test-itest-worker
requires:
- build
suite: itest-worker
target: "./itests/worker_test.go"
executor: golang-2xl
- test:
name: test-itest-worker_upgrade
requires:
- build
suite: itest-worker_upgrade
target: "./itests/worker_upgrade_test.go"
- test:
name: test-unit-cli
requires:
- build
suite: utest-unit-cli
target: "./cli/... ./cmd/... ./api/..."
get-params: true
- test:
name: test-unit-node
requires:
- build
suite: utest-unit-node
target: "./node/..."
- test:
name: test-unit-rest
requires:
- build
suite: utest-unit-rest
target: "./api/... ./blockstore/... ./build/... ./chain/... ./cli/... ./cmd/... ./conformance/... ./extern/... ./gateway/... ./journal/... ./lib/... ./markets/... ./node/... ./paychmgr/... ./storage/... ./tools/..."
executor: golang-2xl
- test:
name: test-unit-storage
requires:
- build
suite: utest-unit-storage
target: "./storage/... ./extern/..."
- test:
go-test-flags: "-run=TestMulticoreSDR"
requires:
- build
suite: multicore-sdr-check
target: "./storage/sealer/ffiwrapper"
proofs-log-test: "1"
- test-conformance:
requires:
- build
suite: conformance
target: "./conformance"

.circleci/template.yml

@@ -7,14 +7,19 @@ executors:
golang:
docker:
# Must match GO_VERSION_MIN in project root
- image: cimg/go:1.18.8
- image: cimg/go:1.19.7
resource_class: medium+
golang-2xl:
docker:
# Must match GO_VERSION_MIN in project root
- image: cimg/go:1.19.7
resource_class: 2xlarge
ubuntu:
docker:
- image: ubuntu:20.04
commands:
prepare:
build-platform-specific:
parameters:
linux:
default: true
@@ -31,22 +36,13 @@ commands:
steps:
- checkout
- git_fetch_all_tags
- run: git submodule sync
- run: git submodule update --init
- when:
condition: <<parameters.linux>>
steps:
- run:
name: Check Go Version
command: |
v=`go version | { read _ _ v _; echo ${v#go}; }`
if [["[[ $v != `cat GO_VERSION_MIN` ]]"]]; then
echo "GO_VERSION_MIN file does not match the go version being used."
echo "Please update image to cimg/go:`cat GO_VERSION_MIN` or update GO_VERSION_MIN to $v."
exit 1
fi
- run: sudo apt-get update
- run: sudo apt-get install ocl-icd-opencl-dev libhwloc-dev
- run: sudo apt-get install python-is-python3
- install-ubuntu-deps
- check-go-version
- when:
condition: <<parameters.darwin>>
steps:
@@ -67,8 +63,7 @@ commands:
name: Install Rust
command: |
curl https://sh.rustup.rs -sSf | sh -s -- -y
- run: git submodule sync
- run: git submodule update --init
- run: make deps lotus
download-params:
steps:
- restore_cache:
@@ -99,12 +94,43 @@ commands:
name: fetch all tags
command: |
git fetch --all
install-ubuntu-deps:
steps:
- run: sudo apt-get update
- run: sudo apt-get install ocl-icd-opencl-dev libhwloc-dev
check-go-version:
steps:
- run: |
v=`go version | { read _ _ v _; echo ${v#go}; }`
if [["[[ $v != `cat GO_VERSION_MIN` ]]"]]; then
echo "GO_VERSION_MIN file does not match the go version being used."
echo "Please update image to cimg/go:`cat GO_VERSION_MIN` or update GO_VERSION_MIN to $v."
exit 1
fi
jobs:
build:
executor: golang
working_directory: ~/lotus
steps:
- checkout
- git_fetch_all_tags
- run: git submodule sync
- run: git submodule update --init
- install-ubuntu-deps
- check-go-version
- run: make deps lotus
- persist_to_workspace:
root: ~/
paths:
- "lotus"
mod-tidy-check:
executor: golang
working_directory: ~/lotus
steps:
- prepare
- install-ubuntu-deps
- attach_workspace:
at: ~/
- run: go mod tidy -v
- run:
name: Check git diff
@@ -115,13 +141,14 @@ jobs:
test:
description: |
Run tests with gotestsum.
working_directory: ~/lotus
parameters: &test-params
executor:
type: executor
default: golang
go-test-flags:
type: string
default: "-timeout 30m"
default: "-timeout 20m"
description: Flags passed to go test.
target:
type: string
@@ -130,20 +157,21 @@ jobs:
proofs-log-test:
type: string
default: "0"
get-params:
type: boolean
default: false
suite:
type: string
default: unit
description: Test suite name to report to CircleCI.
gotestsum-format:
type: string
default: standard-verbose
description: gotestsum format. https://github.com/gotestyourself/gotestsum#format
executor: << parameters.executor >>
steps:
- prepare
- run:
command: make deps lotus
no_output_timeout: 30m
- install-ubuntu-deps
- attach_workspace:
at: ~/
- when:
condition: << parameters.get-params >>
steps:
- download-params
- run:
name: go test
@@ -155,12 +183,11 @@ jobs:
mkdir -p /tmp/test-reports/<< parameters.suite >>
mkdir -p /tmp/test-artifacts
gotestsum \
--format << parameters.gotestsum-format >> \
--format standard-verbose \
--junitfile /tmp/test-reports/<< parameters.suite >>/junit.xml \
--jsonfile /tmp/test-artifacts/<< parameters.suite >>.json \
-- \
<< parameters.go-test-flags >> \
<< parameters.target >>
--packages="<< parameters.target >>" \
-- << parameters.go-test-flags >>
no_output_timeout: 30m
- store_test_results:
path: /tmp/test-reports
@@ -168,6 +195,7 @@ jobs:
path: /tmp/test-artifacts/<< parameters.suite >>.json
test-conformance:
working_directory: ~/lotus
description: |
Run tests using a corpus of interoperable test vectors for Filecoin
implementations to test their correctness and compliance with the Filecoin
@@ -183,10 +211,9 @@ jobs:
submodule is used.
executor: << parameters.executor >>
steps:
- prepare
- run:
command: make deps lotus
no_output_timeout: 30m
- install-ubuntu-deps
- attach_workspace:
at: ~/
- download-params
- when:
condition:
@@ -229,7 +256,7 @@ jobs:
build-linux-amd64:
executor: golang
steps:
- prepare
- build-platform-specific
- run: make lotus lotus-miner lotus-worker
- run:
name: check tag and version output match
@@ -248,7 +275,7 @@ jobs:
macos:
xcode: "13.4.1"
steps:
- prepare:
- build-platform-specific:
linux: false
darwin: true
darwin-architecture: amd64
@@ -272,7 +299,7 @@ jobs:
resource_class: filecoin-project/self-hosted-m1
steps:
- run: echo 'export PATH=/opt/homebrew/bin:"$PATH"' >> "$BASH_ENV"
- prepare:
- build-platform-specific:
linux: false
darwin: true
darwin-architecture: arm64
@@ -330,16 +357,18 @@ jobs:
gofmt:
executor: golang
working_directory: ~/lotus
steps:
- prepare
- run:
command: "! go fmt ./... 2>&1 | read"
gen-check:
executor: golang
working_directory: ~/lotus
steps:
- prepare
- run: make deps
- install-ubuntu-deps
- attach_workspace:
at: ~/
- run: go install golang.org/x/tools/cmd/goimports
- run: go install github.com/hannahhoward/cbor-gen-for
- run: make gen
@@ -349,32 +378,29 @@ jobs:
docs-check:
executor: golang
working_directory: ~/lotus
steps:
- prepare
- install-ubuntu-deps
- attach_workspace:
at: ~/
- run: go install golang.org/x/tools/cmd/goimports
- run: zcat build/openrpc/full.json.gz | jq > ../pre-openrpc-full
- run: zcat build/openrpc/miner.json.gz | jq > ../pre-openrpc-miner
- run: zcat build/openrpc/worker.json.gz | jq > ../pre-openrpc-worker
- run: make deps
- run: make docsgen
- run: zcat build/openrpc/full.json.gz | jq > ../post-openrpc-full
- run: zcat build/openrpc/miner.json.gz | jq > ../post-openrpc-miner
- run: zcat build/openrpc/worker.json.gz | jq > ../post-openrpc-worker
- run: diff ../pre-openrpc-full ../post-openrpc-full && diff ../pre-openrpc-miner ../post-openrpc-miner && diff ../pre-openrpc-worker ../post-openrpc-worker && git --no-pager diff && git --no-pager diff --quiet
lint: &lint
lint-all:
description: |
Run golangci-lint.
working_directory: ~/lotus
parameters:
executor:
type: executor
default: golang
concurrency:
type: string
default: '2'
description: |
Concurrency used to run linters. Defaults to 2 because NumCPU is not
aware of container CPU limits.
args:
type: string
default: ''
@@ -382,17 +408,14 @@ jobs:
Arguments to pass to golangci-lint
executor: << parameters.executor >>
steps:
- prepare
- run:
command: make deps
no_output_timeout: 30m
- install-ubuntu-deps
- attach_workspace:
at: ~/
- run:
name: Lint
command: |
golangci-lint run -v --timeout 2m \
--concurrency << parameters.concurrency >> << parameters.args >>
lint-all:
<<: *lint
golangci-lint run -v --timeout 10m \
--concurrency 4 << parameters.args >>
build-docker:
description: >
@@ -494,37 +517,61 @@ jobs:
extra_build_args: --target <<parameters.image>> --build-arg GOFLAGS=-tags=<<parameters.network>>
workflows:
version: 2.1
ci:
jobs:
- build
- lint-all:
concurrency: "16" # expend all docker 2xlarge CPUs.
- mod-tidy-check
- gofmt
- gen-check
- docs-check
requires:
- build
- mod-tidy-check:
requires:
- build
- gofmt:
requires:
- build
- gen-check:
requires:
- build
- docs-check:
requires:
- build
[[- range $file := .ItestFiles -]]
[[ with $name := $file | stripSuffix ]]
- test:
name: test-itest-[[ $name ]]
requires:
- build
suite: itest-[[ $name ]]
target: "./itests/[[ $file ]]"
[[- if or (eq $name "worker") (eq $name "deals_concurrent") (eq $name "wdpost_worker_config")]]
executor: golang-2xl
[[- end]]
[[- if (eq $name "wdpost")]]
get-params: true
[[end]]
[[- end -]]
[[- end ]][[- end]]
[[range $suite, $pkgs := .UnitSuites]]
[[- range $suite, $pkgs := .UnitSuites]]
- test:
name: test-[[ $suite ]]
requires:
- build
suite: utest-[[ $suite ]]
target: "[[ $pkgs ]]"
[[if eq $suite "unit-cli"]]get-params: true[[end]]
[[- if eq $suite "unit-rest"]]executor: golang-2xl[[end]]
[[- end]]
- test:
go-test-flags: "-run=TestMulticoreSDR"
requires:
- build
suite: multicore-sdr-check
target: "./storage/sealer/ffiwrapper"
proofs-log-test: "1"
- test-conformance:
requires:
- build
suite: conformance
target: "./conformance"

.github/ISSUE_TEMPLATE/bug_report.yml

@@ -9,15 +9,9 @@ body:
options:
- label: This is **not** a security-related bug/issue. If it is, please follow the [security policy](https://github.com/filecoin-project/lotus/security/policy).
required: true
- label: This is **not** a question or a support request. If you have any lotus related questions, please ask in the [lotus forum](https://github.com/filecoin-project/lotus/discussions).
required: true
- label: This is **not** a new feature request. If it is, please file a [feature request](https://github.com/filecoin-project/lotus/issues/new?assignees=&labels=need%2Ftriage%2Ckind%2Ffeature&template=feature_request.yml) instead.
required: true
- label: This is **not** an enhancement request. If it is, please file a [improvement suggestion](https://github.com/filecoin-project/lotus/issues/new?assignees=&labels=need%2Ftriage%2Ckind%2Fenhancement&template=enhancement.yml) instead.
required: true
- label: I **have** searched on the [issue tracker](https://github.com/filecoin-project/lotus/issues) and the [lotus forum](https://github.com/filecoin-project/lotus/discussions), and there is no existing related issue or discussion.
required: true
- label: I am running the [`Latest release`](https://github.com/filecoin-project/lotus/releases), or the most recent RC(release canadiate) for the upcoming release or the dev branch(master), or have an issue updating to any of these.
- label: I am running the [`Latest release`](https://github.com/filecoin-project/lotus/releases), the most recent RC (release candidate) for the upcoming release or the dev branch (master), or have an issue updating to any of these.
required: true
- label: I did not make any code changes to lotus.
required: false
@@ -28,19 +22,11 @@ body:
options:
- label: lotus daemon - chain sync
required: false
- label: lotus miner - mining and block production
- label: lotus fvm/fevm - Lotus FVM and FEVM interactions
required: false
- label: lotus miner/worker - sealing
required: false
- label: lotus miner - proving(WindowPoSt)
required: false
- label: lotus miner/market - storage deal
required: false
- label: lotus miner/market - retrieval deal
required: false
- label: lotus miner/market - data transfer
required: false
- label: lotus client
- label: lotus miner - proving(WindowPoSt/WinningPoSt)
required: false
- label: lotus JSON-RPC API
required: false
@@ -56,22 +42,33 @@ body:
description: Enter the output of `lotus version` and `lotus-miner version` if applicable.
placeholder: |
e.g.
Daemon:1.11.0-rc2+debug+git.0519cd371.dirty+api1.3.0
Local: lotus version 1.11.0-rc2+debug+git.0519cd371.dirty
Daemon: 1.19.0+mainnet+git.64059ca87+api1.5.0
Local: lotus-miner version 1.19.0+mainnet+git.64059ca87
validations:
required: true
- type: textarea
id: ReproSteps
attributes:
label: Repro Steps
description: "Steps to reproduce the behavior"
value: |
1. Run '...'
2. Do '...'
3. See error '...'
...
validations:
required: false
- type: textarea
id: Description
attributes:
label: Describe the Bug
description: |
This is where you get to tell us what went wrong. When doing so, please try to provide a clear and concise description of the bug with all related information:
* What you were doding when you experienced the bug?
* What you were doing when you experienced the bug?
* Any *error* messages you saw, *where* you saw them, and what you believe may have caused them (if you have any ideas).
* What is the expected behaviour?
* For sealing issues, include the output of `lotus-miner sectors status --log <sectorId>` for the failed sector(s).
* For proving issues, include the output of `lotus-miner proving info`.
* For deal making issues, include the output of `lotus client list-deals -v` and/or `lotus-miner storage-deals|retrieval-deals|data-transfers list [-v]` commands for the deal(s) in question.
validations:
required: true
- type: textarea
@@ -83,18 +80,6 @@ body:
Please provide debug logs of the problem. Remember you can set log level control for:
* lotus: use `lotus log list` to get all log systems available and set level by `lotus log set-level`. An example can be found [here](https://lotus.filecoin.io/lotus/configure/defaults/#log-level-control).
* lotus-miner: `lotus-miner log list` to get all log systems available and set level by `lotus-miner log set-level`
If you don't provide detailed logs when you raise the issue it will almost certainly be the first request I make before furthur diagnosing the problem.
If you don't provide detailed logs when you raise the issue it will almost certainly be the first request we make before further diagnosing the problem.
validations:
required: true
- type: textarea
id: RepoSteps
attributes:
label: Repo Steps
description: "Steps to reproduce the behavior"
value: |
1. Run '...'
2. Do '...'
3. See error '...'
...
validations:
required: false

.github/ISSUE_TEMPLATE/config.yml (new file)

@@ -0,0 +1,8 @@
blank_issues_enabled: true
contact_links:
- name: Ask a question about Lotus or get support
url: https://github.com/filecoin-project/lotus/discussions/new/choose
about: Ask a question or request support for using Lotus
- name: Filecoin protocol feature or enhancement
url: https://github.com/filecoin-project/FIPs/discussions/new/choose
about: Write a discussion in the Filecoin Improvement Proposal repo

.github/ISSUE_TEMPLATE/enhancement.yml

@@ -7,13 +7,7 @@ body:
label: Checklist
description: Please check off the following boxes before continuing to create an improvement suggestion!
options:
- label: This is **not** a new feature or an enhancement to the Filecoin protocol. If it is, please open an [FIP issue](https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0001.md).
required: true
- label: This is **not** a new feature request. If it is, please file a [feature request](https://github.com/filecoin-project/lotus/issues/new?assignees=&labels=need%2Ftriage%2Ckind%2Ffeature&template=feature_request.yml) instead.
required: true
- label: This is **not** brainstorming ideas. If you have an idea you'd like to discuss, please open a new discussion on [the lotus forum](https://github.com/filecoin-project/lotus/discussions/categories/ideas) and select the category as `Ideas`.
required: true
- label: I **have** a specific, actionable, and well motivated improvement to propose.
- label: I **have** a specific, actionable, and well motivated improvement to an existing lotus feature.
required: true
- type: checkboxes
attributes:
@@ -22,19 +16,11 @@ body:
options:
- label: lotus daemon - chain sync
required: false
- label: lotus miner - mining and block production
- label: lotus fvm/fevm - Lotus FVM and FEVM interactions
required: false
- label: lotus miner/worker - sealing
required: false
- label: lotus miner - proving(WindowPoSt)
required: false
- label: lotus miner/market - storage deal
required: false
- label: lotus miner/market - retrieval deal
required: false
- label: lotus miner/market - data transfer
required: false
- label: lotus client
- label: lotus miner - proving(WindowPoSt/WinningPoSt)
required: false
- label: lotus JSON-RPC API
required: false
@@ -45,9 +31,17 @@ body:
- type: textarea
id: request
attributes:
label: Improvement Suggestion
description: A clear and concise description of what the motivation or the current problem is and what is the suggested improvement?
placeholder: Ex. Currently lotus... However, as a storage provider, I'd like...
label: Enhancement Suggestion
description: A clear and concise description of the suggested enhancement.
placeholder: Ex. Currently lotus... However it would be great if [enhancement] was implemented... With the ability to...
validations:
required: true
- type: textarea
id: request
attributes:
label: Use-Case
description: How would this enhancement help you?
placeholder: Ex. With the [enhancement] node operators would be able to... For Storage Providers it would enable...
validations:
required: true

.github/ISSUE_TEMPLATE/feature_request.yml

@@ -7,8 +7,6 @@ body:
label: Checklist
description: Please check off the following boxes before continuing to create a new feature request!
options:
- label: This is **not** a new feature or an enhancement to the Filecoin protocol. If it is, please open an [FIP issue](https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0001.md).
required: true
- label: This is **not** brainstorming ideas. If you have an idea you'd like to discuss, please open a new discussion on [the lotus forum](https://github.com/filecoin-project/lotus/discussions/categories/ideas) and select the category as `Ideas`.
required: true
- label: I **have** a specific, actionable, and well motivated feature request to propose.
@@ -20,19 +18,11 @@ body:
options:
- label: lotus daemon - chain sync
required: false
- label: lotus miner - mining and block production
- label: lotus fvm/fevm - Lotus FVM and FEVM interactions
required: false
- label: lotus miner/worker - sealing
required: false
- label: lotus miner - proving(WindowPoSt)
required: false
- label: lotus miner/market - storage deal
required: false
- label: lotus miner/market - retrieval deal
required: false
- label: lotus miner/market - data transfer
required: false
- label: lotus client
- label: lotus miner - proving(WindowPoSt/WinningPoSt)
required: false
- label: lotus JSON-RPC API
required: false
@@ -56,7 +46,7 @@ body:
validations:
required: true
- type: textarea
id: alternates
id: alternatives
attributes:
label: Describe alternatives you've considered
description: A clear and concise description of any alternative solutions or features you've considered.
@@ -69,4 +59,3 @@ body:
description: Add any other context, design docs or screenshots about the feature request here.
validations:
required: false

.github/ISSUE_TEMPLATE/ (new file: FEVM bug report template)

@@ -0,0 +1,83 @@
name: "Bug Report - developer/service provider"
description: "Bug report template about FEVM/FVM for developers/service providers"
labels: [need/triage, kind/bug, area/fevm]
body:
- type: checkboxes
attributes:
label: Checklist
description: Please check off the following boxes before continuing to file a bug report!
options:
- label: This is **not** a security-related bug/issue. If it is, please follow the [security policy](https://github.com/filecoin-project/lotus/security/policy).
required: true
- label: I **have** searched on the [issue tracker](https://github.com/filecoin-project/lotus/issues) and the [lotus forum](https://github.com/filecoin-project/lotus/discussions), and there is no existing related issue or discussion.
required: true
- label: I did not make any code changes to lotus.
required: false
- type: checkboxes
attributes:
label: Lotus component
description: Please select the lotus component you are filing a bug for
options:
- label: lotus Ethereum RPC
required: false
- label: lotus FVM - Lotus FVM interactions
required: false
- label: FEVM tooling
required: false
- label: Other
required: false
- type: textarea
id: version
attributes:
label: Lotus Version
render: text
description: Enter the output of `lotus version` if applicable.
placeholder: |
e.g.
Daemon: 1.19.0+mainnet+git.64059ca87+api1.5.0
Local: lotus-miner version 1.19.0+mainnet+git.64059ca87
validations:
required: true
- type: textarea
id: repro
attributes:
label: Repro Steps
description: "Steps to reproduce the behavior"
value: |
1. Run '...'
2. Do '...'
3. See error '...'
...
validations:
required: false
- type: textarea
id: Description
attributes:
label: Describe the Bug
description: |
This is where you get to tell us what went wrong. When doing so, please try to provide a clear and concise description of the bug with all related information:
* What were you doing when you experienced the bug? What are you trying to build?
* Any *error* messages and logs you saw, *where* you saw them, and what you believe may have caused them (if you have any ideas).
* What is the expected behaviour? Links to the actual code?
validations:
required: true
- type: textarea
id: toolingInfo
attributes:
label: Tooling
render: text
description: |
What kind of tooling are you using:
* Are you using ethers.js, Alchemy, Hardhat, etc.
validations:
required: true
- type: textarea
id: extraInfo
attributes:
label: Configuration Options
render: text
description: |
Please provide your updated FEVM-related configuration options, or custom environment variables related to Lotus FEVM
* lotus: use `lotus config updated` to get your configuration options, and copy the [FEVM] section
validations:
required: true

CHANGELOG.md

@@ -1,5 +1,445 @@
# Lotus changelog
# v1.23.0 / 2023-04-21
This is the stable feature release for the upcoming MANDATORY network upgrade at `2023-04-27T13:00:00Z`, epoch `2809800`. This feature release delivers the nv19 Lightning and nv20 Thunder network upgrades for mainnet, and includes numerous improvements and enhancements for node operators, ETH RPC-providers and storage providers.
## ☢️ Upgrade Warnings ☢️
Please read carefully through the **upgrade warnings** section if you are upgrading from a v1.20.X release, or the v1.22.0 release. If you are upgrading from a v1.21.0-rcX these warnings should be familiar to you.
- Starting from this release, the SplitStore feature is automatically activated on new nodes. However, for existing Lotus users, you need to explicitly configure SplitStore by uncommenting the `EnableSplitstore` option in your `config.toml` file. To enable SplitStore, set `EnableSplitstore=true`, and to disable it, set `EnableSplitstore=false`. **It's important to note that your Lotus node will not start unless this configuration is properly set. Set it to false if you are running a full archival node!**
- This feature release requires a **minimum Go version of v1.19.7 to successfully build Lotus**. Additionally, Go version v1.20 and higher is now also supported.
- **Storage Providers:** The proofs libraries now have CUDA enabled by default, which requires you to install [CUDA](https://lotus.filecoin.io/tutorials/lotus-miner/cuda/) if you haven't already done so. If you prefer to use OpenCL on your GPUs instead, you can use the `FFI_USE_OPENCL=1` flag when building from source. On the other hand, if you want to disable GPUs altogether, you can use the `FFI_NO_GPU=1` environment variable when building from source.
- **Storage Providers:** The `lotus-miner sectors extend` command has been refactored to include the functionality of `lotus-miner sectors renew`.
- **Exchanges/Node operators/RPC-providers:** Execution traces (returned from `lotus state exec-trace`, `lotus state replay`, etc.) have changed to account for changes introduced by the FVM. **Please make sure to read the `Execution trace format change` section carefully, as these are interface breaking changes**
- **Syncing issues:** If you have been struggling with syncing issues in normal operations you can try to adjust the number of threads used for concurrent FVM execution via the `LOTUS_FVM_CONCURRENCY` environment variable. It is set to 4 threads by default. The recommended formula is concurrency == YOUR_RAM/4, but the max during a network upgrade is 24 (see the sketch after this list). If you are a Storage Provider and are pushing many messages within a short period of time, exporting `LOTUS_SKIP_APPLY_TS_MESSAGE_CALL_WITH_GAS=1` will also help with keeping in sync.
- **Catching up from a Snapshot:** Users have noticed that catching up sync from a snapshot is taking a lot longer these days. This is largely because the built-in market actor demands a lot of computation during block validation. A FIP with a short-term mitigation for this is currently in Last Call and will be included in the network version 19 upgrade if accepted. You [can read the FIP here.](https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0060.md)
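For illustration, a minimal shell sketch of the environment variables mentioned above (the values are examples only, following the YOUR_RAM/4 formula for a 64 GiB machine):

```shell
# Example values only: concurrency == YOUR_RAM/4, capped at 24 during a
# network upgrade (e.g. 64 GiB RAM -> 16 threads).
export LOTUS_FVM_CONCURRENCY=16

# Storage Providers pushing many messages in a short period can also set:
export LOTUS_SKIP_APPLY_TS_MESSAGE_CALL_WITH_GAS=1
```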
## Highlights
### Execution Trace Format Changes
Execution traces (returned from `lotus state exec-trace`, `lotus state replay`, etc.) have changed to account for changes introduced by the FVM. Specifically:
- The `Msg` field no longer matches the Filecoin message format as many of the message fields didn't make sense in on-chain sub-calls. Instead, it now has the fields `To`, `From`, `Value`, `Method`, `Params`, and `ParamsCodec` where `ParamsCodec` is a new field indicating the IPLD codec of the parameters.
- Importantly, the `Msg.CID` field has been removed. This field is still present in top-level invocation results, just not inside the execution trace itself.
- The `MsgRct` field no longer includes a `GasUsed` field and now has a `ReturnCodec` field indicating the IPLD codec of the return value.
- The `Error` and `Duration` fields have been removed as these are not set by the FVM. The top-level message "invocation result" retains the `Error` and `Duration` fields; they've only been removed from the trace itself.
- Gas Charges no longer include "virtual" gas fields (those starting with `v...`) or source location information (`loc`) as neither field is set by the FVM.
A note on "codecs": FVM parameters and return values are IPLD blocks where the "codec" specifies the data encoding. The codec will generally be one of:
- `0x51`, `0x71` - CBOR or DagCBOR. You should generally treat these as equivalent.
- `0x55` - Raw bytes.
- `0x00` - Nothing. If the codec is `0x00`, the parameter and/or return value should be empty and should be treated as "void" (not specified).
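For illustration, a minimal Go sketch of how a consumer of the new trace format might label these codec values (this helper is hypothetical, not part of the Lotus API; the constants are the codec values listed above):

```go
package main

import "fmt"

// codecName maps the IPLD codec values carried by the new ParamsCodec and
// ReturnCodec fields to human-readable labels.
func codecName(codec uint64) string {
	switch codec {
	case 0x51, 0x71: // CBOR / DagCBOR: generally treated as equivalent
		return "cbor"
	case 0x55: // raw bytes
		return "raw"
	case 0x00: // no parameters or return value: treat as "void"
		return "void"
	default:
		return fmt.Sprintf("unknown(0x%x)", codec)
	}
}

func main() {
	fmt.Println(codecName(81)) // decimal 81 as seen in traces is 0x51: "cbor"
	fmt.Println(codecName(0))  // "void"
}
```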
<details>
<summary>
Old <code>ExecutionTrace</code>:
</summary>
```json
{
  "Msg": {
    "Version": 0,
    "To": "f01234",
    "From": "f04321",
    "Nonce": 1,
    "Value": "0",
    "GasLimit": 0,
    "GasFeeCap": "1234",
    "GasPremium": "1234",
    "Method": 42,
    "Params": "<base64-data-or-null>",
    "CID": {
      "/": "bafyxyz....."
    }
  },
  "MsgRct": {
    "ExitCode": 0,
    "Return": "<base64-data-or-null>",
    "GasUsed": 12345
  },
  "Error": "",
  "Duration": 568191845,
  "GasCharges": [
    {
      "Name": "OnMethodInvocation",
      "loc": null,
      "tg": 23856,
      "cg": 23856,
      "sg": 0,
      "vtg": 0,
      "vcg": 0,
      "vsg": 0,
      "tt": 0
    },
    {
      "Name": "wasm_exec",
      "loc": null,
      "tg": 1764,
      "cg": 1764,
      "sg": 0,
      "vtg": 0,
      "vcg": 0,
      "vsg": 0,
      "tt": 0
    },
    {
      "Name": "OnSyscall",
      "loc": null,
      "tg": 14000,
      "cg": 14000,
      "sg": 0,
      "vtg": 0,
      "vcg": 0,
      "vsg": 0,
      "tt": 0
    }
  ],
  "Subcalls": [
    {
      "Msg": { },
      "MsgRct": { },
      "Error": "",
      "Duration": 1235,
      "GasCharges": [],
      "Subcalls": []
    }
  ]
}
```
</details>
<details>
<summary>
New <code>ExecutionTrace</code>:
</summary>
```json
{
  "Msg": {
    "To": "f01234",
    "From": "f04321",
    "Value": "0",
    "Method": 42,
    "Params": "<base64-data-or-null>",
    "ParamsCodec": 81
  },
  "MsgRct": {
    "ExitCode": 0,
    "Return": "<base64-data-or-null>",
    "ReturnCodec": 81
  },
  "GasCharges": [
    {
      "Name": "OnMethodInvocation",
      "loc": null,
      "tg": 23856,
      "cg": 23856,
      "tt": 0
    },
    {
      "Name": "wasm_exec",
      "loc": null,
      "tg": 1764,
      "cg": 1764,
      "sg": 0,
      "tt": 0
    },
    {
      "Name": "OnSyscall",
      "loc": null,
      "tg": 14000,
      "cg": 14000,
      "sg": 0,
      "tt": 0
    }
  ],
  "Subcalls": [
    {
      "Msg": { },
      "MsgRct": { },
      "GasCharges": [],
      "Subcalls": []
    }
  ]
}
```
</details>
**SplitStore**
This feature release introduces numerous improvements and fixes to tackle SplitStore related issues that have been reported. With this feature release SplitStore is automatically activated by default on new nodes. However, for existing Lotus users, you need to explicitly configure SplitStore by uncommenting the `EnableSplitstore` option in your `config.toml` file. To enable SplitStore, set `EnableSplitstore=true`, and to disable it, set `EnableSplitstore=false`. **It's important to note that your Lotus node will not start unless this configuration is properly set. Set it to false if you are running a full archival node!**
SplitStore also has some new configuration settings that you can set in your config.toml file (a sketch follows the list):
- `HotstoreMaxSpaceTarget`: suggests the max allowed space (in bytes) the hotstore can take.
- `HotstoreMaxSpaceThreshold`: a moving GC will be triggered when the total moving size exceeds this threshold (in bytes).
- `HotstoreMaxSpaceSafetyBuffer`: a safety buffer to prevent a moving GC from overflowing the disk.
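A minimal `config.toml` sketch with these settings (the section placement and the threshold/buffer values are assumptions for illustration; only the 650GB target default is taken from this release's notes):

```toml
[Chainstore]
  # SplitStore must be configured explicitly on existing nodes;
  # the node will not start otherwise.
  EnableSplitstore = true

  [Chainstore.Splitstore]
    # Suggested max space (in bytes) the hotstore can take (650GB default).
    HotstoreMaxSpaceTarget = 650000000000
    # Example value: trigger a moving GC when total moving size exceeds this.
    HotstoreMaxSpaceThreshold = 150000000000
    # Example value: safety buffer to keep moving GC from filling the disk.
    HotstoreMaxSpaceSafetyBuffer = 50000000000
```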
The SplitStore also has two new commands:
- `lotus chain prune hot` is a much less resource-intensive GC and is best suited for situations where you don't have the spare disk space for a full GC.
- `lotus chain prune hot-moving` will run a full moving garbage collection of the hotstore. This command creates a new hotstore before deleting the old one, so you need working room in the hotstore directory. The current size of a fully GC'd hotstore is around 295 GiB, so make sure you have at least that much space available.
You can read more about the new SplitStore commands in [the documentation](https://lotus.filecoin.io/lotus/configure/splitstore/#manual-chain-store-garbage-collection).
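In shell form, the two new prune commands described above:

```shell
# Lighter GC of the hotstore; suited to nodes without spare disk space:
lotus chain prune hot

# Full moving GC; builds a new hotstore before deleting the old one,
# so keep roughly a hotstore's worth (~295 GiB) of working room free:
lotus chain prune hot-moving
```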
**RPC API improvements**
This feature release includes all the RPC API improvements made in the Lotus v1.20.x patch releases. It includes an updated FFI that sets the FVM parallelism to 4 by default.
Node operators with higher memory specs can experiment with setting `LOTUS_FVM_CONCURRENCY` to higher values, up to 48, to allow for more concurrent FVM execution.
**Experimental scheduler assigners**
In this release there are four new experimental scheduler assigners:
- The `experiment-spread-qcount` - similar to the spread assigner, but also takes into account task counts which are in running/preparing/queued states.
- The `experiment-spread-tasks` - similar to the spread assigner, but counts running tasks on a per-task-type basis.
- The `experiment-spread-tasks-qcount` - similar to the spread assigner, but also takes into account task counts which are in running/preparing/queued states, as well as counting running tasks on a per-task-type basis. Check the results for this assigner on [storage-only lotus-workers here](https://github.com/filecoin-project/lotus/issues/8566#issuecomment-1446978856).
- The `experiment-random` - in each schedule loop the assigner figures out the set of all workers which can handle the task and then picks a random one. Check the results for this assigner on [storage-only lotus-workers here](https://github.com/filecoin-project/lotus/issues/8566#issuecomment-1447064218).
**Graceful shutdown of lotus-workers**
We have cleaned up some commands in the `lotus-worker` to make it less confusing how to gracefully shut down a `lotus-worker` while there are incoming sealing tasks in the pipeline. To shut down a `lotus-worker` gracefully:
1. `lotus-worker tasks disable --all` and wait for the worker to finish processing its current tasks.
2. `lotus-worker stop` to detach it and do maintenance/upgrades.
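In shell form:

```shell
# 1. Stop accepting new sealing tasks, then wait for in-flight work to drain:
lotus-worker tasks disable --all

# 2. Detach the worker for maintenance/upgrades:
lotus-worker stop
```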
**CLI speedups**
The `lotus-miner sectors list` command now runs its checks in parallel, which should speed up the process anywhere from 2x to 10x+. You can tune it further with the `check-parallelism` option in the command (see the sketch below). The `lotus-miner info` command also has a large speed improvement, as calls to the legacy lotus markets have been removed.
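A usage sketch (the exact flag spelling is an assumption based on the option name given above; the parallelism value is an example):

```shell
# Run the sector list checks with 16 parallel workers (example value).
lotus-miner sectors list --check-parallelism 16
```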
## New features
- feat: splitstore: Pause compaction when out of sync ([filecoin-project/lotus/#10641](https://github.com/filecoin-project/lotus/pull/10641))
- Pauses the SplitStore compaction if the node is out of sync, and resumes compaction when it's back in sync.
- feat: splitstore: limit moving gc threads (#10621) ([filecoin-project/lotus/#10621](https://github.com/filecoin-project/lotus/pull/10621))
- Makes moving GC less likely to cause the node to fall out of sync.
- feat: splitstore: Update config default value (#10605) ([filecoin-project/lotus/#10605](https://github.com/filecoin-project/lotus/pull/10605))
- Sets the SplitStore `HotStoreMaxSpaceTarget` config to 650GB as default.
- feat: splitstore: Splitstore enabled by default (#10429) ([filecoin-project/lotus#10429](https://github.com/filecoin-project/lotus/pull/10429))
- Enables SplitStore by default on new Lotus nodes. Existing Lotus users need to explicitly configure it.
- feat: splitstore: Configure max space used by hotstore and GC makes best effort to respect ([filecoin-project/lotus#10391](https://github.com/filecoin-project/lotus/pull/10391))
- Adds three new configs for setting the maximum allowed space the hotstore can take.
- feat: splitstore: Badger GC of hotstore command ([filecoin-project/lotus#10387](https://github.com/filecoin-project/lotus/pull/10387))
- Adds a `lotus chain prune hot` command, to run the garbage collection of the hotstore in a user driven way.
- feat: sched: Assigner experiments ([filecoin-project/lotus#10356](https://github.com/filecoin-project/lotus/pull/10356))
- Introduces experimental scheduler assigners that work better for setups that use storage-only lotus-workers.
- fix: wdpost: disabled post worker handling ([filecoin-project/lotus#10394](https://github.com/filecoin-project/lotus/pull/10394))
- Improved scheduling logic for Proof-of-SpaceTime workers.
- feat: cli: list claims and remove expired claims ([filecoin-project/lotus#9875](https://github.com/filecoin-project/lotus/pull/9875))
- Adds a command to list claims made by a provider `lotus filplus list-claims`. And `lotus filplus remove-expired-claims` to remove expired claims.
- feat: cli: make sectors list much faster ([filecoin-project/lotus#10202](https://github.com/filecoin-project/lotus/pull/10202))
- Makes `lotus-miner sector list` checks run in parallel.
- feat: cli: Add an EVM command to fetch a contract's bytecode ([filecoin-project/lotus#10443](https://github.com/filecoin-project/lotus/pull/10443))
- Adds an `lotus evm bytecode` command to fetch a contract's bytecode.
- feat: mempool: Reduce minimum replace fee from 1.25x to 1.1x (#10416) ([filecoin-project/lotus#10416](https://github.com/filecoin-project/lotus/pull/10416))
- Reduces the minimum replacement fee to make it easier for developers using Ethereum tools like MetaMask to replace pending messages.
- feat: update renew-sectors with FIP-0045 logic ([filecoin-project/lotus#10328](https://github.com/filecoin-project/lotus/pull/10328))
- Updates the `lotus-miner sectors extend` command with FIP-0045 logic, including the ability to drop claims and to set the maximum number of sectors contained in a single extension message.
- feat: IPC: Abstract common consensus functions and consensus interface ([filecoin-project/lotus#9481](https://github.com/filecoin-project/lotus/pull/9481))
- Add eudico's consensus interface to Lotus and implement EC behind that interface. This abstraction is the stepping-stone for Mir's integration.
- fix: worker: add all tasks flag ([filecoin-project/lotus#10232](https://github.com/filecoin-project/lotus/pull/10232))
- Adds an `all` flag for the `lotus-worker tasks enable/disable` cmds.
- feat:shed:add cid to cbor serialization command ([filecoin-project/lotus#10032](https://github.com/filecoin-project/lotus/pull/10032))
- Adds two `lotus-shed` commands, `lotus-shed cid bytes` and `lotus-shed cid cbor` to serialize cid to cbor and cid to bytes.
- feat: add toolshed commands to inspect statetree size ([filecoin-project/lotus#9982](https://github.com/filecoin-project/lotus/pull/9982))
- Adds two commands, `lotus-shed stat-actor` and `lotus-shed stat-obj` that work with an offline lotus repo to report dag size stats.
- feat: shed: encode address to bytes ([filecoin-project/lotus#10105](https://github.com/filecoin-project/lotus/pull/10105))
- Adds a `lotus-shed address encode` for encoding a filecoin address to hex bytes.
- feat: chain: export-range ([filecoin-project/lotus#10145](https://github.com/filecoin-project/lotus/pull/10145))
- Adds a `lotus chain export-range` command that can create archival-grade ranged exports of the chain as quickly as possible.
- feat: stmgr: cache migrated stateroots ([filecoin-project/lotus#10282](https://github.com/filecoin-project/lotus/pull/10282))
- Cache network migration results to avoid running migrations twice.
- feat: shed: Add a tool to read data from sectors ([filecoin-project/lotus#10169](https://github.com/filecoin-project/lotus/pull/10169))
- Adds a `lotus-shed sectors read` command that extracts data from sectors in a running lotus-miner deployment.
- feat: cli: Refactor renew and remove extend ([filecoin-project/lotus#9920](https://github.com/filecoin-project/lotus/pull/9920))
- Refactors the `lotus-miner sectors extend` command to have the functionality of `lotus-miner sectors renew`. The `lotus-miner sectors renew` command has been deprecated.
- feat: shed: Add beneficiary commands ([filecoin-project/lotus#10037](https://github.com/filecoin-project/lotus/pull/10037))
- Adds the beneficiary address command to `lotus-shed`. You can now use `lotus-shed actor propose-change-beneficiary` and `lotus-shed actor confirm-change-beneficiary` to change beneficiary addresses.
## Improvements
- backport: fix: miner: correctly count sector extensions (10555) ([filecoin-project/lotus#10555](https://github.com/filecoin-project/lotus/pull/10555))
- Fixes the issue with sector extensions.
- fix: proving: Initialize slice with same length as partition (#10574) ([filecoin-project/lotus#10574](https://github.com/filecoin-project/lotus/pull/10574))
- Fixes an issue where `lotus-miner proving compute window-post` panicked when trying to make skipped sectors human readable.
- feat: stmgr: speed up calculation of genesis circ supply (#10553) ([filecoin-project/lotus#10553](https://github.com/filecoin-project/lotus/pull/10553))
- perf: eth: gas estimate set applyTsMessages false (#10546) ([filecoin-project/lotus#10546](https://github.com/filecoin-project/lotus/pull/10546))
- feat: config: Force existing users to opt into new defaults (#10488) ([filecoin-project/lotus#10488](https://github.com/filecoin-project/lotus/pull/10488))
- Force existing users to opt into the new SplitStore defaults.
- fix: splitstore: Demote now common logs (#10516) ([filecoin-project/lotus#10516](https://github.com/filecoin-project/lotus/pull/10516))
- fix: splitstore: Don't enforce walking receipt tree during compaction ([filecoin-project/lotus#10502](https://github.com/filecoin-project/lotus/pull/10502))
- fix: splitstore: Fix the overzealous fix (#10366) ([filecoin-project/lotus#10366](https://github.com/filecoin-project/lotus/pull/10366))
- fix: splitstore: Two fixes, better logging and comments (#10332) ([filecoin-project/lotus#10332](https://github.com/filecoin-project/lotus/pull/10332))
- fix: fsm: shutdown removed sectors FSMs ([filecoin-project/lotus#10363](https://github.com/filecoin-project/lotus/pull/10363))
- Fixes an issue where removed sectors still got state machine events.
- fix: rpcenc: Don't hang when source dies ([filecoin-project/lotus#10116](https://github.com/filecoin-project/lotus/pull/10116))
- Fixes an issue where AddPiece tasks could get stuck if the Boost process was abruptly lost.
- fix: make debugging windowPoSt-failures human readable ([filecoin-project/lotus#10390](https://github.com/filecoin-project/lotus/pull/10390))
- Makes the skipped sector list in `lotus-miner proving compute window-post` human readable.
- fix: cli: Hide `lotus-worker set` command ([filecoin-project/lotus#10384](https://github.com/filecoin-project/lotus/pull/10384))
- Hides the `lotus-worker set` command. This command will be deprecated later.
- fix: worker: Hide `wait-quiet` cmd ([filecoin-project/lotus#10331](https://github.com/filecoin-project/lotus/pull/10331))
- Hides the `lotus-worker wait-quiet` command. This command will be deprecated later.
- fix: post: Tune down default post-parallel-reads ([filecoin-project/lotus#10365](https://github.com/filecoin-project/lotus/pull/10365))
- Tunes down the default `post-parallel-reads` to a more conservative number to prevent sectors from being skipped due to network timeouts.
- fix: cli: error if backup file already exists ([filecoin-project/lotus#10209](https://github.com/filecoin-project/lotus/pull/10209))
- Errors out if a backup file with the same name already exists when using the `lotus-miner backup` or `lotus backup` command.
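- For example (the path is a placeholder):
```bash
# now errors out instead of overwriting if the target file already exists
lotus-miner backup /path/to/backup.cbor
```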
- fix: cli: option to set-seal-delay in seconds ([filecoin-project/lotus#10208](https://github.com/filecoin-project/lotus/pull/10208))
- Adds the option to specify the `lotus-miner sectors set-seal-delay` value in seconds.
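- Sketch (the `--seconds` flag is an assumption based on this description):
```bash
lotus-miner sectors set-seal-delay --seconds 30
```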
- fix: cli: extend cmd to get the right sector number ([filecoin-project/lotus#10182](https://github.com/filecoin-project/lotus/pull/10182))
- Ensures the `lotus-miner sectors extend` command gets the correct sector number.
- feat: wdpost: Emit more detailed errors ([filecoin-project/lotus#10121](https://github.com/filecoin-project/lotus/pull/10121))
- Emits more detailed windowPoSt error messages, making it easier to debug PoSt issues.
- fix: Lotus Gateway: Add missing methods - master ([filecoin-project/lotus#10420](https://github.com/filecoin-project/lotus/pull/10420))
- Adds `StateNetworkName`, `MpoolGetNonce`, `StateCall` and `StateDecodeParams` methods to Lotus Gateway.
- fix: stmgr: don't attempt to lookup genesis state (#10472) ([filecoin-project/lotus#10472](https://github.com/filecoin-project/lotus/pull/10472))
- feat: gateway: export StateVerifierStatus ([filecoin-project/lotus#10477](https://github.com/filecoin-project/lotus/pull/10477))
- fix: gateway: correctly apply the fee history lookback max ([filecoin-project/lotus#10464](https://github.com/filecoin-project/lotus/pull/10464))
- fix: gateway: drop overzealous guard on MsigGetVested ([filecoin-project/lotus#10451](https://github.com/filecoin-project/lotus/pull/10451))
- feat: apply gateway lookback limit to eth API lookback ([filecoin-project/lotus#10467](https://github.com/filecoin-project/lotus/pull/10467))
- fix: revert "Eth API: drop support for 'pending' block parameter." ([filecoin-project/lotus#10474](https://github.com/filecoin-project/lotus/pull/10474))
- fix: Eth API: make net_version return the chain ID ([filecoin-project/lotus#10456](https://github.com/filecoin-project/lotus/pull/10456))
- fix: eth: handle a potential divide by zero in receipt handling ([filecoin-project/lotus#10495](https://github.com/filecoin-project/lotus/pull/10495))
- fix: ethrpc: Don't lock up when eth subscriber goes away ([filecoin-project/lotus#10485](https://github.com/filecoin-project/lotus/pull/10485))
- feat: eth: Avoid StateCompute in EthTxnReceipt lookup (#10460) ([filecoin-project/lotus#10460](https://github.com/filecoin-project/lotus/pull/10460))
- feat: eth: optimize eth block loading + eth_feeHistory ([filecoin-project/lotus#10446](https://github.com/filecoin-project/lotus/pull/10446))
- feat: state: skip tipset execution when possible ([filecoin-project/lotus#10445](https://github.com/filecoin-project/lotus/pull/10445))
- feat: eth API: reject masked ID addresses embedded in f410f payloads ([filecoin-project/lotus#10440](https://github.com/filecoin-project/lotus/pull/10440))
- fix: Eth API: make block parameter parsing sounder. ([filecoin-project/lotus#10427](https://github.com/filecoin-project/lotus/pull/10427))
- fix: eth API: return correct txIdx around null blocks (#10419) ([filecoin-project/lotus#10419](https://github.com/filecoin-project/lotus/pull/10419))
- fix: EthAPI: use StateCompute for feeHistory; apply minimum gas premium (#10413) ([filecoin-project/lotus#10413](https://github.com/filecoin-project/lotus/pull/10413))
- refactor: EthAPI: Drop unnecessary param from newEthTxReceipt ([filecoin-project/lotus#10411](https://github.com/filecoin-project/lotus/pull/10411))
- fix: eth API: correct gateway restrictions, drop unimplemented methods ([filecoin-project/lotus#10409](https://github.com/filecoin-project/lotus/pull/10409))
- fix: EthAPI: Correctly get parent hash ([filecoin-project/lotus#10389](https://github.com/filecoin-project/lotus/pull/10389))
- fix: EthAPI: Make newEthBlockFromFilecoinTipSet faster and correct ([filecoin-project/lotus#10380](https://github.com/filecoin-project/lotus/pull/10380))
- fix: eth: incorrect struct tags (#10309) ([filecoin-project/lotus#10309](https://github.com/filecoin-project/lotus/pull/10309))
- refactor: update cache to the new generic version (#10463) ([filecoin-project/lotus#10463](https://github.com/filecoin-project/lotus/pull/10463))
- feat: consensus: log ApplyBlock timing/gas stats ([filecoin-project/lotus#10470](https://github.com/filecoin-project/lotus/pull/10470))
- feat: chain: make chain tipset fetching 1000x faster ([filecoin-project/lotus#10423](https://github.com/filecoin-project/lotus/pull/10423))
- chain: explicitly check that gasLimit is above zero ([filecoin-project/lotus#10198](https://github.com/filecoin-project/lotus/pull/10198))
- feat: blockstore: Envvar can adjust badger compaction worker poolsize ([filecoin-project/lotus#9973](https://github.com/filecoin-project/lotus/pull/9973))
- feat: stmgr: add env to disable premigrations ([filecoin-project/lotus#10283](https://github.com/filecoin-project/lotus/pull/10283))
- chore: Remove legacy market info from lotus-miner info ([filecoin-project/lotus#10364](https://github.com/filecoin-project/lotus/pull/10364))
- Removes the legacy market info from `lotus-miner info`, speeding up the command significantly.
- chore: blockstore: Plumb through a proper Flush() method on all blockstores ([filecoin-project/lotus#10465](https://github.com/filecoin-project/lotus/pull/10465))
- fix: extend LOTUS_CHAIN_BADGERSTORE_DISABLE_FSYNC to the markset ([filecoin-project/lotus#10172](https://github.com/filecoin-project/lotus/pull/10172))
- feat: vm: switch to the new exec trace format (#10372) ([filecoin-project/lotus#10372](https://github.com/filecoin-project/lotus/pull/10372))
- fix: Remove workaround that is no longer needed ([filecoin-project/lotus#9995](https://github.com/filecoin-project/lotus/pull/9995))
- feat: Check for allocation expiry when waiting to seal sectors ([filecoin-project/lotus#9878](https://github.com/filecoin-project/lotus/pull/9878))
- feat: Allow libp2p user agent to be overridden ([filecoin-project/lotus#10149](https://github.com/filecoin-project/lotus/pull/10149))
- feat: cli: Add global color flag ([filecoin-project/lotus#10022](https://github.com/filecoin-project/lotus/pull/10022))
- fix: should not serve non v0 api in v0 ([filecoin-project/lotus#10066](https://github.com/filecoin-project/lotus/pull/10066))
- fix: build: drop drand incentinet servers ([filecoin-project/lotus#10476](https://github.com/filecoin-project/lotus/pull/10476))
- fix: sealing: stub out the FileSize function on Windows ([filecoin-project/lotus#10035](https://github.com/filecoin-project/lotus/pull/10035))
## Dependencies
- github.com/filecoin-project/go-dagaggregator-unixfs (v0.2.0 -> v0.3.0):
- github.com/filecoin-project/go-fil-markets (v1.25.2 -> v1.27.0-rc1):
- github.com/filecoin-project/go-jsonrpc (v0.2.1 -> v0.2.3):
- github.com/filecoin-project/go-statemachine (v1.0.2 -> v1.0.3):
- github.com/filecoin-project/go-state-types (v0.10.0 -> v0.11.0-alpha-3)
- github.com/ipfs/go-cid (v0.3.2 -> v0.4.0):
- github.com/ipfs/go-libipfs (v0.5.0 -> v0.7.0):
- github.com/ipfs/go-path (v0.3.0 -> v0.3.1):
- chore: deps: update to go-state-types v0.11.0-alpha-3 ([filecoin-project/lotus#10606](https://github.com/filecoin-project/lotus/pull/10606))
- deps: update go-libp2p-pubsub to v0.9.3 ([filecoin-project/lotus#10483](https://github.com/filecoin-project/lotus/pull/10483))
- deps: Update go-jsonrpc to v0.2.2 ([filecoin-project/lotus#10395](https://github.com/filecoin-project/lotus/pull/10395))
- Update to go-data-transfer v2 and libp2p, still wip ([filecoin-project/lotus#10382](https://github.com/filecoin-project/lotus/pull/10382))
- dep: ipld: update ipld prime to v0.20.0 ([filecoin-project/lotus#10247](https://github.com/filecoin-project/lotus/pull/10247))
- chore: node: migrate go-bitswap to go-libipfs/bitswap ([filecoin-project/lotus#10138](https://github.com/filecoin-project/lotus/pull/10138))
- chore: all: bump go-libipfs to replace go-block-format ([filecoin-project/lotus#10126](https://github.com/filecoin-project/lotus/pull/10126))
- chore: market: Upgrade to index-provider 0.10.0 ([filecoin-project/lotus#9981](https://github.com/filecoin-project/lotus/pull/9981))
- chore: all: bump go-libipfs ([filecoin-project/lotus#10563](https://github.com/filecoin-project/lotus/pull/10563))
## Others
- Update service_developer_bug_report.yml ([filecoin-project/lotus#10321](https://github.com/filecoin-project/lotus/pull/10321))
- chore: github: Service-provider/dev bug template ([filecoin-project/lotus#10321](https://github.com/filecoin-project/lotus/pull/10321))
- chore: github: update enhancement and feature templates ([filecoin-project/lotus#10291](https://github.com/filecoin-project/lotus/pull/10291))
- chore: github: Update bug_report template ([filecoin-project/lotus#10289](https://github.com/filecoin-project/lotus/pull/10289))
- fix: itest: avoid failing the test when we race the miner ([filecoin-project/lotus#10461](https://github.com/filecoin-project/lotus/pull/10461))
- fix: github: Discussion and FIP links in `New Issue` ([filecoin-project/lotus#10268](https://github.com/filecoin-project/lotus/pull/10268))
- fix: state: short-circuit genesis state computation ([filecoin-project/lotus#10397](https://github.com/filecoin-project/lotus/pull/10397))
- fix: rpcenc: deflake TestReaderRedirectDrop ([filecoin-project/lotus#10406](https://github.com/filecoin-project/lotus/pull/10406))
- fix: tests: Fix TestMinerAllInfo test ([filecoin-project/lotus#10319](https://github.com/filecoin-project/lotus/pull/10319))
- fix: tests: Make TestWorkerKeyChange not flaky ([filecoin-project/lotus#10320](https://github.com/filecoin-project/lotus/pull/10320))
- test: eth: make sure we can deploy a new placeholder on transfer (#10281) ([filecoin-project/lotus#10281](https://github.com/filecoin-project/lotus/pull/10281))
- fix: itests: Fix flaky paych test ([filecoin-project/lotus#10100](https://github.com/filecoin-project/lotus/pull/10100))
- fix: cli: add ArgsUsage ([filecoin-project/lotus#10147](https://github.com/filecoin-project/lotus/pull/10147))
- chore: cli: cleanup cli ([filecoin-project/lotus#10114](https://github.com/filecoin-project/lotus/pull/10114))
- chore: cli: Remove unneeded individual color flags ([filecoin-project/lotus#10028](https://github.com/filecoin-project/lotus/pull/10028))
- fix: cli: remove requirements in helptext ([filecoin-project/lotus#9969](https://github.com/filecoin-project/lotus/pull/9969))
- chore: build: release v1.21.0-rc1 prep ([filecoin-project/lotus#10524](https://github.com/filecoin-project/lotus/pull/10524))
- chore: merge release/v1.20.0 into master ([filecoin-project/lotus#10308](https://github.com/filecoin-project/lotus/pull/10308))
- chore: merge release branch into master ([filecoin-project/lotus#10272](https://github.com/filecoin-project/lotus/pull/10272))
- chore: merge release/v1.20.0 into master ([filecoin-project/lotus#10238](https://github.com/filecoin-project/lotus/pull/10238))
- chore: releases to master ([filecoin-project/lotus#10490](https://github.com/filecoin-project/lotus/pull/10490))
- chore: merge releases into master ([filecoin-project/lotus#10377](https://github.com/filecoin-project/lotus/pull/10377))
- chore: merge release/v1.20.0 into master ([filecoin-project/lotus#10030](https://github.com/filecoin-project/lotus/pull/10030))
- chore: update ffi to increase execution parallelism (#10480) ([filecoin-project/lotus#10480](https://github.com/filecoin-project/lotus/pull/10480))
- chore: update the FFI for release (#10435) ([filecoin-project/lotus#10444](https://github.com/filecoin-project/lotus/pull/10444))
- build: bump version to v1.21.0-dev ([filecoin-project/lotus#10249](https://github.com/filecoin-project/lotus/pull/10249))
- build: docker: Update GO-version (#10591) ([filecoin-project/lotus#10591](https://github.com/filecoin-project/lotus/pull/10591))
- chore: merge release/v1.20.0 into master ([filecoin-project/lotus#10184](https://github.com/filecoin-project/lotus/pull/10184))
- docs: API Gateway: patch documentation note about make gen command ([filecoin-project/lotus#10422](https://github.com/filecoin-project/lotus/pull/10422))
- chore: docs: fix docs typos ([filecoin-project/lotus#10155](https://github.com/filecoin-project/lotus/pull/10155))
- chore: docker: Add back <<network>> parameter for docker push ([filecoin-project/lotus#10096](https://github.com/filecoin-project/lotus/pull/10096))
- chore: docker: Properly balance <<?>> in circleci docker config ([filecoin-project/lotus#10088](https://github.com/filecoin-project/lotus/pull/10088))
- chore: ci: Fix dirty git state when building docker images ([filecoin-project/lotus#10125](https://github.com/filecoin-project/lotus/pull/10125))
- chore: build: Remove AppImage and Snapcraft build automation ([filecoin-project/lotus#10003](https://github.com/filecoin-project/lotus/pull/10003))
- chore: ci: Update codeql to v2 ([filecoin-project/lotus#10120](https://github.com/filecoin-project/lotus/pull/10120))
- feat: ci: make ci more efficient ([filecoin-project/lotus#9910](https://github.com/filecoin-project/lotus/pull/9910))
- feat: scripts: go.mod dep diff script ([filecoin-project/lotus#9711](https://github.com/filecoin-project/lotus/pull/9711))
## Contributors
| Contributor | Commits | Lines ± | Files Changed |
|-------------|---------|---------|---------------|
| Hannah Howard | 2 | +2909/-6026 | 84 |
| Łukasz Magiera | 42 | +2967/-1848 | 95 |
| Steven Allen | 20 | +1703/-1345 | 88 |
| Alfonso de la Rocha | 17 | +823/-1808 | 86 |
| Peter Rabbitson | 9 | +1957/-219 | 34 |
| Geoff Stuart | 12 | +818/-848 | 29 |
| hannahhoward | 5 | +507/-718 | 36 |
| Hector Sanjuan | 6 | +443/-726 | 35 |
| Kevin Li | 1 | +1124/-14 | 22 |
| zenground0 | 30 | +791/-269 | 88 |
| frrist | 1 | +992/-16 | 13 |
| Travis Person | 4 | +837/-53 | 24 |
| Phi | 20 | +622/-254 | 34 |
| Ian Davis | 7 | +35/-729 | 20 |
| Aayush | 10 | +378/-177 | 40 |
| Raúl Kripalani | 15 | +207/-138 | 19 |
| Arsenii Petrovich | 7 | +248/-94 | 30 |
| ZenGround0 | 5 | +238/-39 | 15 |
| Neel Virdy | 1 | +109/-107 | 58 |
| ychiao | 1 | +135/-39 | 3 |
| Jorropo | 2 | +87/-82 | 67 |
| Marten Seemann | 8 | +69/-64 | 17 |
| Rod Vagg | 1 | +55/-16 | 3 |
| Masih H. Derkani | 3 | +39/-27 | 12 |
| raulk | 2 | +30/-29 | 5 |
| dependabot[bot] | 4 | +37/-17 | 8 |
| beck | 2 | +38/-2 | 2 |
| Jennifer Wang | 4 | +20/-19 | 19 |
| Richard Guan | 3 | +28/-8 | 5 |
| omahs | 7 | +14/-14 | 7 |
| dirkmc | 2 | +19/-7 | 6 |
| David Choi | 2 | +16/-5 | 2 |
| Mike Greenberg | 1 | +18/-1 | 1 |
| Adin Schmahmann | 1 | +19/-0 | 2 |
| Phi-rjan | 5 | +12/-4 | 5 |
| Dirk McCormick | 2 | +6/-6 | 3 |
| Aayush Rajasekaran | 2 | +9/-3 | 2 |
| Jiaying Wang | 5 | +6/-4 | 5 |
| Anjor Kanekar | 1 | +5/-5 | 1 |
| vyzo | 1 | +3/-3 | 2 |
| 0x5459 | 1 | +1/-1 | 1 |
# v1.22.1 / 2023-04-23
## Important Notice
@ -166,7 +606,10 @@ verifiedregistry bafk2bzacedej3dnr62g2je2abmyjg3xqv4otvh6e26du5fcrhvw7zgcaaez3a
### Dependencies
github.com/filecoin-project/go-state-types (v0.11.0-rc1 -> v0.11.1):
# v1.20.4 / 2023-03-17
This is a patch release intended to alleviate performance issues reported by some users since the nv18 upgrade.
@ -186,7 +629,22 @@ Users with higher memory specs can experiment with setting `LOTUS_FVM_CONCURRENC
# v1.20.3 / 2023-03-09
A 🐈 stepped on the ⌨️ and made a mistake while resolving conflicts 😨. This releases only includes #10439 to fix that mistake. v1.20.2 is retracted - Please skip v1.20.2 and only update to v1.20.3!!!
A 🐈 stepped on the ⌨️ and made a mistake while resolving conflicts 😨. This releases only includes #10439 to fix that mistake. v1.20.2 is retracted - Please skip v1.20.2 and **only** update to v1.20.3!!!
## Changelog
> compare to v1.20.1
This is a HIGHLY RECOMMENDED patch release for node operators/API service providers that run an ETH RPC service, and an optional release for Storage Providers.
## Bug fixes
- fix: EthAPI: use StateCompute for feeHistory; apply minimum gas premium #10413
- fix: eth API: return correct txIdx around null blocks #10419
- fix: Eth API: make block parameter parsing sounder. #10427
## Improvement
- feat: Lotus Gateway: Add missing methods - master #10420
- feat: mempool: Reduce minimum replace fee from 1.25x to 1.1x #10416
- We recommend storage providers update their nodes to this patch; it will help improve the experience of developers who use Ethereum tooling.
# v1.20.2 / 2023-03-09

View File

@ -1,5 +1,5 @@
#####################################
FROM golang:1.18.8-buster AS lotus-builder
FROM golang:1.19.7-buster AS lotus-builder
MAINTAINER Lotus Development Team
RUN apt-get update && apt-get install -y ca-certificates build-essential clang ocl-icd-opencl-dev ocl-icd-libopencl1 jq libhwloc-dev

View File

@ -1,6 +1,6 @@
##### DEPRECATED
FROM golang:1.18.8-buster AS builder-deps
FROM golang:1.19.7-buster AS builder-deps
MAINTAINER Lotus Development Team
RUN apt-get update && apt-get install -y ca-certificates build-essential clang ocl-icd-opencl-dev ocl-icd-libopencl1 jq libhwloc-dev

View File

@ -1 +1 @@
1.18.8
1.19.7

View File

@ -71,10 +71,10 @@ For other distributions you can find the required dependencies [here.](https://l
#### Go
To build Lotus, you need a working installation of [Go 1.18.8 or higher](https://golang.org/dl/):
To build Lotus, you need a working installation of [Go 1.19.7 or higher](https://golang.org/dl/):
```bash
wget -c https://golang.org/dl/go1.18.8.linux-amd64.tar.gz -O - | sudo tar -xz -C /usr/local
wget -c https://golang.org/dl/go1.19.7.linux-amd64.tar.gz -O - | sudo tar -xz -C /usr/local
```
**TIP:**

View File

@ -13,7 +13,7 @@ import (
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-bitfield"
datatransfer "github.com/filecoin-project/go-data-transfer"
datatransfer "github.com/filecoin-project/go-data-transfer/v2"
"github.com/filecoin-project/go-fil-markets/retrievalmarket"
"github.com/filecoin-project/go-fil-markets/storagemarket"
"github.com/filecoin-project/go-jsonrpc"
@ -173,10 +173,24 @@ type FullNode interface {
// If oldmsgskip is set, messages from before the requested roots are also not included.
ChainExport(ctx context.Context, nroots abi.ChainEpoch, oldmsgskip bool, tsk types.TipSetKey) (<-chan []byte, error) //perm:read
// ChainPrune prunes the stored chain state and garbage collects; only supported if you
// ChainExportRangeInternal triggers the export of a chain
// CAR-snapshot directly to disk. It is similar to ChainExport,
// except, depending on options, the snapshot can include receipts,
// messages and stateroots for the length between the specified head
// and tail, thus producing "archival-grade" snapshots that include
// all the on-chain data. The header chain is included back to
// genesis and these snapshots can be used to initialize Filecoin
// nodes.
ChainExportRangeInternal(ctx context.Context, head, tail types.TipSetKey, cfg ChainExportConfig) error //perm:admin
// ChainPrune forces compaction on cold store and garbage collects; only supported if you
// are using the splitstore
ChainPrune(ctx context.Context, opts PruneOpts) error //perm:admin
// ChainHotGC does online (badger) GC on the hot store; only supported if you are using
// the splitstore
ChainHotGC(ctx context.Context, opts HotGCOpts) error //perm:admin
// ChainCheckBlockstore performs an (asynchronous) health check on the chain/state blockstore
// if supported by the underlying implementation.
ChainCheckBlockstore(context.Context) error //perm:admin
@ -783,10 +797,12 @@ type FullNode interface {
EthGetBlockByHash(ctx context.Context, blkHash ethtypes.EthHash, fullTxInfo bool) (ethtypes.EthBlock, error) //perm:read
EthGetBlockByNumber(ctx context.Context, blkNum string, fullTxInfo bool) (ethtypes.EthBlock, error) //perm:read
EthGetTransactionByHash(ctx context.Context, txHash *ethtypes.EthHash) (*ethtypes.EthTx, error) //perm:read
EthGetTransactionByHashLimited(ctx context.Context, txHash *ethtypes.EthHash, limit abi.ChainEpoch) (*ethtypes.EthTx, error) //perm:read
EthGetTransactionHashByCid(ctx context.Context, cid cid.Cid) (*ethtypes.EthHash, error) //perm:read
EthGetMessageCidByTransactionHash(ctx context.Context, txHash *ethtypes.EthHash) (*cid.Cid, error) //perm:read
EthGetTransactionCount(ctx context.Context, sender ethtypes.EthAddress, blkOpt string) (ethtypes.EthUint64, error) //perm:read
EthGetTransactionReceipt(ctx context.Context, txHash ethtypes.EthHash) (*EthTxReceipt, error) //perm:read
EthGetTransactionReceiptLimited(ctx context.Context, txHash ethtypes.EthHash, limit abi.ChainEpoch) (*EthTxReceipt, error) //perm:read
EthGetTransactionByBlockHashAndIndex(ctx context.Context, blkHash ethtypes.EthHash, txIndex ethtypes.EthUint64) (ethtypes.EthTx, error) //perm:read
EthGetTransactionByBlockNumberAndIndex(ctx context.Context, blkNum ethtypes.EthUint64, txIndex ethtypes.EthUint64) (ethtypes.EthTx, error) //perm:read
@ -1344,6 +1360,12 @@ type PruneOpts struct {
RetainState int64
}
type HotGCOpts struct {
Threshold float64
Periodic bool
Moving bool
}
type EthTxReceipt struct {
TransactionHash ethtypes.EthHash `json:"transactionHash"`
TransactionIndex ethtypes.EthUint64 `json:"transactionIndex"`

View File

@ -26,7 +26,7 @@ import (
// When adding / changing methods in this file:
// * Do the change here
// * Adjust implementation in `node/impl/`
// * Run `make gen` - this will:
// * Run `make clean && make deps && make gen` - this will:
// * Generate proxy structs
// * Generate mocks
// * Generate markdown docs
@ -70,6 +70,7 @@ type Gateway interface {
StateNetworkName(context.Context) (dtypes.NetworkName, error)
StateNetworkVersion(context.Context, types.TipSetKey) (apitypes.NetworkVersion, error)
StateSectorGetInfo(ctx context.Context, maddr address.Address, n abi.SectorNumber, tsk types.TipSetKey) (*miner.SectorOnChainInfo, error)
StateVerifierStatus(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*abi.StoragePower, error)
StateVerifiedClientStatus(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*abi.StoragePower, error)
StateSearchMsg(ctx context.Context, from types.TipSetKey, msg cid.Cid, limit abi.ChainEpoch, allowReplaced bool) (*MsgLookup, error)
StateWaitMsg(ctx context.Context, cid cid.Cid, confidence uint64, limit abi.ChainEpoch, allowReplaced bool) (*MsgLookup, error)
@ -84,12 +85,12 @@ type Gateway interface {
EthGetBlockByHash(ctx context.Context, blkHash ethtypes.EthHash, fullTxInfo bool) (ethtypes.EthBlock, error)
EthGetBlockByNumber(ctx context.Context, blkNum string, fullTxInfo bool) (ethtypes.EthBlock, error)
EthGetTransactionByHash(ctx context.Context, txHash *ethtypes.EthHash) (*ethtypes.EthTx, error)
EthGetTransactionByHashLimited(ctx context.Context, txHash *ethtypes.EthHash, limit abi.ChainEpoch) (*ethtypes.EthTx, error)
EthGetTransactionHashByCid(ctx context.Context, cid cid.Cid) (*ethtypes.EthHash, error)
EthGetMessageCidByTransactionHash(ctx context.Context, txHash *ethtypes.EthHash) (*cid.Cid, error)
EthGetTransactionCount(ctx context.Context, sender ethtypes.EthAddress, blkOpt string) (ethtypes.EthUint64, error)
EthGetTransactionReceipt(ctx context.Context, txHash ethtypes.EthHash) (*EthTxReceipt, error)
EthGetTransactionByBlockHashAndIndex(ctx context.Context, blkHash ethtypes.EthHash, txIndex ethtypes.EthUint64) (ethtypes.EthTx, error)
EthGetTransactionByBlockNumberAndIndex(ctx context.Context, blkNum ethtypes.EthUint64, txIndex ethtypes.EthUint64) (ethtypes.EthTx, error)
EthGetTransactionReceiptLimited(ctx context.Context, txHash ethtypes.EthHash, limit abi.ChainEpoch) (*EthTxReceipt, error)
EthGetCode(ctx context.Context, address ethtypes.EthAddress, blkOpt string) (ethtypes.EthBytes, error)
EthGetStorageAt(ctx context.Context, address ethtypes.EthAddress, position ethtypes.EthBytes, blkParam string) (ethtypes.EthBytes, error)
EthGetBalance(ctx context.Context, address ethtypes.EthAddress, blkParam string) (ethtypes.EthBigInt, error)

View File

@ -11,7 +11,7 @@ import (
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-bitfield"
datatransfer "github.com/filecoin-project/go-data-transfer"
datatransfer "github.com/filecoin-project/go-data-transfer/v2"
"github.com/filecoin-project/go-fil-markets/piecestore"
"github.com/filecoin-project/go-fil-markets/retrievalmarket"
"github.com/filecoin-project/go-fil-markets/storagemarket"
@ -214,7 +214,9 @@ type StorageMiner interface {
MarketImportDealData(ctx context.Context, propcid cid.Cid, path string) error //perm:write
MarketListDeals(ctx context.Context) ([]*MarketDeal, error) //perm:read
MarketListRetrievalDeals(ctx context.Context) ([]retrievalmarket.ProviderDealState, error) //perm:read
// MarketListRetrievalDeals is deprecated, returns empty list
MarketListRetrievalDeals(ctx context.Context) ([]struct{}, error) //perm:read
MarketGetDealUpdates(ctx context.Context) (<-chan storagemarket.MinerDeal, error) //perm:read
MarketListIncompleteDeals(ctx context.Context) ([]storagemarket.MinerDeal, error) //perm:read
MarketSetAsk(ctx context.Context, price types.BigInt, verifiedPrice types.BigInt, duration abi.ChainEpoch, minPieceSize abi.PaddedPieceSize, maxPieceSize abi.PaddedPieceSize) error //perm:admin

View File

@ -50,22 +50,6 @@ func (t *PaymentInfo) MarshalCBOR(w io.Writer) error {
return err
}
// t.WaitSentinel (cid.Cid) (struct)
if len("WaitSentinel") > cbg.MaxLength {
return xerrors.Errorf("Value in field \"WaitSentinel\" was too long")
}
if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("WaitSentinel"))); err != nil {
return err
}
if _, err := io.WriteString(w, string("WaitSentinel")); err != nil {
return err
}
if err := cbg.WriteCid(cw, t.WaitSentinel); err != nil {
return xerrors.Errorf("failed to write cid field t.WaitSentinel: %w", err)
}
// t.Vouchers ([]*paych.SignedVoucher) (slice)
if len("Vouchers") > cbg.MaxLength {
return xerrors.Errorf("Value in field \"Vouchers\" was too long")
@ -90,6 +74,23 @@ func (t *PaymentInfo) MarshalCBOR(w io.Writer) error {
return err
}
}
// t.WaitSentinel (cid.Cid) (struct)
if len("WaitSentinel") > cbg.MaxLength {
return xerrors.Errorf("Value in field \"WaitSentinel\" was too long")
}
if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("WaitSentinel"))); err != nil {
return err
}
if _, err := io.WriteString(w, string("WaitSentinel")); err != nil {
return err
}
if err := cbg.WriteCid(cw, t.WaitSentinel); err != nil {
return xerrors.Errorf("failed to write cid field t.WaitSentinel: %w", err)
}
return nil
}
@ -140,19 +141,6 @@ func (t *PaymentInfo) UnmarshalCBOR(r io.Reader) (err error) {
return xerrors.Errorf("unmarshaling t.Channel: %w", err)
}
}
// t.WaitSentinel (cid.Cid) (struct)
case "WaitSentinel":
{
c, err := cbg.ReadCid(cr)
if err != nil {
return xerrors.Errorf("failed to read cid field t.WaitSentinel: %w", err)
}
t.WaitSentinel = c
}
// t.Vouchers ([]*paych.SignedVoucher) (slice)
case "Vouchers":
@ -184,6 +172,20 @@ func (t *PaymentInfo) UnmarshalCBOR(r io.Reader) (err error) {
t.Vouchers[i] = &v
}
// t.WaitSentinel (cid.Cid) (struct)
case "WaitSentinel":
{
c, err := cbg.ReadCid(cr)
if err != nil {
return xerrors.Errorf("failed to read cid field t.WaitSentinel: %w", err)
}
t.WaitSentinel = c
}
default:
// Field doesn't exist on this type, so ignore it
cbg.ScanForLinks(r, func(cid.Cid) {})
@ -204,19 +206,19 @@ func (t *SealedRef) MarshalCBOR(w io.Writer) error {
return err
}
// t.SectorID (abi.SectorNumber) (uint64)
if len("SectorID") > cbg.MaxLength {
return xerrors.Errorf("Value in field \"SectorID\" was too long")
// t.Size (abi.UnpaddedPieceSize) (uint64)
if len("Size") > cbg.MaxLength {
return xerrors.Errorf("Value in field \"Size\" was too long")
}
if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("SectorID"))); err != nil {
if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Size"))); err != nil {
return err
}
if _, err := io.WriteString(w, string("SectorID")); err != nil {
if _, err := io.WriteString(w, string("Size")); err != nil {
return err
}
if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.SectorID)); err != nil {
if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.Size)); err != nil {
return err
}
@ -236,19 +238,19 @@ func (t *SealedRef) MarshalCBOR(w io.Writer) error {
return err
}
// t.Size (abi.UnpaddedPieceSize) (uint64)
if len("Size") > cbg.MaxLength {
return xerrors.Errorf("Value in field \"Size\" was too long")
// t.SectorID (abi.SectorNumber) (uint64)
if len("SectorID") > cbg.MaxLength {
return xerrors.Errorf("Value in field \"SectorID\" was too long")
}
if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Size"))); err != nil {
if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("SectorID"))); err != nil {
return err
}
if _, err := io.WriteString(w, string("Size")); err != nil {
if _, err := io.WriteString(w, string("SectorID")); err != nil {
return err
}
if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.Size)); err != nil {
if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.SectorID)); err != nil {
return err
}
@ -293,8 +295,8 @@ func (t *SealedRef) UnmarshalCBOR(r io.Reader) (err error) {
}
switch name {
// t.SectorID (abi.SectorNumber) (uint64)
case "SectorID":
// t.Size (abi.UnpaddedPieceSize) (uint64)
case "Size":
{
@ -305,7 +307,7 @@ func (t *SealedRef) UnmarshalCBOR(r io.Reader) (err error) {
if maj != cbg.MajUnsignedInt {
return fmt.Errorf("wrong type for uint64 field")
}
t.SectorID = abi.SectorNumber(extra)
t.Size = abi.UnpaddedPieceSize(extra)
}
// t.Offset (abi.PaddedPieceSize) (uint64)
@ -323,8 +325,8 @@ func (t *SealedRef) UnmarshalCBOR(r io.Reader) (err error) {
t.Offset = abi.PaddedPieceSize(extra)
}
// t.Size (abi.UnpaddedPieceSize) (uint64)
case "Size":
// t.SectorID (abi.SectorNumber) (uint64)
case "SectorID":
{
@ -335,7 +337,7 @@ func (t *SealedRef) UnmarshalCBOR(r io.Reader) (err error) {
if maj != cbg.MajUnsignedInt {
return fmt.Errorf("wrong type for uint64 field")
}
t.Size = abi.UnpaddedPieceSize(extra)
t.SectorID = abi.SectorNumber(extra)
}
@ -474,6 +476,28 @@ func (t *SealTicket) MarshalCBOR(w io.Writer) error {
return err
}
// t.Epoch (abi.ChainEpoch) (int64)
if len("Epoch") > cbg.MaxLength {
return xerrors.Errorf("Value in field \"Epoch\" was too long")
}
if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Epoch"))); err != nil {
return err
}
if _, err := io.WriteString(w, string("Epoch")); err != nil {
return err
}
if t.Epoch >= 0 {
if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.Epoch)); err != nil {
return err
}
} else {
if err := cw.WriteMajorTypeHeader(cbg.MajNegativeInt, uint64(-t.Epoch-1)); err != nil {
return err
}
}
// t.Value (abi.SealRandomness) (slice)
if len("Value") > cbg.MaxLength {
return xerrors.Errorf("Value in field \"Value\" was too long")
@ -497,28 +521,6 @@ func (t *SealTicket) MarshalCBOR(w io.Writer) error {
if _, err := cw.Write(t.Value[:]); err != nil {
return err
}
// t.Epoch (abi.ChainEpoch) (int64)
if len("Epoch") > cbg.MaxLength {
return xerrors.Errorf("Value in field \"Epoch\" was too long")
}
if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Epoch"))); err != nil {
return err
}
if _, err := io.WriteString(w, string("Epoch")); err != nil {
return err
}
if t.Epoch >= 0 {
if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.Epoch)); err != nil {
return err
}
} else {
if err := cw.WriteMajorTypeHeader(cbg.MajNegativeInt, uint64(-t.Epoch-1)); err != nil {
return err
}
}
return nil
}
@ -560,6 +562,32 @@ func (t *SealTicket) UnmarshalCBOR(r io.Reader) (err error) {
}
switch name {
// t.Epoch (abi.ChainEpoch) (int64)
case "Epoch":
{
maj, extra, err := cr.ReadHeader()
var extraI int64
if err != nil {
return err
}
switch maj {
case cbg.MajUnsignedInt:
extraI = int64(extra)
if extraI < 0 {
return fmt.Errorf("int64 positive overflow")
}
case cbg.MajNegativeInt:
extraI = int64(extra)
if extraI < 0 {
return fmt.Errorf("int64 negative overflow")
}
extraI = -1 - extraI
default:
return fmt.Errorf("wrong type for int64 field: %d", maj)
}
t.Epoch = abi.ChainEpoch(extraI)
}
// t.Value (abi.SealRandomness) (slice)
case "Value":
@ -582,32 +610,6 @@ func (t *SealTicket) UnmarshalCBOR(r io.Reader) (err error) {
if _, err := io.ReadFull(cr, t.Value[:]); err != nil {
return err
}
// t.Epoch (abi.ChainEpoch) (int64)
case "Epoch":
{
maj, extra, err := cr.ReadHeader()
var extraI int64
if err != nil {
return err
}
switch maj {
case cbg.MajUnsignedInt:
extraI = int64(extra)
if extraI < 0 {
return fmt.Errorf("int64 positive overflow")
}
case cbg.MajNegativeInt:
extraI = int64(extra)
if extraI < 0 {
return fmt.Errorf("int64 negative oveflow")
}
extraI = -1 - extraI
default:
return fmt.Errorf("wrong type for int64 field: %d", maj)
}
t.Epoch = abi.ChainEpoch(extraI)
}
default:
// Field doesn't exist on this type, so ignore it
@ -629,6 +631,28 @@ func (t *SealSeed) MarshalCBOR(w io.Writer) error {
return err
}
// t.Epoch (abi.ChainEpoch) (int64)
if len("Epoch") > cbg.MaxLength {
return xerrors.Errorf("Value in field \"Epoch\" was too long")
}
if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Epoch"))); err != nil {
return err
}
if _, err := io.WriteString(w, string("Epoch")); err != nil {
return err
}
if t.Epoch >= 0 {
if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.Epoch)); err != nil {
return err
}
} else {
if err := cw.WriteMajorTypeHeader(cbg.MajNegativeInt, uint64(-t.Epoch-1)); err != nil {
return err
}
}
// t.Value (abi.InteractiveSealRandomness) (slice)
if len("Value") > cbg.MaxLength {
return xerrors.Errorf("Value in field \"Value\" was too long")
@ -652,28 +676,6 @@ func (t *SealSeed) MarshalCBOR(w io.Writer) error {
if _, err := cw.Write(t.Value[:]); err != nil {
return err
}
// t.Epoch (abi.ChainEpoch) (int64)
if len("Epoch") > cbg.MaxLength {
return xerrors.Errorf("Value in field \"Epoch\" was too long")
}
if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Epoch"))); err != nil {
return err
}
if _, err := io.WriteString(w, string("Epoch")); err != nil {
return err
}
if t.Epoch >= 0 {
if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.Epoch)); err != nil {
return err
}
} else {
if err := cw.WriteMajorTypeHeader(cbg.MajNegativeInt, uint64(-t.Epoch-1)); err != nil {
return err
}
}
return nil
}
@ -715,6 +717,32 @@ func (t *SealSeed) UnmarshalCBOR(r io.Reader) (err error) {
}
switch name {
// t.Epoch (abi.ChainEpoch) (int64)
case "Epoch":
{
maj, extra, err := cr.ReadHeader()
var extraI int64
if err != nil {
return err
}
switch maj {
case cbg.MajUnsignedInt:
extraI = int64(extra)
if extraI < 0 {
return fmt.Errorf("int64 positive overflow")
}
case cbg.MajNegativeInt:
extraI = int64(extra)
if extraI < 0 {
return fmt.Errorf("int64 negative overflow")
}
extraI = -1 - extraI
default:
return fmt.Errorf("wrong type for int64 field: %d", maj)
}
t.Epoch = abi.ChainEpoch(extraI)
}
// t.Value (abi.InteractiveSealRandomness) (slice)
case "Value":
@ -737,32 +765,6 @@ func (t *SealSeed) UnmarshalCBOR(r io.Reader) (err error) {
if _, err := io.ReadFull(cr, t.Value[:]); err != nil {
return err
}
// t.Epoch (abi.ChainEpoch) (int64)
case "Epoch":
{
maj, extra, err := cr.ReadHeader()
var extraI int64
if err != nil {
return err
}
switch maj {
case cbg.MajUnsignedInt:
extraI = int64(extra)
if extraI < 0 {
return fmt.Errorf("int64 positive overflow")
}
case cbg.MajNegativeInt:
extraI = int64(extra)
if extraI < 0 {
return fmt.Errorf("int64 negative oveflow")
}
extraI = -1 - extraI
default:
return fmt.Errorf("wrong type for int64 field: %d", maj)
}
t.Epoch = abi.ChainEpoch(extraI)
}
default:
// Field doesn't exist on this type, so ignore it
@ -784,6 +786,22 @@ func (t *PieceDealInfo) MarshalCBOR(w io.Writer) error {
return err
}
// t.DealID (abi.DealID) (uint64)
if len("DealID") > cbg.MaxLength {
return xerrors.Errorf("Value in field \"DealID\" was too long")
}
if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("DealID"))); err != nil {
return err
}
if _, err := io.WriteString(w, string("DealID")); err != nil {
return err
}
if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.DealID)); err != nil {
return err
}
// t.PublishCid (cid.Cid) (struct)
if len("PublishCid") > cbg.MaxLength {
return xerrors.Errorf("Value in field \"PublishCid\" was too long")
@ -806,22 +824,6 @@ func (t *PieceDealInfo) MarshalCBOR(w io.Writer) error {
}
}
// t.DealID (abi.DealID) (uint64)
if len("DealID") > cbg.MaxLength {
return xerrors.Errorf("Value in field \"DealID\" was too long")
}
if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("DealID"))); err != nil {
return err
}
if _, err := io.WriteString(w, string("DealID")); err != nil {
return err
}
if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.DealID)); err != nil {
return err
}
// t.DealProposal (market.DealProposal) (struct)
if len("DealProposal") > cbg.MaxLength {
return xerrors.Errorf("Value in field \"DealProposal\" was too long")
@ -910,6 +912,21 @@ func (t *PieceDealInfo) UnmarshalCBOR(r io.Reader) (err error) {
}
switch name {
// t.DealID (abi.DealID) (uint64)
case "DealID":
{
maj, extra, err = cr.ReadHeader()
if err != nil {
return err
}
if maj != cbg.MajUnsignedInt {
return fmt.Errorf("wrong type for uint64 field")
}
t.DealID = abi.DealID(extra)
}
// t.PublishCid (cid.Cid) (struct)
case "PublishCid":
@ -932,21 +949,6 @@ func (t *PieceDealInfo) UnmarshalCBOR(r io.Reader) (err error) {
t.PublishCid = &c
}
}
// t.DealID (abi.DealID) (uint64)
case "DealID":
{
maj, extra, err = cr.ReadHeader()
if err != nil {
return err
}
if maj != cbg.MajUnsignedInt {
return fmt.Errorf("wrong type for uint64 field")
}
t.DealID = abi.DealID(extra)
}
// t.DealProposal (market.DealProposal) (struct)
case "DealProposal":
@ -1140,28 +1142,6 @@ func (t *DealSchedule) MarshalCBOR(w io.Writer) error {
return err
}
// t.StartEpoch (abi.ChainEpoch) (int64)
if len("StartEpoch") > cbg.MaxLength {
return xerrors.Errorf("Value in field \"StartEpoch\" was too long")
}
if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("StartEpoch"))); err != nil {
return err
}
if _, err := io.WriteString(w, string("StartEpoch")); err != nil {
return err
}
if t.StartEpoch >= 0 {
if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.StartEpoch)); err != nil {
return err
}
} else {
if err := cw.WriteMajorTypeHeader(cbg.MajNegativeInt, uint64(-t.StartEpoch-1)); err != nil {
return err
}
}
// t.EndEpoch (abi.ChainEpoch) (int64)
if len("EndEpoch") > cbg.MaxLength {
return xerrors.Errorf("Value in field \"EndEpoch\" was too long")
@ -1183,6 +1163,28 @@ func (t *DealSchedule) MarshalCBOR(w io.Writer) error {
return err
}
}
// t.StartEpoch (abi.ChainEpoch) (int64)
if len("StartEpoch") > cbg.MaxLength {
return xerrors.Errorf("Value in field \"StartEpoch\" was too long")
}
if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("StartEpoch"))); err != nil {
return err
}
if _, err := io.WriteString(w, string("StartEpoch")); err != nil {
return err
}
if t.StartEpoch >= 0 {
if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.StartEpoch)); err != nil {
return err
}
} else {
if err := cw.WriteMajorTypeHeader(cbg.MajNegativeInt, uint64(-t.StartEpoch-1)); err != nil {
return err
}
}
return nil
}
@ -1224,32 +1226,6 @@ func (t *DealSchedule) UnmarshalCBOR(r io.Reader) (err error) {
}
switch name {
// t.StartEpoch (abi.ChainEpoch) (int64)
case "StartEpoch":
{
maj, extra, err := cr.ReadHeader()
var extraI int64
if err != nil {
return err
}
switch maj {
case cbg.MajUnsignedInt:
extraI = int64(extra)
if extraI < 0 {
return fmt.Errorf("int64 positive overflow")
}
case cbg.MajNegativeInt:
extraI = int64(extra)
if extraI < 0 {
return fmt.Errorf("int64 negative oveflow")
}
extraI = -1 - extraI
default:
return fmt.Errorf("wrong type for int64 field: %d", maj)
}
t.StartEpoch = abi.ChainEpoch(extraI)
}
// t.EndEpoch (abi.ChainEpoch) (int64)
case "EndEpoch":
{
@ -1267,7 +1243,7 @@ func (t *DealSchedule) UnmarshalCBOR(r io.Reader) (err error) {
case cbg.MajNegativeInt:
extraI = int64(extra)
if extraI < 0 {
return fmt.Errorf("int64 negative oveflow")
return fmt.Errorf("int64 negative overflow")
}
extraI = -1 - extraI
default:
@ -1276,6 +1252,32 @@ func (t *DealSchedule) UnmarshalCBOR(r io.Reader) (err error) {
t.EndEpoch = abi.ChainEpoch(extraI)
}
// t.StartEpoch (abi.ChainEpoch) (int64)
case "StartEpoch":
{
maj, extra, err := cr.ReadHeader()
var extraI int64
if err != nil {
return err
}
switch maj {
case cbg.MajUnsignedInt:
extraI = int64(extra)
if extraI < 0 {
return fmt.Errorf("int64 positive overflow")
}
case cbg.MajNegativeInt:
extraI = int64(extra)
if extraI < 0 {
return fmt.Errorf("int64 negative overflow")
}
extraI = -1 - extraI
default:
return fmt.Errorf("wrong type for int64 field: %d", maj)
}
t.StartEpoch = abi.ChainEpoch(extraI)
}
default:
// Field doesn't exist on this type, so ignore it

View File

@ -27,7 +27,7 @@ import (
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-bitfield"
datatransfer "github.com/filecoin-project/go-data-transfer"
datatransfer "github.com/filecoin-project/go-data-transfer/v2"
"github.com/filecoin-project/go-fil-markets/filestore"
"github.com/filecoin-project/go-fil-markets/retrievalmarket"
"github.com/filecoin-project/go-jsonrpc/auth"
@ -152,8 +152,8 @@ func init() {
addExample(map[string]int{"name": 42})
addExample(map[string]time.Time{"name": time.Unix(1615243938, 0).UTC()})
addExample(&types.ExecutionTrace{
Msg: ExampleValue("init", reflect.TypeOf(&types.Message{}), nil).(*types.Message),
MsgRct: ExampleValue("init", reflect.TypeOf(&types.MessageReceipt{}), nil).(*types.MessageReceipt),
Msg: ExampleValue("init", reflect.TypeOf(types.MessageTrace{}), nil).(types.MessageTrace),
MsgRct: ExampleValue("init", reflect.TypeOf(types.ReturnTrace{}), nil).(types.ReturnTrace),
})
addExample(map[string]types.Actor{
"t01236": ExampleValue("init", reflect.TypeOf(types.Actor{}), nil).(types.Actor),

View File

@ -21,7 +21,7 @@ import (
address "github.com/filecoin-project/go-address"
bitfield "github.com/filecoin-project/go-bitfield"
datatransfer "github.com/filecoin-project/go-data-transfer"
datatransfer "github.com/filecoin-project/go-data-transfer/v2"
retrievalmarket "github.com/filecoin-project/go-fil-markets/retrievalmarket"
jsonrpc "github.com/filecoin-project/go-jsonrpc"
auth "github.com/filecoin-project/go-jsonrpc/auth"
@ -155,6 +155,20 @@ func (mr *MockFullNodeMockRecorder) ChainExport(arg0, arg1, arg2, arg3 interface
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainExport", reflect.TypeOf((*MockFullNode)(nil).ChainExport), arg0, arg1, arg2, arg3)
}
// ChainExportRangeInternal mocks base method.
func (m *MockFullNode) ChainExportRangeInternal(arg0 context.Context, arg1, arg2 types.TipSetKey, arg3 api.ChainExportConfig) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ChainExportRangeInternal", arg0, arg1, arg2, arg3)
ret0, _ := ret[0].(error)
return ret0
}
// ChainExportRangeInternal indicates an expected call of ChainExportRangeInternal.
func (mr *MockFullNodeMockRecorder) ChainExportRangeInternal(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainExportRangeInternal", reflect.TypeOf((*MockFullNode)(nil).ChainExportRangeInternal), arg0, arg1, arg2, arg3)
}
// ChainGetBlock mocks base method.
func (m *MockFullNode) ChainGetBlock(arg0 context.Context, arg1 cid.Cid) (*types.BlockHeader, error) {
m.ctrl.T.Helper()
@ -380,6 +394,20 @@ func (mr *MockFullNodeMockRecorder) ChainHead(arg0 interface{}) *gomock.Call {
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainHead", reflect.TypeOf((*MockFullNode)(nil).ChainHead), arg0)
}
// ChainHotGC mocks base method.
func (m *MockFullNode) ChainHotGC(arg0 context.Context, arg1 api.HotGCOpts) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ChainHotGC", arg0, arg1)
ret0, _ := ret[0].(error)
return ret0
}
// ChainHotGC indicates an expected call of ChainHotGC.
func (mr *MockFullNodeMockRecorder) ChainHotGC(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainHotGC", reflect.TypeOf((*MockFullNode)(nil).ChainHotGC), arg0, arg1)
}
// ChainNotify mocks base method.
func (m *MockFullNode) ChainNotify(arg0 context.Context) (<-chan []*api.HeadChange, error) {
m.ctrl.T.Helper()
@ -1268,6 +1296,21 @@ func (mr *MockFullNodeMockRecorder) EthGetTransactionByHash(arg0, arg1 interface
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EthGetTransactionByHash", reflect.TypeOf((*MockFullNode)(nil).EthGetTransactionByHash), arg0, arg1)
}
// EthGetTransactionByHashLimited mocks base method.
func (m *MockFullNode) EthGetTransactionByHashLimited(arg0 context.Context, arg1 *ethtypes.EthHash, arg2 abi.ChainEpoch) (*ethtypes.EthTx, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "EthGetTransactionByHashLimited", arg0, arg1, arg2)
ret0, _ := ret[0].(*ethtypes.EthTx)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// EthGetTransactionByHashLimited indicates an expected call of EthGetTransactionByHashLimited.
func (mr *MockFullNodeMockRecorder) EthGetTransactionByHashLimited(arg0, arg1, arg2 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EthGetTransactionByHashLimited", reflect.TypeOf((*MockFullNode)(nil).EthGetTransactionByHashLimited), arg0, arg1, arg2)
}
// EthGetTransactionCount mocks base method.
func (m *MockFullNode) EthGetTransactionCount(arg0 context.Context, arg1 ethtypes.EthAddress, arg2 string) (ethtypes.EthUint64, error) {
m.ctrl.T.Helper()
@ -1313,6 +1356,21 @@ func (mr *MockFullNodeMockRecorder) EthGetTransactionReceipt(arg0, arg1 interfac
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EthGetTransactionReceipt", reflect.TypeOf((*MockFullNode)(nil).EthGetTransactionReceipt), arg0, arg1)
}
// EthGetTransactionReceiptLimited mocks base method.
func (m *MockFullNode) EthGetTransactionReceiptLimited(arg0 context.Context, arg1 ethtypes.EthHash, arg2 abi.ChainEpoch) (*api.EthTxReceipt, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "EthGetTransactionReceiptLimited", arg0, arg1, arg2)
ret0, _ := ret[0].(*api.EthTxReceipt)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// EthGetTransactionReceiptLimited indicates an expected call of EthGetTransactionReceiptLimited.
func (mr *MockFullNodeMockRecorder) EthGetTransactionReceiptLimited(arg0, arg1, arg2 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EthGetTransactionReceiptLimited", reflect.TypeOf((*MockFullNode)(nil).EthGetTransactionReceiptLimited), arg0, arg1, arg2)
}
// EthMaxPriorityFeePerGas mocks base method.
func (m *MockFullNode) EthMaxPriorityFeePerGas(arg0 context.Context) (ethtypes.EthBigInt, error) {
m.ctrl.T.Helper()

View File

@ -18,7 +18,7 @@ import (
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-bitfield"
datatransfer "github.com/filecoin-project/go-data-transfer"
datatransfer "github.com/filecoin-project/go-data-transfer/v2"
"github.com/filecoin-project/go-fil-markets/piecestore"
"github.com/filecoin-project/go-fil-markets/retrievalmarket"
"github.com/filecoin-project/go-fil-markets/storagemarket"
@ -140,6 +140,8 @@ type FullNodeMethods struct {
ChainExport func(p0 context.Context, p1 abi.ChainEpoch, p2 bool, p3 types.TipSetKey) (<-chan []byte, error) `perm:"read"`
ChainExportRangeInternal func(p0 context.Context, p1 types.TipSetKey, p2 types.TipSetKey, p3 ChainExportConfig) error `perm:"admin"`
ChainGetBlock func(p0 context.Context, p1 cid.Cid) (*types.BlockHeader, error) `perm:"read"`
ChainGetBlockMessages func(p0 context.Context, p1 cid.Cid) (*BlockMessages, error) `perm:"read"`
@ -170,6 +172,8 @@ type FullNodeMethods struct {
ChainHead func(p0 context.Context) (*types.TipSet, error) `perm:"read"`
ChainHotGC func(p0 context.Context, p1 HotGCOpts) error `perm:"admin"`
ChainNotify func(p0 context.Context) (<-chan []*HeadChange, error) `perm:"read"`
ChainPrune func(p0 context.Context, p1 PruneOpts) error `perm:"admin"`
@ -286,12 +290,16 @@ type FullNodeMethods struct {
EthGetTransactionByHash func(p0 context.Context, p1 *ethtypes.EthHash) (*ethtypes.EthTx, error) `perm:"read"`
EthGetTransactionByHashLimited func(p0 context.Context, p1 *ethtypes.EthHash, p2 abi.ChainEpoch) (*ethtypes.EthTx, error) `perm:"read"`
EthGetTransactionCount func(p0 context.Context, p1 ethtypes.EthAddress, p2 string) (ethtypes.EthUint64, error) `perm:"read"`
EthGetTransactionHashByCid func(p0 context.Context, p1 cid.Cid) (*ethtypes.EthHash, error) `perm:"read"`
EthGetTransactionReceipt func(p0 context.Context, p1 ethtypes.EthHash) (*EthTxReceipt, error) `perm:"read"`
EthGetTransactionReceiptLimited func(p0 context.Context, p1 ethtypes.EthHash, p2 abi.ChainEpoch) (*EthTxReceipt, error) `perm:"read"`
EthMaxPriorityFeePerGas func(p0 context.Context) (ethtypes.EthBigInt, error) `perm:"read"`
EthNewBlockFilter func(p0 context.Context) (ethtypes.EthFilterID, error) `perm:"write"`
@ -688,18 +696,18 @@ type GatewayMethods struct {
EthGetStorageAt func(p0 context.Context, p1 ethtypes.EthAddress, p2 ethtypes.EthBytes, p3 string) (ethtypes.EthBytes, error) ``
EthGetTransactionByBlockHashAndIndex func(p0 context.Context, p1 ethtypes.EthHash, p2 ethtypes.EthUint64) (ethtypes.EthTx, error) ``
EthGetTransactionByBlockNumberAndIndex func(p0 context.Context, p1 ethtypes.EthUint64, p2 ethtypes.EthUint64) (ethtypes.EthTx, error) ``
EthGetTransactionByHash func(p0 context.Context, p1 *ethtypes.EthHash) (*ethtypes.EthTx, error) ``
EthGetTransactionByHashLimited func(p0 context.Context, p1 *ethtypes.EthHash, p2 abi.ChainEpoch) (*ethtypes.EthTx, error) ``
EthGetTransactionCount func(p0 context.Context, p1 ethtypes.EthAddress, p2 string) (ethtypes.EthUint64, error) ``
EthGetTransactionHashByCid func(p0 context.Context, p1 cid.Cid) (*ethtypes.EthHash, error) ``
EthGetTransactionReceipt func(p0 context.Context, p1 ethtypes.EthHash) (*EthTxReceipt, error) ``
EthGetTransactionReceiptLimited func(p0 context.Context, p1 ethtypes.EthHash, p2 abi.ChainEpoch) (*EthTxReceipt, error) ``
EthMaxPriorityFeePerGas func(p0 context.Context) (ethtypes.EthBigInt, error) ``
EthNewBlockFilter func(p0 context.Context) (ethtypes.EthFilterID, error) ``
@ -772,6 +780,8 @@ type GatewayMethods struct {
StateVerifiedClientStatus func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*abi.StoragePower, error) ``
StateVerifierStatus func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*abi.StoragePower, error) ``
StateWaitMsg func(p0 context.Context, p1 cid.Cid, p2 uint64, p3 abi.ChainEpoch, p4 bool) (*MsgLookup, error) ``
Version func(p0 context.Context) (APIVersion, error) ``
@ -951,7 +961,7 @@ type StorageMinerMethods struct {
MarketListIncompleteDeals func(p0 context.Context) ([]storagemarket.MinerDeal, error) `perm:"read"`
MarketListRetrievalDeals func(p0 context.Context) ([]retrievalmarket.ProviderDealState, error) `perm:"read"`
MarketListRetrievalDeals func(p0 context.Context) ([]struct{}, error) `perm:"read"`
MarketPendingDeals func(p0 context.Context) (PendingDealInfo, error) `perm:"write"`
@ -1447,6 +1457,17 @@ func (s *FullNodeStub) ChainExport(p0 context.Context, p1 abi.ChainEpoch, p2 boo
return nil, ErrNotSupported
}
func (s *FullNodeStruct) ChainExportRangeInternal(p0 context.Context, p1 types.TipSetKey, p2 types.TipSetKey, p3 ChainExportConfig) error {
if s.Internal.ChainExportRangeInternal == nil {
return ErrNotSupported
}
return s.Internal.ChainExportRangeInternal(p0, p1, p2, p3)
}
func (s *FullNodeStub) ChainExportRangeInternal(p0 context.Context, p1 types.TipSetKey, p2 types.TipSetKey, p3 ChainExportConfig) error {
return ErrNotSupported
}
func (s *FullNodeStruct) ChainGetBlock(p0 context.Context, p1 cid.Cid) (*types.BlockHeader, error) {
if s.Internal.ChainGetBlock == nil {
return nil, ErrNotSupported
@ -1612,6 +1633,17 @@ func (s *FullNodeStub) ChainHead(p0 context.Context) (*types.TipSet, error) {
return nil, ErrNotSupported
}
func (s *FullNodeStruct) ChainHotGC(p0 context.Context, p1 HotGCOpts) error {
if s.Internal.ChainHotGC == nil {
return ErrNotSupported
}
return s.Internal.ChainHotGC(p0, p1)
}
func (s *FullNodeStub) ChainHotGC(p0 context.Context, p1 HotGCOpts) error {
return ErrNotSupported
}
func (s *FullNodeStruct) ChainNotify(p0 context.Context) (<-chan []*HeadChange, error) {
if s.Internal.ChainNotify == nil {
return nil, ErrNotSupported
@ -2250,6 +2282,17 @@ func (s *FullNodeStub) EthGetTransactionByHash(p0 context.Context, p1 *ethtypes.
return nil, ErrNotSupported
}
func (s *FullNodeStruct) EthGetTransactionByHashLimited(p0 context.Context, p1 *ethtypes.EthHash, p2 abi.ChainEpoch) (*ethtypes.EthTx, error) {
if s.Internal.EthGetTransactionByHashLimited == nil {
return nil, ErrNotSupported
}
return s.Internal.EthGetTransactionByHashLimited(p0, p1, p2)
}
func (s *FullNodeStub) EthGetTransactionByHashLimited(p0 context.Context, p1 *ethtypes.EthHash, p2 abi.ChainEpoch) (*ethtypes.EthTx, error) {
return nil, ErrNotSupported
}
func (s *FullNodeStruct) EthGetTransactionCount(p0 context.Context, p1 ethtypes.EthAddress, p2 string) (ethtypes.EthUint64, error) {
if s.Internal.EthGetTransactionCount == nil {
return *new(ethtypes.EthUint64), ErrNotSupported
@ -2283,6 +2326,17 @@ func (s *FullNodeStub) EthGetTransactionReceipt(p0 context.Context, p1 ethtypes.
return nil, ErrNotSupported
}
func (s *FullNodeStruct) EthGetTransactionReceiptLimited(p0 context.Context, p1 ethtypes.EthHash, p2 abi.ChainEpoch) (*EthTxReceipt, error) {
if s.Internal.EthGetTransactionReceiptLimited == nil {
return nil, ErrNotSupported
}
return s.Internal.EthGetTransactionReceiptLimited(p0, p1, p2)
}
func (s *FullNodeStub) EthGetTransactionReceiptLimited(p0 context.Context, p1 ethtypes.EthHash, p2 abi.ChainEpoch) (*EthTxReceipt, error) {
return nil, ErrNotSupported
}
func (s *FullNodeStruct) EthMaxPriorityFeePerGas(p0 context.Context) (ethtypes.EthBigInt, error) {
if s.Internal.EthMaxPriorityFeePerGas == nil {
return *new(ethtypes.EthBigInt), ErrNotSupported
@ -4395,28 +4449,6 @@ func (s *GatewayStub) EthGetStorageAt(p0 context.Context, p1 ethtypes.EthAddress
return *new(ethtypes.EthBytes), ErrNotSupported
}
func (s *GatewayStruct) EthGetTransactionByBlockHashAndIndex(p0 context.Context, p1 ethtypes.EthHash, p2 ethtypes.EthUint64) (ethtypes.EthTx, error) {
if s.Internal.EthGetTransactionByBlockHashAndIndex == nil {
return *new(ethtypes.EthTx), ErrNotSupported
}
return s.Internal.EthGetTransactionByBlockHashAndIndex(p0, p1, p2)
}
func (s *GatewayStub) EthGetTransactionByBlockHashAndIndex(p0 context.Context, p1 ethtypes.EthHash, p2 ethtypes.EthUint64) (ethtypes.EthTx, error) {
return *new(ethtypes.EthTx), ErrNotSupported
}
func (s *GatewayStruct) EthGetTransactionByBlockNumberAndIndex(p0 context.Context, p1 ethtypes.EthUint64, p2 ethtypes.EthUint64) (ethtypes.EthTx, error) {
if s.Internal.EthGetTransactionByBlockNumberAndIndex == nil {
return *new(ethtypes.EthTx), ErrNotSupported
}
return s.Internal.EthGetTransactionByBlockNumberAndIndex(p0, p1, p2)
}
func (s *GatewayStub) EthGetTransactionByBlockNumberAndIndex(p0 context.Context, p1 ethtypes.EthUint64, p2 ethtypes.EthUint64) (ethtypes.EthTx, error) {
return *new(ethtypes.EthTx), ErrNotSupported
}
func (s *GatewayStruct) EthGetTransactionByHash(p0 context.Context, p1 *ethtypes.EthHash) (*ethtypes.EthTx, error) {
if s.Internal.EthGetTransactionByHash == nil {
return nil, ErrNotSupported
@ -4428,6 +4460,17 @@ func (s *GatewayStub) EthGetTransactionByHash(p0 context.Context, p1 *ethtypes.E
return nil, ErrNotSupported
}
func (s *GatewayStruct) EthGetTransactionByHashLimited(p0 context.Context, p1 *ethtypes.EthHash, p2 abi.ChainEpoch) (*ethtypes.EthTx, error) {
if s.Internal.EthGetTransactionByHashLimited == nil {
return nil, ErrNotSupported
}
return s.Internal.EthGetTransactionByHashLimited(p0, p1, p2)
}
func (s *GatewayStub) EthGetTransactionByHashLimited(p0 context.Context, p1 *ethtypes.EthHash, p2 abi.ChainEpoch) (*ethtypes.EthTx, error) {
return nil, ErrNotSupported
}
func (s *GatewayStruct) EthGetTransactionCount(p0 context.Context, p1 ethtypes.EthAddress, p2 string) (ethtypes.EthUint64, error) {
if s.Internal.EthGetTransactionCount == nil {
return *new(ethtypes.EthUint64), ErrNotSupported
@ -4461,6 +4504,17 @@ func (s *GatewayStub) EthGetTransactionReceipt(p0 context.Context, p1 ethtypes.E
return nil, ErrNotSupported
}
func (s *GatewayStruct) EthGetTransactionReceiptLimited(p0 context.Context, p1 ethtypes.EthHash, p2 abi.ChainEpoch) (*EthTxReceipt, error) {
if s.Internal.EthGetTransactionReceiptLimited == nil {
return nil, ErrNotSupported
}
return s.Internal.EthGetTransactionReceiptLimited(p0, p1, p2)
}
func (s *GatewayStub) EthGetTransactionReceiptLimited(p0 context.Context, p1 ethtypes.EthHash, p2 abi.ChainEpoch) (*EthTxReceipt, error) {
return nil, ErrNotSupported
}
func (s *GatewayStruct) EthMaxPriorityFeePerGas(p0 context.Context) (ethtypes.EthBigInt, error) {
if s.Internal.EthMaxPriorityFeePerGas == nil {
return *new(ethtypes.EthBigInt), ErrNotSupported
@ -4857,6 +4911,17 @@ func (s *GatewayStub) StateVerifiedClientStatus(p0 context.Context, p1 address.A
return nil, ErrNotSupported
}
func (s *GatewayStruct) StateVerifierStatus(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*abi.StoragePower, error) {
if s.Internal.StateVerifierStatus == nil {
return nil, ErrNotSupported
}
return s.Internal.StateVerifierStatus(p0, p1, p2)
}
func (s *GatewayStub) StateVerifierStatus(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*abi.StoragePower, error) {
return nil, ErrNotSupported
}
func (s *GatewayStruct) StateWaitMsg(p0 context.Context, p1 cid.Cid, p2 uint64, p3 abi.ChainEpoch, p4 bool) (*MsgLookup, error) {
if s.Internal.StateWaitMsg == nil {
return nil, ErrNotSupported
@ -5671,15 +5736,15 @@ func (s *StorageMinerStub) MarketListIncompleteDeals(p0 context.Context) ([]stor
return *new([]storagemarket.MinerDeal), ErrNotSupported
}
func (s *StorageMinerStruct) MarketListRetrievalDeals(p0 context.Context) ([]retrievalmarket.ProviderDealState, error) {
func (s *StorageMinerStruct) MarketListRetrievalDeals(p0 context.Context) ([]struct{}, error) {
if s.Internal.MarketListRetrievalDeals == nil {
return *new([]retrievalmarket.ProviderDealState), ErrNotSupported
return *new([]struct{}), ErrNotSupported
}
return s.Internal.MarketListRetrievalDeals(p0)
}
func (s *StorageMinerStub) MarketListRetrievalDeals(p0 context.Context) ([]retrievalmarket.ProviderDealState, error) {
return *new([]retrievalmarket.ProviderDealState), ErrNotSupported
func (s *StorageMinerStub) MarketListRetrievalDeals(p0 context.Context) ([]struct{}, error) {
return *new([]struct{}), ErrNotSupported
}
func (s *StorageMinerStruct) MarketPendingDeals(p0 context.Context) (PendingDealInfo, error) {

View File

@ -8,13 +8,15 @@ import (
"github.com/google/uuid"
"github.com/ipfs/go-cid"
"github.com/ipfs/go-graphsync"
"github.com/ipld/go-ipld-prime"
"github.com/ipld/go-ipld-prime/codec/dagjson"
pubsub "github.com/libp2p/go-libp2p-pubsub"
"github.com/libp2p/go-libp2p/core/network"
"github.com/libp2p/go-libp2p/core/peer"
ma "github.com/multiformats/go-multiaddr"
"github.com/filecoin-project/go-address"
datatransfer "github.com/filecoin-project/go-data-transfer"
datatransfer "github.com/filecoin-project/go-data-transfer/v2"
"github.com/filecoin-project/go-fil-markets/retrievalmarket"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/builtin/v9/miner"
@ -110,17 +112,13 @@ func NewDataTransferChannel(hostID peer.ID, channelState datatransfer.ChannelSta
IsSender: channelState.Sender() == hostID,
Message: channelState.Message(),
}
stringer, ok := channelState.Voucher().(fmt.Stringer)
if ok {
channel.Voucher = stringer.String()
} else {
voucherJSON, err := json.Marshal(channelState.Voucher())
voucher := channelState.Voucher()
voucherJSON, err := ipld.Encode(voucher.Voucher, dagjson.Encode)
if err != nil {
channel.Voucher = fmt.Errorf("Voucher Serialization: %w", err).Error()
} else {
channel.Voucher = string(voucherJSON)
}
}
if channel.IsSender {
channel.IsInitiator = !channelState.IsPull()
channel.Transferred = channelState.Sent()
@ -400,3 +398,12 @@ func (m *MsgUuidMapType) UnmarshalJSON(b []byte) error {
}
return nil
}
// ChainExportConfig holds configuration for ranged chain exports.
type ChainExportConfig struct {
WriteBufferSize int
NumWorkers int
IncludeMessages bool
IncludeReceipts bool
IncludeStateRoots bool
}
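A sketch of filling in this config for a ranged export; the helper name and all values below are illustrative assumptions, not recommended defaults:

// exampleExportConfig is a hypothetical helper, assumed to live in the api
// package where ChainExportConfig is defined.
func exampleExportConfig() ChainExportConfig {
	return ChainExportConfig{
		WriteBufferSize:   1 << 20, // 1 MiB buffer for the export writer (assumed)
		NumWorkers:        4,       // parallel export workers (assumed)
		IncludeMessages:   true,
		IncludeReceipts:   true,
		IncludeStateRoots: false, // omit state roots for a lighter export
	}
}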

View File

@ -10,7 +10,7 @@ import (
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-bitfield"
datatransfer "github.com/filecoin-project/go-data-transfer"
datatransfer "github.com/filecoin-project/go-data-transfer/v2"
"github.com/filecoin-project/go-fil-markets/retrievalmarket"
"github.com/filecoin-project/go-fil-markets/storagemarket"
"github.com/filecoin-project/go-state-types/abi"

View File

@ -12,7 +12,7 @@ import (
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-bitfield"
datatransfer "github.com/filecoin-project/go-data-transfer"
datatransfer "github.com/filecoin-project/go-data-transfer/v2"
"github.com/filecoin-project/go-fil-markets/retrievalmarket"
"github.com/filecoin-project/go-fil-markets/storagemarket"
"github.com/filecoin-project/go-state-types/abi"

View File

@ -20,7 +20,7 @@ import (
address "github.com/filecoin-project/go-address"
bitfield "github.com/filecoin-project/go-bitfield"
datatransfer "github.com/filecoin-project/go-data-transfer"
datatransfer "github.com/filecoin-project/go-data-transfer/v2"
retrievalmarket "github.com/filecoin-project/go-fil-markets/retrievalmarket"
storagemarket "github.com/filecoin-project/go-fil-markets/storagemarket"
auth "github.com/filecoin-project/go-jsonrpc/auth"

View File

@ -20,6 +20,7 @@ import (
pool "github.com/libp2p/go-buffer-pool"
"github.com/multiformats/go-base32"
"go.uber.org/zap"
"golang.org/x/xerrors"
"github.com/filecoin-project/lotus/blockstore"
)
@ -45,6 +46,7 @@ const (
MemoryMap = options.MemoryMap
// LoadToRAM is equivalent to badger/options.LoadToRAM.
LoadToRAM = options.LoadToRAM
defaultGCThreshold = 0.125
)
// Options embeds the badger options themselves, and augments them with
@ -394,6 +396,9 @@ func (b *Blockstore) doCopy(from, to *badger.DB) error {
if workers < 2 {
workers = 2
}
if workers > 8 {
workers = 8
}
stream := from.NewStream()
stream.NumGo = workers
@ -439,7 +444,7 @@ func (b *Blockstore) deleteDB(path string) {
}
}
func (b *Blockstore) onlineGC() error {
func (b *Blockstore) onlineGC(ctx context.Context, threshold float64, checkFreq time.Duration, check func() error) error {
b.lockDB()
defer b.unlockDB()
@ -448,14 +453,26 @@ func (b *Blockstore) onlineGC() error {
if nworkers < 2 {
nworkers = 2
}
if nworkers > 7 { // max out at 1 goroutine per badger level
nworkers = 7
}
err := b.db.Flatten(nworkers)
if err != nil {
return err
}
checkTick := time.NewTimer(checkFreq)
defer checkTick.Stop()
for err == nil {
err = b.db.RunValueLogGC(0.125)
select {
case <-ctx.Done():
err = ctx.Err()
case <-checkTick.C:
err = check()
checkTick.Reset(checkFreq)
default:
err = b.db.RunValueLogGC(threshold)
}
}
if err == badger.ErrNoRewrite {
@ -468,7 +485,7 @@ func (b *Blockstore) onlineGC() error {
// CollectGarbage compacts and runs garbage collection on the value log;
// implements the BlockstoreGC trait
func (b *Blockstore) CollectGarbage(opts ...blockstore.BlockstoreGCOption) error {
func (b *Blockstore) CollectGarbage(ctx context.Context, opts ...blockstore.BlockstoreGCOption) error {
if err := b.access(); err != nil {
return err
}
@ -485,8 +502,58 @@ func (b *Blockstore) CollectGarbage(opts ...blockstore.BlockstoreGCOption) error
if options.FullGC {
return b.movingGC()
}
threshold := options.Threshold
if threshold == 0 {
threshold = defaultGCThreshold
}
checkFreq := options.CheckFreq
if checkFreq < 30*time.Second { // disallow checking more frequently than block time
checkFreq = 30 * time.Second
}
check := options.Check
if check == nil {
check = func() error {
return nil
}
}
return b.onlineGC(ctx, threshold, checkFreq, check)
}
return b.onlineGC()
// GCOnce runs garbage collection on the value log;
// implements BlockstoreGCOnce trait
func (b *Blockstore) GCOnce(ctx context.Context, opts ...blockstore.BlockstoreGCOption) error {
if err := b.access(); err != nil {
return err
}
defer b.viewers.Done()
var options blockstore.BlockstoreGCOptions
for _, opt := range opts {
err := opt(&options)
if err != nil {
return err
}
}
if options.FullGC {
return xerrors.Errorf("FullGC option specified for GCOnce but full GC is non incremental")
}
threshold := options.Threshold
if threshold == 0 {
threshold = defaultGCThreshold
}
b.lockDB()
defer b.unlockDB()
// Note: no compaction is needed before a single GC pass, as we will hit at most one vlog anyway
err := b.db.RunValueLogGC(threshold)
if err == badger.ErrNoRewrite {
// not really an error in this case, it signals the end of GC
return nil
}
return err
}
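GCOnce performs exactly one value-log GC pass and maps badger.ErrNoRewrite to nil, so any looping policy belongs to the caller. A sketch of such a caller-driven loop, assuming bs is a *Blockstore from this package and treating the cadence and threshold as arbitrary choices:

// gcLoop is a hypothetical caller; context, time and the lotus blockstore
// package (imported as blockstore, as at the top of this file) are assumed.
func gcLoop(ctx context.Context, bs *Blockstore) error {
	ticker := time.NewTicker(5 * time.Minute) // arbitrary cadence
	defer ticker.Stop()
	for {
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-ticker.C:
			// One pass per tick; GCOnce returns nil once badger reports
			// there is nothing left to rewrite.
			if err := bs.GCOnce(ctx, blockstore.WithThreshold(0.05)); err != nil {
				return err
			}
		}
	}
}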
// Size returns the aggregate size of the blockstore
@ -551,6 +618,18 @@ func (b *Blockstore) View(ctx context.Context, cid cid.Cid, fn func([]byte) erro
})
}
func (b *Blockstore) Flush(context.Context) error {
if err := b.access(); err != nil {
return err
}
defer b.viewers.Done()
b.lockDB()
defer b.unlockDB()
return b.db.Sync()
}
// Has implements Blockstore.Has.
func (b *Blockstore) Has(ctx context.Context, cid cid.Cid) (bool, error) {
if err := b.access(); err != nil {

View File

@ -145,7 +145,7 @@ func testMove(t *testing.T, optsF func(string) Options) {
return nil
})
g.Go(func() error {
return db.CollectGarbage(blockstore.WithFullGC(true))
return db.CollectGarbage(ctx, blockstore.WithFullGC(true))
})
err = g.Wait()
@ -230,7 +230,7 @@ func testMove(t *testing.T, optsF func(string) Options) {
checkPath()
// now do another FullGC to test the double move and following of symlinks
if err := db.CollectGarbage(blockstore.WithFullGC(true)); err != nil {
if err := db.CollectGarbage(ctx, blockstore.WithFullGC(true)); err != nil {
t.Fatal(err)
}

View File

@ -2,6 +2,7 @@ package blockstore
import (
"context"
"time"
"github.com/ipfs/go-cid"
ds "github.com/ipfs/go-datastore"
@ -18,6 +19,7 @@ type Blockstore interface {
blockstore.Blockstore
blockstore.Viewer
BatchDeleter
Flusher
}
// BasicBlockstore is an alias to the original IPFS Blockstore.
@ -25,6 +27,10 @@ type BasicBlockstore = blockstore.Blockstore
type Viewer = blockstore.Viewer
type Flusher interface {
Flush(context.Context) error
}
type BatchDeleter interface {
DeleteMany(ctx context.Context, cids []cid.Cid) error
}
@ -36,7 +42,12 @@ type BlockstoreIterator interface {
// BlockstoreGC is a trait for blockstores that support online garbage collection
type BlockstoreGC interface {
CollectGarbage(options ...BlockstoreGCOption) error
CollectGarbage(ctx context.Context, options ...BlockstoreGCOption) error
}
// BlockstoreGCOnce is a trait for a blockstore that supports incremental online garbage collection
type BlockstoreGCOnce interface {
GCOnce(ctx context.Context, options ...BlockstoreGCOption) error
}
// BlockstoreGCOption is a functional interface for controlling blockstore GC options
@ -45,6 +56,12 @@ type BlockstoreGCOption = func(*BlockstoreGCOptions) error
// BlockstoreGCOptions is a struct with GC options
type BlockstoreGCOptions struct {
FullGC bool
// fraction of garbage in badger vlog before it's worth processing in online GC
Threshold float64
// how often to call the check function
CheckFreq time.Duration
// function to call periodically to pause or early terminate GC
Check func() error
}
func WithFullGC(fullgc bool) BlockstoreGCOption {
@ -54,6 +71,27 @@ func WithFullGC(fullgc bool) BlockstoreGCOption {
}
}
func WithThreshold(threshold float64) BlockstoreGCOption {
return func(opts *BlockstoreGCOptions) error {
opts.Threshold = threshold
return nil
}
}
func WithCheckFreq(f time.Duration) BlockstoreGCOption {
return func(opts *BlockstoreGCOptions) error {
opts.CheckFreq = f
return nil
}
}
func WithCheck(check func() error) BlockstoreGCOption {
return func(opts *BlockstoreGCOptions) error {
opts.Check = check
return nil
}
}
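Taken together, the functional options compose as variadic arguments to CollectGarbage; a sketch under the assumption that bs satisfies BlockstoreGC (note the badger implementation above clamps CheckFreq to a 30-second floor):

// collectWithOptions is a hypothetical caller inside this package.
func collectWithOptions(ctx context.Context, bs BlockstoreGC) error {
	return bs.CollectGarbage(ctx,
		WithThreshold(0.2),         // only rewrite vlogs that are >20% garbage
		WithCheckFreq(time.Minute), // invoke the check about once per minute
		WithCheck(func() error {    // returning a non-nil error stops GC early
			return nil
		}),
	)
}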
// BlockstoreSize is a trait for on-disk blockstores that can report their size
type BlockstoreSize interface {
Size() (int64, error)
@ -92,6 +130,13 @@ type adaptedBlockstore struct {
var _ Blockstore = (*adaptedBlockstore)(nil)
func (a *adaptedBlockstore) Flush(ctx context.Context) error {
if flusher, canFlush := a.Blockstore.(Flusher); canFlush {
return flusher.Flush(ctx)
}
return nil
}
func (a *adaptedBlockstore) View(ctx context.Context, cid cid.Cid, callback func([]byte) error) error {
blk, err := a.Get(ctx, cid)
if err != nil {

View File

@ -46,6 +46,8 @@ var (
_ Viewer = (*BufferedBlockstore)(nil)
)
func (bs *BufferedBlockstore) Flush(ctx context.Context) error { return bs.write.Flush(ctx) }
func (bs *BufferedBlockstore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) {
a, err := bs.read.AllKeysChan(ctx)
if err != nil {

View File

@ -38,6 +38,10 @@ func (b *discardstore) View(ctx context.Context, cid cid.Cid, f func([]byte) err
return b.bs.View(ctx, cid, f)
}
func (b *discardstore) Flush(ctx context.Context) error {
return nil
}
func (b *discardstore) Put(ctx context.Context, blk blocks.Block) error {
return nil
}

View File

@ -179,3 +179,7 @@ func (b *idstore) Close() error {
}
return nil
}
func (b *idstore) Flush(ctx context.Context) error {
return b.bs.Flush(ctx)
}

View File

@ -3,7 +3,7 @@ package blockstore
import (
"bytes"
"context"
"io/ioutil"
"io"
"github.com/ipfs/go-cid"
httpapi "github.com/ipfs/go-ipfs-http-client"
@ -103,7 +103,7 @@ func (i *IPFSBlockstore) Get(ctx context.Context, cid cid.Cid) (blocks.Block, er
return nil, xerrors.Errorf("getting ipfs block: %w", err)
}
data, err := ioutil.ReadAll(rd)
data, err := io.ReadAll(rd)
if err != nil {
return nil, err
}

View File

@ -17,6 +17,8 @@ func NewMemory() MemBlockstore {
// To match behavior of badger blockstore we index by multihash only.
type MemBlockstore map[string]blocks.Block
func (MemBlockstore) Flush(context.Context) error { return nil }
func (m MemBlockstore) DeleteBlock(ctx context.Context, k cid.Cid) error {
delete(m, string(k.Hash()))
return nil

View File

@ -410,6 +410,8 @@ func (n *NetworkStore) HashOnRead(enabled bool) {
return
}
func (*NetworkStore) Flush(context.Context) error { return nil }
func (n *NetworkStore) Stop(ctx context.Context) error {
close(n.closing)

View File

@ -11,6 +11,8 @@ import (
"github.com/ipfs/go-cid"
"go.uber.org/zap"
"golang.org/x/xerrors"
"github.com/filecoin-project/lotus/system"
)
type BadgerMarkSetEnv struct {
@ -349,7 +351,7 @@ func (s *BadgerMarkSet) write(seqno int) (err error) {
persist := s.persist
s.mx.RUnlock()
if persist {
if persist && !system.BadgerFsyncDisable { // WARNING: disabling sync makes recovery from crash during critical section unsound
return s.db.Sync()
}

View File

@ -115,6 +115,23 @@ type Config struct {
// A positive value is the number of compactions before a full GC is performed;
// a value of 1 will perform full GC in every compaction.
HotStoreFullGCFrequency uint64
// HotstoreMaxSpaceTarget suggests the max allowed space the hotstore can take.
// This is not a hard limit; it is possible for the hotstore to exceed the target,
// for example if state grows massively between compactions. The splitstore
// will make a best effort to avoid overflowing the target and in practice should
// never overflow. This field is used when doing GC at the end of a compaction to
// adaptively choose moving GC.
HotstoreMaxSpaceTarget uint64
// Moving GC will be triggered when total moving size exceeds
// HotstoreMaxSpaceTarget - HotstoreMaxSpaceThreshold
HotstoreMaxSpaceThreshold uint64
// Safety buffer to prevent moving GC from overflowing disk.
// Moving GC will not occur when total moving size exceeds
// HotstoreMaxSpaceTarget - HotstoreMaxSpaceSafetyBuffer
HotstoreMaxSpaceSafetyBuffer uint64
}
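To make the interplay of these knobs concrete, a worked example with assumed values (ignoring the separate HotStoreFullGCFrequency trigger): with a 650 GiB target, 150 GiB threshold and 50 GiB safety buffer, moving GC is wanted once the measured hotstore size plus the approximate copy size exceeds 650 - 150 = 500 GiB, and remains safe only while that sum stays under 650 - 50 = 600 GiB; in between, the splitstore falls back to aggressive online GC.

// wouldMoveGC is a hypothetical sketch of that arithmetic; hotSize and
// copySizeApprox stand in for the measurements taken in gcHotAfterCompaction
// further down in this diff, and the constants are assumed, not defaults.
func wouldMoveGC(hotSize, copySizeApprox int64) (shouldDoFull, canDoFull bool) {
	const (
		target    = int64(650) << 30 // HotstoreMaxSpaceTarget
		threshold = int64(150) << 30 // HotstoreMaxSpaceThreshold
		safety    = int64(50) << 30  // HotstoreMaxSpaceSafetyBuffer
	)
	shouldDoFull = hotSize+copySizeApprox > target-threshold // above ~500 GiB: want moving GC
	canDoFull = hotSize+copySizeApprox < target-safety       // below ~600 GiB: safe to move
	return shouldDoFull, canDoFull
}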
// ChainAccessor allows the Splitstore to access the chain. It will most likely
@ -165,10 +182,16 @@ type SplitStore struct {
compactionIndex int64
pruneIndex int64
onlineGCCnt int64
ctx context.Context
cancel func()
outOfSync int32 // for fast checking
chainSyncMx sync.Mutex
chainSyncCond sync.Cond
chainSyncFinished bool // protected by chainSyncMx
debug *debugLog
// transactional protection for concurrent read/writes during compaction
@ -195,6 +218,17 @@ type SplitStore struct {
// registered protectors
protectors []func(func(cid.Cid) error) error
// dag sizes measured during latest compaction
// logged and used for GC strategy
// protected by compaction lock
szWalk int64
szProtectedTxns int64
szKeys int64 // approximate, not counting keys protected when entering critical section
// protected by txnLk
szMarkedLiveRefs int64
}
var _ bstore.Blockstore = (*SplitStore)(nil)
@ -232,6 +266,7 @@ func Open(path string, ds dstore.Datastore, hot, cold bstore.Blockstore, cfg *Co
ss.txnViewsCond.L = &ss.txnViewsMx
ss.txnSyncCond.L = &ss.txnSyncMx
ss.chainSyncCond.L = &ss.chainSyncMx
ss.ctx, ss.cancel = context.WithCancel(context.Background())
ss.reifyCond.L = &ss.reifyMx
@ -447,6 +482,23 @@ func (s *SplitStore) GetSize(ctx context.Context, cid cid.Cid) (int, error) {
}
}
func (s *SplitStore) Flush(ctx context.Context) error {
s.txnLk.RLock()
defer s.txnLk.RUnlock()
if err := s.cold.Flush(ctx); err != nil {
return err
}
if err := s.hot.Flush(ctx); err != nil {
return err
}
if err := s.ds.Sync(ctx, dstore.Key{}); err != nil {
return err
}
return nil
}
func (s *SplitStore) Put(ctx context.Context, blk blocks.Block) error {
if isIdentiyCid(blk.Cid()) {
return nil
@ -776,6 +828,11 @@ func (s *SplitStore) Close() error {
s.txnSyncCond.Broadcast()
s.txnSyncMx.Unlock()
s.chainSyncMx.Lock()
s.chainSyncFinished = true
s.chainSyncCond.Broadcast()
s.chainSyncMx.Unlock()
log.Warn("close with ongoing compaction in progress; waiting for it to finish...")
for atomic.LoadInt32(&s.compacting) == 1 {
time.Sleep(time.Second)

View File

@ -95,7 +95,7 @@ func (s *SplitStore) doCheck(curTs *types.TipSet) error {
}
defer visitor.Close() //nolint
err = s.walkChain(curTs, boundaryEpoch, boundaryEpoch, visitor,
size := s.walkChain(curTs, boundaryEpoch, boundaryEpoch, visitor,
func(c cid.Cid) error {
if isUnitaryObject(c) {
return errStopWalk
@ -133,7 +133,7 @@ func (s *SplitStore) doCheck(curTs *types.TipSet) error {
return err
}
log.Infow("check done", "cold", *coldCnt, "missing", *missingCnt)
log.Infow("check done", "cold", *coldCnt, "missing", *missingCnt, "walk size", size)
write("--")
write("cold: %d missing: %d", *coldCnt, *missingCnt)
write("DONE")

View File

@ -67,6 +67,7 @@ var (
const (
batchSize = 16384
cidKeySize = 128
)
func (s *SplitStore) HeadChange(_, apply []*types.TipSet) error {
@ -90,7 +91,35 @@ func (s *SplitStore) HeadChange(_, apply []*types.TipSet) error {
// Regardless, we put a mutex in HeadChange just to be safe
if !atomic.CompareAndSwapInt32(&s.compacting, 0, 1) {
// we are currently compacting -- protect the new tipset(s)
// we are currently compacting
// 1. Signal sync condition to yield compaction when out of sync and resume when in sync
timestamp := time.Unix(int64(curTs.MinTimestamp()), 0)
if CheckSyncGap && time.Since(timestamp) > SyncGapTime {
/* Chain out of sync */
if atomic.CompareAndSwapInt32(&s.outOfSync, 0, 1) {
// transition from in sync to out of sync
s.chainSyncMx.Lock()
s.chainSyncFinished = false
s.chainSyncMx.Unlock()
}
// already out of sync, no signaling necessary
}
// TODO: ok to use hysteresis with no transitions between 30s and 1m?
if time.Since(timestamp) < SyncWaitTime {
/* Chain in sync */
if atomic.CompareAndSwapInt32(&s.outOfSync, 0, 0) {
// already in sync, no signaling necessary
} else {
// transition from out of sync to in sync
s.chainSyncMx.Lock()
s.chainSyncFinished = true
s.chainSyncCond.Broadcast()
s.chainSyncMx.Unlock()
}
}
// 2. protect the new tipset(s)
s.protectTipSets(apply)
return nil
}
@ -115,8 +144,6 @@ func (s *SplitStore) HeadChange(_, apply []*types.TipSet) error {
return nil
}
// Prioritize hot store compaction over cold store prune
if epoch-s.baseEpoch > CompactionThreshold {
// it's time to compact -- prepare the transaction and go!
s.beginTxnProtect()
@ -176,6 +203,8 @@ func (s *SplitStore) protectTipSets(apply []*types.TipSet) {
timestamp := time.Unix(int64(curTs.MinTimestamp()), 0)
doSync := time.Since(timestamp) < SyncWaitTime
go func() {
// we are holding the txnLk while marking
// so critical section cannot delete
if doSync {
defer func() {
s.txnSyncMx.Lock()
@ -199,9 +228,11 @@ func (s *SplitStore) markLiveRefs(cids []cid.Cid) {
log.Debugf("marking %d live refs", len(cids))
startMark := time.Now()
szMarked := new(int64)
count := new(int32)
visitor := newConcurrentVisitor()
walkObject := func(c cid.Cid) error {
walkObject := func(c cid.Cid) (int64, error) {
return s.walkObjectIncomplete(c, visitor,
func(c cid.Cid) error {
if isUnitaryObject(c) {
@ -228,10 +259,12 @@ func (s *SplitStore) markLiveRefs(cids []cid.Cid) {
// optimize the common case of single put
if len(cids) == 1 {
if err := walkObject(cids[0]); err != nil {
sz, err := walkObject(cids[0])
if err != nil {
log.Errorf("error marking tipset refs: %s", err)
}
log.Debugw("marking live refs done", "took", time.Since(startMark), "marked", *count)
atomic.AddInt64(szMarked, sz)
return
}
@ -243,9 +276,11 @@ func (s *SplitStore) markLiveRefs(cids []cid.Cid) {
worker := func() error {
for c := range workch {
if err := walkObject(c); err != nil {
sz, err := walkObject(c)
if err != nil {
return err
}
atomic.AddInt64(szMarked, sz)
}
return nil
@ -268,7 +303,8 @@ func (s *SplitStore) markLiveRefs(cids []cid.Cid) {
log.Errorf("error marking tipset refs: %s", err)
}
log.Debugw("marking live refs done", "took", time.Since(startMark), "marked", *count)
log.Debugw("marking live refs done", "took", time.Since(startMark), "marked", *count, "size marked", *szMarked)
s.szMarkedLiveRefs += atomic.LoadInt64(szMarked)
}
// transactionally protect a view
@ -361,6 +397,7 @@ func (s *SplitStore) protectTxnRefs(markSet MarkSet) error {
log.Infow("protecting transactional references", "refs", len(txnRefs))
count := 0
sz := new(int64)
workch := make(chan cid.Cid, len(txnRefs))
startProtect := time.Now()
@ -393,10 +430,11 @@ func (s *SplitStore) protectTxnRefs(markSet MarkSet) error {
worker := func() error {
for c := range workch {
err := s.doTxnProtect(c, markSet)
szTxn, err := s.doTxnProtect(c, markSet)
if err != nil {
return xerrors.Errorf("error protecting transactional references to %s: %w", c, err)
}
atomic.AddInt64(sz, szTxn)
}
return nil
}
@ -409,16 +447,16 @@ func (s *SplitStore) protectTxnRefs(markSet MarkSet) error {
if err := g.Wait(); err != nil {
return err
}
log.Infow("protecting transactional refs done", "took", time.Since(startProtect), "protected", count)
s.szProtectedTxns += atomic.LoadInt64(sz)
log.Infow("protecting transactional refs done", "took", time.Since(startProtect), "protected", count, "protected size", sz)
}
}
// transactionally protect a reference by walking the object and marking.
// concurrent markings are short circuited by checking the markset.
func (s *SplitStore) doTxnProtect(root cid.Cid, markSet MarkSet) error {
if err := s.checkClosing(); err != nil {
return err
func (s *SplitStore) doTxnProtect(root cid.Cid, markSet MarkSet) (int64, error) {
if err := s.checkYield(); err != nil {
return 0, err
}
// Note: cold objects are deleted heaviest first, so the constituents of an object
@ -442,7 +480,7 @@ func (s *SplitStore) doTxnProtect(root cid.Cid, markSet MarkSet) error {
},
func(c cid.Cid) error {
if s.txnMissing != nil {
log.Warnf("missing object reference %s in %s", c, root)
log.Debugf("missing object reference %s in %s", c, root)
s.txnRefsMx.Lock()
s.txnMissing[c] = struct{}{}
s.txnRefsMx.Unlock()
@ -509,6 +547,7 @@ func (s *SplitStore) doCompact(curTs *types.TipSet) error {
// might be potentially inconsistent; abort compaction and notify the user to intervene.
return xerrors.Errorf("checkpoint exists; aborting compaction")
}
s.clearSizeMeasurements()
currentEpoch := curTs.Height()
boundaryEpoch := currentEpoch - CompactionBoundary
@ -534,7 +573,7 @@ func (s *SplitStore) doCompact(curTs *types.TipSet) error {
}
defer coldSet.Close() //nolint:errcheck
if err := s.checkClosing(); err != nil {
if err := s.checkYield(); err != nil {
return err
}
@ -598,7 +637,6 @@ func (s *SplitStore) doCompact(curTs *types.TipSet) error {
}
err = s.walkChain(curTs, boundaryEpoch, inclMsgsEpoch, &noopVisitor{}, fHot, fCold)
if err != nil {
return xerrors.Errorf("error marking: %w", err)
}
@ -607,7 +645,7 @@ func (s *SplitStore) doCompact(curTs *types.TipSet) error {
log.Infow("marking done", "took", time.Since(startMark), "marked", *count)
if err := s.checkClosing(); err != nil {
if err := s.checkYield(); err != nil {
return err
}
@ -617,7 +655,7 @@ func (s *SplitStore) doCompact(curTs *types.TipSet) error {
return xerrors.Errorf("error protecting transactional refs: %w", err)
}
if err := s.checkClosing(); err != nil {
if err := s.checkYield(); err != nil {
return err
}
@ -638,7 +676,7 @@ func (s *SplitStore) doCompact(curTs *types.TipSet) error {
defer purgew.Close() //nolint:errcheck
// some stats for logging
var hotCnt, coldCnt, purgeCnt int
var hotCnt, coldCnt, purgeCnt int64
err = s.hot.ForEachKey(func(c cid.Cid) error {
// was it marked?
mark, err := markSet.Has(c)
@ -689,11 +727,12 @@ func (s *SplitStore) doCompact(curTs *types.TipSet) error {
log.Infow("cold collection done", "took", time.Since(startCollect))
log.Infow("compaction stats", "hot", hotCnt, "cold", coldCnt)
stats.Record(s.ctx, metrics.SplitstoreCompactionHot.M(int64(hotCnt)))
stats.Record(s.ctx, metrics.SplitstoreCompactionCold.M(int64(coldCnt)))
log.Infow("compaction stats", "hot", hotCnt, "cold", coldCnt, "purge", purgeCnt)
s.szKeys = hotCnt * cidKeySize
stats.Record(s.ctx, metrics.SplitstoreCompactionHot.M(hotCnt))
stats.Record(s.ctx, metrics.SplitstoreCompactionCold.M(coldCnt))
if err := s.checkClosing(); err != nil {
if err := s.checkYield(); err != nil {
return err
}
@ -702,7 +741,7 @@ func (s *SplitStore) doCompact(curTs *types.TipSet) error {
// possibly delete objects we didn't have when we were collecting cold objects)
s.waitForMissingRefs(markSet)
if err := s.checkClosing(); err != nil {
if err := s.checkYield(); err != nil {
return err
}
@ -722,7 +761,7 @@ func (s *SplitStore) doCompact(curTs *types.TipSet) error {
}
log.Infow("moving done", "took", time.Since(startMove))
if err := s.checkClosing(); err != nil {
if err := s.checkYield(); err != nil {
return err
}
@ -753,7 +792,7 @@ func (s *SplitStore) doCompact(curTs *types.TipSet) error {
}
// wait for the head to catch up so that the current tipset is marked
s.waitForSync()
s.waitForTxnSync()
if err := s.checkClosing(); err != nil {
return err
@ -773,8 +812,8 @@ func (s *SplitStore) doCompact(curTs *types.TipSet) error {
return xerrors.Errorf("error purging cold objects: %w", err)
}
log.Infow("purging cold objects from hotstore done", "took", time.Since(startPurge))
s.endCriticalSection()
log.Infow("critical section done", "total protected size", s.szProtectedTxns, "total marked live size", s.szMarkedLiveRefs)
if err := checkpoint.Close(); err != nil {
log.Warnf("error closing checkpoint: %s", err)
@ -788,10 +827,13 @@ func (s *SplitStore) doCompact(curTs *types.TipSet) error {
if err := os.Remove(s.coldSetPath()); err != nil {
log.Warnf("error removing coldset: %s", err)
}
if err := os.Remove(s.discardSetPath()); err != nil {
log.Warnf("error removing discardset: %s", err)
}
// we are done; do some housekeeping
s.endTxnProtect()
s.gcHotstore()
s.gcHotAfterCompaction()
err = s.setBaseEpoch(boundaryEpoch)
if err != nil {
@ -851,7 +893,7 @@ func (s *SplitStore) beginCriticalSection(markSet MarkSet) error {
return nil
}
func (s *SplitStore) waitForSync() {
func (s *SplitStore) waitForTxnSync() {
log.Info("waiting for sync")
if !CheckSyncGap {
log.Warnf("If you see this outside of test it is a serious splitstore issue")
@ -870,6 +912,25 @@ func (s *SplitStore) waitForSync() {
}
}
// Block compaction operations if chain sync has fallen behind
func (s *SplitStore) waitForSync() {
if atomic.LoadInt32(&s.outOfSync) == 0 {
return
}
s.chainSyncMx.Lock()
defer s.chainSyncMx.Unlock()
for !s.chainSyncFinished {
s.chainSyncCond.Wait()
}
}
// Combined sync and closing check
func (s *SplitStore) checkYield() error {
s.waitForSync()
return s.checkClosing()
}
func (s *SplitStore) endTxnProtect() {
s.txnLk.Lock()
defer s.txnLk.Unlock()
@ -904,6 +965,7 @@ func (s *SplitStore) walkChain(ts *types.TipSet, inclState, inclMsgs abi.ChainEp
copy(toWalk, ts.Cids())
walkCnt := new(int64)
scanCnt := new(int64)
szWalk := new(int64)
tsRef := func(blkCids []cid.Cid) (cid.Cid, error) {
return types.NewTipSetKey(blkCids...).Cid()
@ -939,48 +1001,64 @@ func (s *SplitStore) walkChain(ts *types.TipSet, inclState, inclMsgs abi.ChainEp
if err != nil {
return xerrors.Errorf("error computing cid reference to parent tipset")
}
if err := s.walkObjectIncomplete(pRef, visitor, fHot, stopWalk); err != nil {
sz, err := s.walkObjectIncomplete(pRef, visitor, fHot, stopWalk)
if err != nil {
return xerrors.Errorf("error walking parent tipset cid reference")
}
atomic.AddInt64(szWalk, sz)
// messages are retained if within the inclMsgs boundary
if hdr.Height >= inclMsgs && hdr.Height > 0 {
if inclMsgs < inclState {
// we need to use walkObjectIncomplete here, as messages/receipts may be missing early on if we
// synced from snapshot and have a long HotStoreMessageRetentionPolicy.
if err := s.walkObjectIncomplete(hdr.Messages, visitor, fHot, stopWalk); err != nil {
sz, err := s.walkObjectIncomplete(hdr.Messages, visitor, fHot, stopWalk)
if err != nil {
return xerrors.Errorf("error walking messages (cid: %s): %w", hdr.Messages, err)
}
atomic.AddInt64(szWalk, sz)
if err := s.walkObjectIncomplete(hdr.ParentMessageReceipts, visitor, fHot, stopWalk); err != nil {
sz, err = s.walkObjectIncomplete(hdr.ParentMessageReceipts, visitor, fHot, stopWalk)
if err != nil {
return xerrors.Errorf("error walking messages receipts (cid: %s): %w", hdr.ParentMessageReceipts, err)
}
atomic.AddInt64(szWalk, sz)
} else {
if err := s.walkObject(hdr.Messages, visitor, fHot); err != nil {
sz, err = s.walkObject(hdr.Messages, visitor, fHot)
if err != nil {
return xerrors.Errorf("error walking messages (cid: %s): %w", hdr.Messages, err)
}
atomic.AddInt64(szWalk, sz)
if err := s.walkObjectIncomplete(hdr.ParentMessageReceipts, visitor, fHot, stopWalk); err != nil {
sz, err := s.walkObjectIncomplete(hdr.ParentMessageReceipts, visitor, fHot, stopWalk)
if err != nil {
return xerrors.Errorf("error walking message receipts (cid: %s): %w", hdr.ParentMessageReceipts, err)
}
atomic.AddInt64(szWalk, sz)
}
}
// messages and receipts outside of inclMsgs are included in the cold store
if hdr.Height < inclMsgs && hdr.Height > 0 {
if err := s.walkObjectIncomplete(hdr.Messages, visitor, fCold, stopWalk); err != nil {
sz, err := s.walkObjectIncomplete(hdr.Messages, visitor, fCold, stopWalk)
if err != nil {
return xerrors.Errorf("error walking messages (cid: %s): %w", hdr.Messages, err)
}
if err := s.walkObjectIncomplete(hdr.ParentMessageReceipts, visitor, fCold, stopWalk); err != nil {
atomic.AddInt64(szWalk, sz)
sz, err = s.walkObjectIncomplete(hdr.ParentMessageReceipts, visitor, fCold, stopWalk)
if err != nil {
return xerrors.Errorf("error walking messages receipts (cid: %s): %w", hdr.ParentMessageReceipts, err)
}
atomic.AddInt64(szWalk, sz)
}
// state is only retained if within the inclState boundary, with the exception of genesis
if hdr.Height >= inclState || hdr.Height == 0 {
if err := s.walkObject(hdr.ParentStateRoot, visitor, fHot); err != nil {
sz, err := s.walkObject(hdr.ParentStateRoot, visitor, fHot)
if err != nil {
return xerrors.Errorf("error walking state root (cid: %s): %w", hdr.ParentStateRoot, err)
}
atomic.AddInt64(szWalk, sz)
atomic.AddInt64(scanCnt, 1)
}
@ -998,13 +1076,15 @@ func (s *SplitStore) walkChain(ts *types.TipSet, inclState, inclMsgs abi.ChainEp
if err != nil {
return xerrors.Errorf("error computing cid reference to parent tipset")
}
if err := s.walkObjectIncomplete(hRef, visitor, fHot, stopWalk); err != nil {
sz, err := s.walkObjectIncomplete(hRef, visitor, fHot, stopWalk)
if err != nil {
return xerrors.Errorf("error walking parent tipset cid reference")
}
atomic.AddInt64(szWalk, sz)
for len(toWalk) > 0 {
// walking can take a while, so check this with every opportunity
if err := s.checkClosing(); err != nil {
if err := s.checkYield(); err != nil {
return err
}
@ -1044,123 +1124,129 @@ func (s *SplitStore) walkChain(ts *types.TipSet, inclState, inclMsgs abi.ChainEp
}
}
log.Infow("chain walk done", "walked", *walkCnt, "scanned", *scanCnt)
log.Infow("chain walk done", "walked", *walkCnt, "scanned", *scanCnt, "walk size", szWalk)
s.szWalk = atomic.LoadInt64(szWalk)
return nil
}
func (s *SplitStore) walkObject(c cid.Cid, visitor ObjectVisitor, f func(cid.Cid) error) error {
func (s *SplitStore) walkObject(c cid.Cid, visitor ObjectVisitor, f func(cid.Cid) error) (int64, error) {
var sz int64
visit, err := visitor.Visit(c)
if err != nil {
return xerrors.Errorf("error visiting object: %w", err)
return 0, xerrors.Errorf("error visiting object: %w", err)
}
if !visit {
return nil
return sz, nil
}
if err := f(c); err != nil {
if err == errStopWalk {
return nil
return sz, nil
}
return err
return 0, err
}
if c.Prefix().Codec != cid.DagCBOR {
return nil
return sz, nil
}
// check this before recursing
if err := s.checkClosing(); err != nil {
return err
if err := s.checkYield(); err != nil {
return 0, err
}
var links []cid.Cid
err = s.view(c, func(data []byte) error {
sz += int64(len(data))
return cbg.ScanForLinks(bytes.NewReader(data), func(c cid.Cid) {
links = append(links, c)
})
})
if err != nil {
return xerrors.Errorf("error scanning linked block (cid: %s): %w", c, err)
return 0, xerrors.Errorf("error scanning linked block (cid: %s): %w", c, err)
}
for _, c := range links {
err := s.walkObject(c, visitor, f)
szLink, err := s.walkObject(c, visitor, f)
if err != nil {
return xerrors.Errorf("error walking link (cid: %s): %w", c, err)
return 0, xerrors.Errorf("error walking link (cid: %s): %w", c, err)
}
sz += szLink
}
return nil
return sz, nil
}
// like walkObject, but the object may be potentially incomplete (references missing)
func (s *SplitStore) walkObjectIncomplete(c cid.Cid, visitor ObjectVisitor, f, missing func(cid.Cid) error) error {
func (s *SplitStore) walkObjectIncomplete(c cid.Cid, visitor ObjectVisitor, f, missing func(cid.Cid) error) (int64, error) {
var sz int64
visit, err := visitor.Visit(c)
if err != nil {
return xerrors.Errorf("error visiting object: %w", err)
return 0, xerrors.Errorf("error visiting object: %w", err)
}
if !visit {
return nil
return sz, nil
}
// occurs check -- only for DAGs
if c.Prefix().Codec == cid.DagCBOR {
has, err := s.has(c)
if err != nil {
return xerrors.Errorf("error occur checking %s: %w", c, err)
return 0, xerrors.Errorf("error occur checking %s: %w", c, err)
}
if !has {
err = missing(c)
if err == errStopWalk {
return nil
return sz, nil
}
return err
return 0, err
}
}
if err := f(c); err != nil {
if err == errStopWalk {
return nil
return sz, nil
}
return err
return 0, err
}
if c.Prefix().Codec != cid.DagCBOR {
return nil
return sz, nil
}
// check this before recursing
if err := s.checkClosing(); err != nil {
return err
if err := s.checkYield(); err != nil {
return sz, err
}
var links []cid.Cid
err = s.view(c, func(data []byte) error {
sz += int64(len(data))
return cbg.ScanForLinks(bytes.NewReader(data), func(c cid.Cid) {
links = append(links, c)
})
})
if err != nil {
return xerrors.Errorf("error scanning linked block (cid: %s): %w", c, err)
return 0, xerrors.Errorf("error scanning linked block (cid: %s): %w", c, err)
}
for _, c := range links {
err := s.walkObjectIncomplete(c, visitor, f, missing)
szLink, err := s.walkObjectIncomplete(c, visitor, f, missing)
if err != nil {
return xerrors.Errorf("error walking link (cid: %s): %w", c, err)
return 0, xerrors.Errorf("error walking link (cid: %s): %w", c, err)
}
sz += szLink
}
return nil
return sz, nil
}
// internal version used during compaction and related operations
@ -1223,7 +1309,7 @@ func (s *SplitStore) moveColdBlocks(coldr *ColdSetReader) error {
batch := make([]blocks.Block, 0, batchSize)
err := coldr.ForEach(func(c cid.Cid) error {
if err := s.checkClosing(); err != nil {
if err := s.checkYield(); err != nil {
return err
}
blk, err := s.hot.Get(s.ctx, c)
@ -1426,8 +1512,9 @@ func (s *SplitStore) completeCompaction() error {
}
s.compactType = none
// Note: at this point we can start the splitstore; a compaction should run on
// the first head change, which will trigger gc on the hotstore.
// Note: at this point we can start the splitstore; base epoch is not
// incremented here so a compaction should run on the first head
// change, which will trigger gc on the hotstore.
// We don't mind the second (back-to-back) compaction as the head will
// have advanced during marking and coldset accumulation.
return nil
@ -1485,6 +1572,13 @@ func (s *SplitStore) completePurge(coldr *ColdSetReader, checkpoint *Checkpoint,
return nil
}
func (s *SplitStore) clearSizeMeasurements() {
s.szKeys = 0
s.szMarkedLiveRefs = 0
s.szProtectedTxns = 0
s.szWalk = 0
}
// I really don't like having this code, but we seem to have some occasional DAG references with
// missing constituents. During testing in mainnet *some* of these references *sometimes* appeared
// after a little bit.
@ -1525,7 +1619,7 @@ func (s *SplitStore) waitForMissingRefs(markSet MarkSet) {
missing = make(map[cid.Cid]struct{})
for c := range towalk {
err := s.walkObjectIncomplete(c, visitor,
_, err := s.walkObjectIncomplete(c, visitor,
func(c cid.Cid) error {
if isUnitaryObject(c) {
return errStopWalk

View File

@ -77,6 +77,10 @@ func (es *exposedSplitStore) GetSize(ctx context.Context, c cid.Cid) (int, error
return size, err
}
func (es *exposedSplitStore) Flush(ctx context.Context) error {
return es.s.Flush(ctx)
}
func (es *exposedSplitStore) Put(ctx context.Context, blk blocks.Block) error {
return es.s.Put(ctx, blk)
}

View File

@ -7,23 +7,74 @@ import (
bstore "github.com/filecoin-project/lotus/blockstore"
)
func (s *SplitStore) gcHotstore() {
const (
// Fraction of garbage in a badger vlog above which aggressive online GC considers it worth collecting
AggressiveOnlineGCThreshold = 0.0001
)
func (s *SplitStore) gcHotAfterCompaction() {
// Measure the hotstore size, then determine whether we should do full GC and whether we can do full GC.
// We should do full GC if
// HotStoreFullGCFrequency is specified and the compaction index matches the frequency
// OR HotstoreMaxSpaceTarget is specified and the total moving space is within 150 GB of the target
// We can do full GC if
// HotstoreMaxSpaceTarget is not specified
// OR the total moving space would not exceed 50 GB below the target
//
// a) If we should not do full GC => online GC
// b) If we should do full GC and can => moving GC
// c) If we should do full GC and can't => aggressive online GC
getSize := func() int64 {
sizer, ok := s.hot.(bstore.BlockstoreSize)
if ok {
size, err := sizer.Size()
if err != nil {
log.Warnf("error getting hotstore size: %s, estimating empty hot store for targeting", err)
return 0
}
return size
}
log.Errorf("Could not measure hotstore size, assuming it is 0 bytes, which it is not")
return 0
}
hotSize := getSize()
copySizeApprox := s.szKeys + s.szMarkedLiveRefs + s.szProtectedTxns + s.szWalk
shouldTarget := s.cfg.HotstoreMaxSpaceTarget > 0 && hotSize+copySizeApprox > int64(s.cfg.HotstoreMaxSpaceTarget)-int64(s.cfg.HotstoreMaxSpaceThreshold)
shouldFreq := s.cfg.HotStoreFullGCFrequency > 0 && s.compactionIndex%int64(s.cfg.HotStoreFullGCFrequency) == 0
shouldDoFull := shouldTarget || shouldFreq
canDoFull := s.cfg.HotstoreMaxSpaceTarget == 0 || hotSize+copySizeApprox < int64(s.cfg.HotstoreMaxSpaceTarget)-int64(s.cfg.HotstoreMaxSpaceSafetyBuffer)
log.Debugw("approximating new hot store size", "key size", s.szKeys, "marked live refs", s.szMarkedLiveRefs, "protected txns", s.szProtectedTxns, "walked DAG", s.szWalk)
log.Infof("measured hot store size: %d, approximate new size: %d, should do full %t, can do full %t", hotSize, copySizeApprox, shouldDoFull, canDoFull)
var opts []bstore.BlockstoreGCOption
if s.cfg.HotStoreFullGCFrequency > 0 && s.compactionIndex%int64(s.cfg.HotStoreFullGCFrequency) == 0 {
if shouldDoFull && canDoFull {
opts = append(opts, bstore.WithFullGC(true))
} else if shouldDoFull && !canDoFull {
log.Warnf("Attention! Estimated moving GC size %d is not within safety buffer %d of target max %d, performing aggressive online GC to attempt to bring hotstore size down safely", copySizeApprox, s.cfg.HotstoreMaxSpaceSafetyBuffer, s.cfg.HotstoreMaxSpaceTarget)
log.Warn("If problem continues you can 1) temporarily allocate more disk space to hotstore and 2) reflect in HotstoreMaxSpaceTarget OR trigger manual move with `lotus chain prune hot-moving`")
log.Warn("If problem continues and you do not have any more disk space you can run continue to manually trigger online GC at aggressive thresholds (< 0.01) with `lotus chain prune hot`")
opts = append(opts, bstore.WithThreshold(AggressiveOnlineGCThreshold))
}
if err := s.gcBlockstore(s.hot, opts); err != nil {
log.Warnf("error garbage collecting hostore: %s", err)
}
log.Infof("measured hot store size after GC: %d", getSize())
}
func (s *SplitStore) gcBlockstore(b bstore.Blockstore, opts []bstore.BlockstoreGCOption) error {
if err := s.checkYield(); err != nil {
return err
}
if gc, ok := b.(bstore.BlockstoreGC); ok {
log.Info("garbage collecting blockstore")
startGC := time.Now()
if err := gc.CollectGarbage(opts...); err != nil {
opts = append(opts, bstore.WithCheckFreq(90*time.Second))
opts = append(opts, bstore.WithCheck(s.checkYield))
if err := gc.CollectGarbage(s.ctx, opts...); err != nil {
return err
}
@ -33,3 +84,19 @@ func (s *SplitStore) gcBlockstore(b bstore.Blockstore, opts []bstore.BlockstoreG
return fmt.Errorf("blockstore doesn't support garbage collection: %T", b)
}
func (s *SplitStore) gcBlockstoreOnce(b bstore.Blockstore, opts []bstore.BlockstoreGCOption) error {
if gc, ok := b.(bstore.BlockstoreGCOnce); ok {
log.Debug("gc blockstore once")
startGC := time.Now()
if err := gc.GCOnce(s.ctx, opts...); err != nil {
return err
}
log.Debugw("gc blockstore once done", "took", time.Since(startGC))
return nil
}
return fmt.Errorf("blockstore doesn't support gc once: %T", b)
}

View File

@ -47,6 +47,23 @@ var (
PruneThreshold = 7 * build.Finality
)
// GCHotStore runs online GC on the chain state in the hotstore according to the options specified
func (s *SplitStore) GCHotStore(opts api.HotGCOpts) error {
if opts.Moving {
gcOpts := []bstore.BlockstoreGCOption{bstore.WithFullGC(true)}
return s.gcBlockstore(s.hot, gcOpts)
}
gcOpts := []bstore.BlockstoreGCOption{bstore.WithThreshold(opts.Threshold)}
var err error
if opts.Periodic {
err = s.gcBlockstore(s.hot, gcOpts)
} else {
err = s.gcBlockstoreOnce(s.hot, gcOpts)
}
return err
}
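From the API side this surfaces as ChainHotGC taking the HotGCOpts matched above; a sketch, assuming node is an api.FullNode handle and the threshold value is illustrative:

// hotGCOnce is a hypothetical caller of the new hotstore GC endpoint.
func hotGCOnce(ctx context.Context, node api.FullNode) error {
	// Request a single aggressive online GC pass over the hotstore.
	// Setting Moving: true would request a full moving GC instead, and
	// Periodic: true a repeated online GC rather than one pass.
	return node.ChainHotGC(ctx, api.HotGCOpts{
		Threshold: 0.01,
		Periodic:  false,
		Moving:    false,
	})
}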
// PruneChain instructs the SplitStore to prune chain state in the coldstore, according to the
// options specified.
func (s *SplitStore) PruneChain(opts api.PruneOpts) error {
@ -329,9 +346,9 @@ func (s *SplitStore) doPrune(curTs *types.TipSet, retainStateP func(int64) bool,
}
s.pruneIndex++
err = s.ds.Put(s.ctx, pruneIndexKey, int64ToBytes(s.compactionIndex))
err = s.ds.Put(s.ctx, pruneIndexKey, int64ToBytes(s.pruneIndex))
if err != nil {
return xerrors.Errorf("error saving compaction index: %w", err)
return xerrors.Errorf("error saving prune index: %w", err)
}
return nil

View File

@ -101,7 +101,7 @@ func (s *SplitStore) doReify(c cid.Cid) {
defer s.txnLk.RUnlock()
count := 0
err := s.walkObjectIncomplete(c, newTmpVisitor(),
_, err := s.walkObjectIncomplete(c, newTmpVisitor(),
func(c cid.Cid) error {
if isUnitaryObject(c) {
return errStopWalk

View File

@ -757,6 +757,8 @@ func (b *mockStore) DeleteMany(_ context.Context, cids []cid.Cid) error {
return nil
}
func (b *mockStore) Flush(context.Context) error { return nil }
func (b *mockStore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) {
return nil, errors.New("not implemented")
}

View File

@ -20,6 +20,8 @@ type SyncBlockstore struct {
bs MemBlockstore // specifically use a memStore to save indirection overhead.
}
func (*SyncBlockstore) Flush(context.Context) error { return nil }
func (m *SyncBlockstore) DeleteBlock(ctx context.Context, k cid.Cid) error {
m.mu.Lock()
defer m.mu.Unlock()

View File

@ -93,6 +93,16 @@ func (t *TimedCacheBlockstore) rotate() {
t.mu.Unlock()
}
func (t *TimedCacheBlockstore) Flush(ctx context.Context) error {
t.mu.Lock()
defer t.mu.Unlock()
if err := t.active.Flush(ctx); err != nil {
return err
}
return t.inactive.Flush(ctx)
}
func (t *TimedCacheBlockstore) Put(ctx context.Context, b blocks.Block) error {
// Don't check the inactive set here. We want to keep this block for at
// least one interval.

View File

@ -55,6 +55,15 @@ func (m unionBlockstore) GetSize(ctx context.Context, cid cid.Cid) (size int, er
return size, err
}
func (m unionBlockstore) Flush(ctx context.Context) (err error) {
for _, bs := range m {
if err = bs.Flush(ctx); err != nil {
break
}
}
return err
}
func (m unionBlockstore) Put(ctx context.Context, block blocks.Block) (err error) {
for _, bs := range m {
if err = bs.Put(ctx, block); err != nil {

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

View File

@ -26,17 +26,13 @@ const UnixfsLinksPerLevel = 1024
const AllowableClockDriftSecs = uint64(1)
// TODO: nv19: Re-enable when migration is set up
//// Used by tests and some obscure tooling
///* inline-gen template
//
//const TestNetworkVersion = network.Version{{.latestNetworkVersion}}
//
///* inline-gen start */
// Used by tests and some obscure tooling
/* inline-gen template
const TestNetworkVersion = network.Version{{.latestNetworkVersion}}
/* inline-gen start */
const TestNetworkVersion = network.Version20
const TestNetworkVersion = network.Version18
///* inline-gen end */
/* inline-gen end */
// Epochs
const ForkLengthThreshold = Finality

View File

@ -37,7 +37,7 @@ func BuildTypeString() string {
}
// BuildVersion is the local build version
const BuildVersion = "1.22.1"
const BuildVersion = "1.23.0"
func UserVersion() string {
if os.Getenv("LOTUS_VERSION_IGNORE_COMMIT") == "1" {

View File

@ -4,7 +4,6 @@ import (
"bytes"
"fmt"
"go/format"
"io/ioutil"
"os"
"path/filepath"
"strconv"
@ -66,7 +65,7 @@ func generateAdapters() error {
}
{
af, err := ioutil.ReadFile(filepath.Join(actDir, "actor.go.template"))
af, err := os.ReadFile(filepath.Join(actDir, "actor.go.template"))
if err != nil {
return xerrors.Errorf("loading actor template: %w", err)
}
@ -90,7 +89,7 @@ func generateAdapters() error {
return err
}
if err := ioutil.WriteFile(filepath.Join(actDir, fmt.Sprintf("%s.go", act)), fmted, 0666); err != nil {
if err := os.WriteFile(filepath.Join(actDir, fmt.Sprintf("%s.go", act)), fmted, 0666); err != nil {
return err
}
}
@ -100,7 +99,7 @@ func generateAdapters() error {
}
func generateState(actDir string, versions []int) error {
af, err := ioutil.ReadFile(filepath.Join(actDir, "state.go.template"))
af, err := os.ReadFile(filepath.Join(actDir, "state.go.template"))
if err != nil {
if os.IsNotExist(err) {
return nil // skip
@ -123,7 +122,7 @@ func generateState(actDir string, versions []int) error {
return err
}
if err := ioutil.WriteFile(filepath.Join(actDir, fmt.Sprintf("v%d.go", version)), b.Bytes(), 0666); err != nil {
if err := os.WriteFile(filepath.Join(actDir, fmt.Sprintf("v%d.go", version)), b.Bytes(), 0666); err != nil {
return err
}
}
@ -132,7 +131,7 @@ func generateState(actDir string, versions []int) error {
}
func generateMessages(actDir string) error {
af, err := ioutil.ReadFile(filepath.Join(actDir, "message.go.template"))
af, err := os.ReadFile(filepath.Join(actDir, "message.go.template"))
if err != nil {
if os.IsNotExist(err) {
return nil // skip
@ -155,7 +154,7 @@ func generateMessages(actDir string) error {
return err
}
if err := ioutil.WriteFile(filepath.Join(actDir, fmt.Sprintf("message%d.go", version)), b.Bytes(), 0666); err != nil {
if err := os.WriteFile(filepath.Join(actDir, fmt.Sprintf("message%d.go", version)), b.Bytes(), 0666); err != nil {
return err
}
}
@ -165,7 +164,7 @@ func generateMessages(actDir string) error {
func generatePolicy(policyPath string) error {
pf, err := ioutil.ReadFile(policyPath + ".template")
pf, err := os.ReadFile(policyPath + ".template")
if err != nil {
if os.IsNotExist(err) {
return nil // skip
@ -187,7 +186,7 @@ func generatePolicy(policyPath string) error {
return err
}
if err := ioutil.WriteFile(policyPath, b.Bytes(), 0666); err != nil {
if err := os.WriteFile(policyPath, b.Bytes(), 0666); err != nil {
return err
}
@ -196,7 +195,7 @@ func generatePolicy(policyPath string) error {
func generateBuiltin(builtinPath string) error {
bf, err := ioutil.ReadFile(builtinPath + ".template")
bf, err := os.ReadFile(builtinPath + ".template")
if err != nil {
if os.IsNotExist(err) {
return nil // skip
@ -218,7 +217,7 @@ func generateBuiltin(builtinPath string) error {
return err
}
if err := ioutil.WriteFile(builtinPath, b.Bytes(), 0666); err != nil {
if err := os.WriteFile(builtinPath, b.Bytes(), 0666); err != nil {
return err
}
@ -227,7 +226,7 @@ func generateBuiltin(builtinPath string) error {
func generateRegistry(registryPath string) error {
bf, err := ioutil.ReadFile(registryPath + ".template")
bf, err := os.ReadFile(registryPath + ".template")
if err != nil {
if os.IsNotExist(err) {
return nil // skip
@ -248,7 +247,7 @@ func generateRegistry(registryPath string) error {
return err
}
if err := ioutil.WriteFile(registryPath, b.Bytes(), 0666); err != nil {
if err := os.WriteFile(registryPath, b.Bytes(), 0666); err != nil {
return err
}

View File

@ -83,6 +83,7 @@ type State interface {
GetAllocations(clientIdAddr address.Address) (map[AllocationId]Allocation, error)
GetClaim(providerIdAddr address.Address, claimId ClaimId) (*Claim, bool, error)
GetClaims(providerIdAddr address.Address) (map[ClaimId]Claim, error)
GetClaimIdsBySector(providerIdAddr address.Address) (map[abi.SectorNumber][]ClaimId, error)
GetState() interface{}
}

View File

@ -170,6 +170,27 @@ func (s *state{{.v}}) GetClaims(providerIdAddr address.Address) (map[ClaimId]Cla
{{end}}
}
func (s *state{{.v}}) GetClaimIdsBySector(providerIdAddr address.Address) (map[abi.SectorNumber][]ClaimId, error) {
{{if (le .v 8)}}
return nil, xerrors.Errorf("unsupported in actors v{{.v}}")
{{else}}
v{{.v}}Map, err := s.LoadClaimsToMap(s.store, providerIdAddr)
retMap := make(map[abi.SectorNumber][]ClaimId)
for k, v := range v{{.v}}Map {
claims, ok := retMap[v.Sector]
if !ok {
retMap[v.Sector] = []ClaimId{ClaimId(k)}
} else {
retMap[v.Sector] = append(claims, ClaimId(k))
}
}
return retMap, err
{{end}}
}
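As an aside, the ok-branch in this template body is equivalent to a bare append, since appending to the nil slice of an absent map key allocates it; the loop could be shortened to:

for k, v := range v{{.v}}Map {
	retMap[v.Sector] = append(retMap[v.Sector], ClaimId(k))
}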
func (s *state{{.v}}) ActorKey() string {
return manifest.VerifregKey
}

View File

@ -118,6 +118,12 @@ func (s *state0) GetClaims(providerIdAddr address.Address) (map[ClaimId]Claim, e
}
func (s *state0) GetClaimIdsBySector(providerIdAddr address.Address) (map[abi.SectorNumber][]ClaimId, error) {
return nil, xerrors.Errorf("unsupported in actors v0")
}
func (s *state0) ActorKey() string {
return manifest.VerifregKey
}

View File

@ -134,6 +134,24 @@ func (s *state10) GetClaims(providerIdAddr address.Address) (map[ClaimId]Claim,
}
func (s *state10) GetClaimIdsBySector(providerIdAddr address.Address) (map[abi.SectorNumber][]ClaimId, error) {
v10Map, err := s.LoadClaimsToMap(s.store, providerIdAddr)
retMap := make(map[abi.SectorNumber][]ClaimId)
for k, v := range v10Map {
claims, ok := retMap[v.Sector]
if !ok {
retMap[v.Sector] = []ClaimId{ClaimId(k)}
} else {
retMap[v.Sector] = append(claims, ClaimId(k))
}
}
return retMap, err
}
func (s *state10) ActorKey() string {
return manifest.VerifregKey
}

View File

@ -134,6 +134,24 @@ func (s *state11) GetClaims(providerIdAddr address.Address) (map[ClaimId]Claim,
}
func (s *state11) GetClaimIdsBySector(providerIdAddr address.Address) (map[abi.SectorNumber][]ClaimId, error) {
v11Map, err := s.LoadClaimsToMap(s.store, providerIdAddr)
retMap := make(map[abi.SectorNumber][]ClaimId)
for k, v := range v11Map {
claims, ok := retMap[v.Sector]
if !ok {
retMap[v.Sector] = []ClaimId{ClaimId(k)}
} else {
retMap[v.Sector] = append(claims, ClaimId(k))
}
}
return retMap, err
}
func (s *state11) ActorKey() string {
return manifest.VerifregKey
}

View File

@ -118,6 +118,12 @@ func (s *state2) GetClaims(providerIdAddr address.Address) (map[ClaimId]Claim, e
}
func (s *state2) GetClaimIdsBySector(providerIdAddr address.Address) (map[abi.SectorNumber][]ClaimId, error) {
return nil, xerrors.Errorf("unsupported in actors v2")
}
func (s *state2) ActorKey() string {
return manifest.VerifregKey
}

View File

@ -119,6 +119,12 @@ func (s *state3) GetClaims(providerIdAddr address.Address) (map[ClaimId]Claim, e
}
func (s *state3) GetClaimIdsBySector(providerIdAddr address.Address) (map[abi.SectorNumber][]ClaimId, error) {
return nil, xerrors.Errorf("unsupported in actors v3")
}
func (s *state3) ActorKey() string {
return manifest.VerifregKey
}

View File

@ -119,6 +119,12 @@ func (s *state4) GetClaims(providerIdAddr address.Address) (map[ClaimId]Claim, e
}
func (s *state4) GetClaimIdsBySector(providerIdAddr address.Address) (map[abi.SectorNumber][]ClaimId, error) {
return nil, xerrors.Errorf("unsupported in actors v4")
}
func (s *state4) ActorKey() string {
return manifest.VerifregKey
}

View File

@ -119,6 +119,12 @@ func (s *state5) GetClaims(providerIdAddr address.Address) (map[ClaimId]Claim, e
}
func (s *state5) GetClaimIdsBySector(providerIdAddr address.Address) (map[abi.SectorNumber][]ClaimId, error) {
return nil, xerrors.Errorf("unsupported in actors v5")
}
func (s *state5) ActorKey() string {
return manifest.VerifregKey
}

View File

@ -119,6 +119,12 @@ func (s *state6) GetClaims(providerIdAddr address.Address) (map[ClaimId]Claim, e
}
func (s *state6) GetClaimIdsBySector(providerIdAddr address.Address) (map[abi.SectorNumber][]ClaimId, error) {
return nil, xerrors.Errorf("unsupported in actors v6")
}
func (s *state6) ActorKey() string {
return manifest.VerifregKey
}

View File

@ -118,6 +118,12 @@ func (s *state7) GetClaims(providerIdAddr address.Address) (map[ClaimId]Claim, e
}
func (s *state7) GetClaimIdsBySector(providerIdAddr address.Address) (map[abi.SectorNumber][]ClaimId, error) {
return nil, xerrors.Errorf("unsupported in actors v7")
}
func (s *state7) ActorKey() string {
return manifest.VerifregKey
}

View File

@ -118,6 +118,12 @@ func (s *state8) GetClaims(providerIdAddr address.Address) (map[ClaimId]Claim, e
}
func (s *state8) GetClaimIdsBySector(providerIdAddr address.Address) (map[abi.SectorNumber][]ClaimId, error) {
return nil, xerrors.Errorf("unsupported in actors v8")
}
func (s *state8) ActorKey() string {
return manifest.VerifregKey
}

View File

@ -133,6 +133,24 @@ func (s *state9) GetClaims(providerIdAddr address.Address) (map[ClaimId]Claim, e
}
func (s *state9) GetClaimIdsBySector(providerIdAddr address.Address) (map[abi.SectorNumber][]ClaimId, error) {
v9Map, err := s.LoadClaimsToMap(s.store, providerIdAddr)
retMap := make(map[abi.SectorNumber][]ClaimId)
for k, v := range v9Map {
claims, ok := retMap[v.Sector]
if !ok {
retMap[v.Sector] = []ClaimId{ClaimId(k)}
} else {
retMap[v.Sector] = append(claims, ClaimId(k))
}
}
return retMap, err
}
func (s *state9) ActorKey() string {
return manifest.VerifregKey
}

View File

@ -137,6 +137,7 @@ type State interface {
GetAllocations(clientIdAddr address.Address) (map[AllocationId]Allocation, error)
GetClaim(providerIdAddr address.Address, claimId ClaimId) (*Claim, bool, error)
GetClaims(providerIdAddr address.Address) (map[ClaimId]Claim, error)
GetClaimIdsBySector(providerIdAddr address.Address) (map[abi.SectorNumber][]ClaimId, error)
GetState() interface{}
}

View File

@ -3,14 +3,14 @@ package chain
import (
"fmt"
lru "github.com/hashicorp/golang-lru"
lru "github.com/hashicorp/golang-lru/v2"
"github.com/ipfs/go-cid"
"github.com/filecoin-project/lotus/build"
)
type BadBlockCache struct {
badBlocks *lru.ARCCache
badBlocks *lru.ARCCache[cid.Cid, BadBlockReason]
}
type BadBlockReason struct {
@ -43,7 +43,7 @@ func (bbr BadBlockReason) String() string {
}
func NewBadBlockCache() *BadBlockCache {
cache, err := lru.NewARC(build.BadBlockCacheSize)
cache, err := lru.NewARC[cid.Cid, BadBlockReason](build.BadBlockCacheSize)
if err != nil {
panic(err) // ok
}
@ -66,10 +66,5 @@ func (bts *BadBlockCache) Purge() {
}
func (bts *BadBlockCache) Has(c cid.Cid) (BadBlockReason, bool) {
rval, ok := bts.badBlocks.Get(c)
if !ok {
return BadBlockReason{}, false
}
return rval.(BadBlockReason), true
return bts.badBlocks.Get(c)
}
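This migration repeats across the release: golang-lru v2 is generic, so the key and value types are fixed at construction and the interface{} assertions on Get disappear, as in the Has rewrite above. A standalone sketch of the v2 API (toy types, not Lotus code):

package example

import (
	lru "github.com/hashicorp/golang-lru/v2"
)

func typedCache() (int, bool) {
	// The cache is typed at construction, so Get returns (V, bool) directly
	// and a miss yields the zero value instead of a failed type assertion.
	cache, _ := lru.New[string, int](128)
	cache.Add("height", 42)
	return cache.Get("height")
}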

View File

@ -8,14 +8,14 @@ import (
dchain "github.com/drand/drand/chain"
dclient "github.com/drand/drand/client"
hclient "github.com/drand/drand/client/http"
"github.com/drand/drand/common/scheme"
dlog "github.com/drand/drand/log"
gclient "github.com/drand/drand/lp2p/client"
"github.com/drand/kyber"
kzap "github.com/go-kit/kit/log/zap"
lru "github.com/hashicorp/golang-lru"
lru "github.com/hashicorp/golang-lru/v2"
logging "github.com/ipfs/go-log/v2"
pubsub "github.com/libp2p/go-libp2p-pubsub"
"go.uber.org/zap/zapcore"
"go.uber.org/zap"
"golang.org/x/xerrors"
"github.com/filecoin-project/go-state-types/abi"
@ -61,7 +61,7 @@ type DrandBeacon struct {
filGenTime uint64
filRoundTime uint64
localCache *lru.Cache
localCache *lru.Cache[uint64, *types.BeaconEntry]
}
// DrandHTTPClient interface overrides the user agent used by drand
@ -69,6 +69,18 @@ type DrandHTTPClient interface {
SetUserAgent(string)
}
type logger struct {
*zap.SugaredLogger
}
func (l *logger) With(args ...interface{}) dlog.Logger {
return &logger{l.SugaredLogger.With(args...)}
}
func (l *logger) Named(s string) dlog.Logger {
return &logger{l.SugaredLogger.Named(s)}
}
func NewDrandBeacon(genesisTs, interval uint64, ps *pubsub.PubSub, config dtypes.DrandConfig) (*DrandBeacon, error) {
if genesisTs == 0 {
panic("what are you doing this cant be zero")
@ -79,9 +91,6 @@ func NewDrandBeacon(genesisTs, interval uint64, ps *pubsub.PubSub, config dtypes
return nil, xerrors.Errorf("unable to unmarshal drand chain info: %w", err)
}
dlogger := dlog.NewKitLoggerFrom(kzap.NewZapSugarLogger(
log.SugaredLogger.Desugar(), zapcore.InfoLevel))
var clients []dclient.Client
for _, url := range config.Servers {
hc, err := hclient.NewWithInfo(url, drandChain, nil)
@ -96,7 +105,7 @@ func NewDrandBeacon(genesisTs, interval uint64, ps *pubsub.PubSub, config dtypes
opts := []dclient.Option{
dclient.WithChainInfo(drandChain),
dclient.WithCacheSize(1024),
dclient.WithLogger(dlogger),
dclient.WithLogger(&logger{&log.SugaredLogger}),
}
if ps != nil {
@ -110,7 +119,7 @@ func NewDrandBeacon(genesisTs, interval uint64, ps *pubsub.PubSub, config dtypes
return nil, xerrors.Errorf("creating drand client: %w", err)
}
lc, err := lru.New(1024)
lc, err := lru.New[uint64, *types.BeaconEntry](1024)
if err != nil {
return nil, err
}
@ -160,16 +169,12 @@ func (db *DrandBeacon) Entry(ctx context.Context, round uint64) <-chan beacon.Re
return out
}
func (db *DrandBeacon) cacheValue(e types.BeaconEntry) {
db.localCache.Add(e.Round, e)
db.localCache.Add(e.Round, &e)
}
func (db *DrandBeacon) getCachedValue(round uint64) *types.BeaconEntry {
v, ok := db.localCache.Get(round)
if !ok {
return nil
}
e, _ := v.(types.BeaconEntry)
return &e
v, _ := db.localCache.Get(round)
return v
}
func (db *DrandBeacon) VerifyEntry(curr types.BeaconEntry, prev types.BeaconEntry) error {
@ -194,7 +199,7 @@ func (db *DrandBeacon) VerifyEntry(curr types.BeaconEntry, prev types.BeaconEntr
Round: curr.Round,
Signature: curr.Data,
}
err := dchain.VerifyBeacon(db.pubkey, b)
err := dchain.NewVerifier(scheme.GetSchemeFromEnv()).VerifyBeacon(*b, db.pubkey)
if err == nil {
db.cacheValue(curr)
}

View File

@ -3,6 +3,7 @@
package drand
import (
"context"
"os"
"testing"
@ -20,11 +21,11 @@ func TestPrintGroupInfo(t *testing.T) {
c, err := hclient.New(server, nil, nil)
assert.NoError(t, err)
cg := c.(interface {
FetchChainInfo(groupHash []byte) (*dchain.Info, error)
FetchChainInfo(ctx context.Context, groupHash []byte) (*dchain.Info, error)
})
chain, err := cg.FetchChainInfo(nil)
chain, err := cg.FetchChainInfo(context.Background(), nil)
assert.NoError(t, err)
err = chain.ToJSON(os.Stdout)
err = chain.ToJSON(os.Stdout, nil)
assert.NoError(t, err)
}

View File

@ -5,7 +5,7 @@ import (
"sync"
"time"
lru "github.com/hashicorp/golang-lru"
lru "github.com/hashicorp/golang-lru/v2"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/filecoin-project/lotus/build"
@ -17,7 +17,7 @@ type blockReceiptTracker struct {
// using an LRU cache because I don't want to handle all the edge cases for
// manual cleanup and maintenance of a fixed size set
cache *lru.Cache
cache *lru.Cache[types.TipSetKey, *peerSet]
}
type peerSet struct {
@ -25,7 +25,7 @@ type peerSet struct {
}
func newBlockReceiptTracker() *blockReceiptTracker {
c, _ := lru.New(512)
c, _ := lru.New[types.TipSetKey, *peerSet](512)
return &blockReceiptTracker{
cache: c,
}
@ -46,20 +46,18 @@ func (brt *blockReceiptTracker) Add(p peer.ID, ts *types.TipSet) {
return
}
val.(*peerSet).peers[p] = build.Clock.Now()
val.peers[p] = build.Clock.Now()
}
func (brt *blockReceiptTracker) GetPeers(ts *types.TipSet) []peer.ID {
brt.lk.Lock()
defer brt.lk.Unlock()
val, ok := brt.cache.Get(ts.Key())
ps, ok := brt.cache.Get(ts.Key())
if !ok {
return nil
}
ps := val.(*peerSet)
out := make([]peer.ID, 0, len(ps.peers))
for p := range ps.peers {
out = append(out, p)

chain/consensus/common.go (new file, 514 lines)
View File

@ -0,0 +1,514 @@
package consensus
import (
"context"
"fmt"
"strings"
"github.com/hashicorp/go-multierror"
"github.com/ipfs/go-cid"
cbor "github.com/ipfs/go-ipld-cbor"
logging "github.com/ipfs/go-log/v2"
pubsub "github.com/libp2p/go-libp2p-pubsub"
"github.com/multiformats/go-varint"
cbg "github.com/whyrusleeping/cbor-gen"
"go.opencensus.io/stats"
"golang.org/x/xerrors"
"github.com/filecoin-project/go-address"
builtintypes "github.com/filecoin-project/go-state-types/builtin"
"github.com/filecoin-project/go-state-types/crypto"
"github.com/filecoin-project/go-state-types/network"
blockadt "github.com/filecoin-project/specs-actors/actors/util/adt"
"github.com/filecoin-project/lotus/api"
bstore "github.com/filecoin-project/lotus/blockstore"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/actors/builtin"
"github.com/filecoin-project/lotus/chain/state"
"github.com/filecoin-project/lotus/chain/stmgr"
"github.com/filecoin-project/lotus/chain/store"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/chain/vm"
"github.com/filecoin-project/lotus/lib/async"
"github.com/filecoin-project/lotus/metrics"
)
// Common operations shared by all consensus algorithm implementations.
var log = logging.Logger("consensus-common")
// RunAsyncChecks accepts a list of checks to perform in parallel.
//
// Each consensus algorithm may choose to perform a different set of
// checks when a new block is received.
func RunAsyncChecks(ctx context.Context, await []async.ErrorFuture) error {
var merr error
for _, fut := range await {
if err := fut.AwaitContext(ctx); err != nil {
merr = multierror.Append(merr, err)
}
}
if merr != nil {
mulErr := merr.(*multierror.Error)
mulErr.ErrorFormat = func(es []error) string {
if len(es) == 1 {
return fmt.Sprintf("1 error occurred:\n\t* %+v\n\n", es[0])
}
points := make([]string, len(es))
for i, err := range es {
points[i] = fmt.Sprintf("* %+v", err)
}
return fmt.Sprintf(
"%d errors occurred:\n\t%s\n\n",
len(es), strings.Join(points, "\n\t"))
}
return mulErr
}
return nil
}
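// Illustration (not part of this file): a consensus implementation typically
// combines the shared checks below with its own and aggregates them here, e.g.
//
//	await := consensus.CommonBlkChecks(ctx, sm, cs, b, baseTs)
//	await = append(await, async.Err(func() error { return extraCheck(b) }))
//	return consensus.RunAsyncChecks(ctx, await)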
// CommonBlkChecks builds the block checks performed by all consensus implementations.
func CommonBlkChecks(ctx context.Context, sm *stmgr.StateManager, cs *store.ChainStore,
b *types.FullBlock, baseTs *types.TipSet) []async.ErrorFuture {
h := b.Header
msgsCheck := async.Err(func() error {
if b.Cid() == build.WhitelistedBlock {
return nil
}
if err := checkBlockMessages(ctx, sm, cs, b, baseTs); err != nil {
return xerrors.Errorf("block had invalid messages: %w", err)
}
return nil
})
baseFeeCheck := async.Err(func() error {
baseFee, err := cs.ComputeBaseFee(ctx, baseTs)
if err != nil {
return xerrors.Errorf("computing base fee: %w", err)
}
if types.BigCmp(baseFee, b.Header.ParentBaseFee) != 0 {
return xerrors.Errorf("base fee doesn't match: %s (header) != %s (computed)",
b.Header.ParentBaseFee, baseFee)
}
return nil
})
stateRootCheck := async.Err(func() error {
stateroot, precp, err := sm.TipSetState(ctx, baseTs)
if err != nil {
return xerrors.Errorf("get tipsetstate(%d, %s) failed: %w", h.Height, h.Parents, err)
}
if stateroot != h.ParentStateRoot {
msgs, err := cs.MessagesForTipset(ctx, baseTs)
if err != nil {
log.Error("failed to load messages for tipset during tipset state mismatch error: ", err)
} else {
log.Warn("Messages for tipset with mismatching state:")
for i, m := range msgs {
mm := m.VMMessage()
log.Warnf("Message[%d]: from=%s to=%s method=%d params=%x", i, mm.From, mm.To, mm.Method, mm.Params)
}
}
return xerrors.Errorf("parent state root did not match computed state (%s != %s)", h.ParentStateRoot, stateroot)
}
if precp != h.ParentMessageReceipts {
return xerrors.Errorf("parent receipts root did not match computed value (%s != %s)", precp, h.ParentMessageReceipts)
}
return nil
})
return []async.ErrorFuture{
msgsCheck,
baseFeeCheck,
stateRootCheck,
}
}
func IsValidForSending(nv network.Version, act *types.Actor) bool {
// Before nv18 (Hygge), we only supported built-in account actors as senders.
//
// Note: this gate is probably superfluous, since:
// 1. Placeholder actors cannot be created before nv18.
// 2. EthAccount actors cannot be created before nv18.
// 3. Delegated addresses cannot be created before nv18.
//
// But it's a safeguard.
//
// Note 2: ad-hoc checks for network versions like this across the codebase
// will be problematic with networks with diverging version lineages
// (e.g. Hyperspace). We need to revisit this strategy entirely.
if nv < network.Version18 {
return builtin.IsAccountActor(act.Code)
}
// After nv18, we also support other kinds of senders.
if builtin.IsAccountActor(act.Code) || builtin.IsEthAccountActor(act.Code) {
return true
}
// Allow placeholder actors with a delegated address and nonce 0 to send a message.
// These will be converted to an EthAccount actor on first send.
if !builtin.IsPlaceholderActor(act.Code) || act.Nonce != 0 || act.Address == nil || act.Address.Protocol() != address.Delegated {
return false
}
// Only allow such actors to send if their delegated address is in the EAM's namespace.
id, _, err := varint.FromUvarint(act.Address.Payload())
return err == nil && id == builtintypes.EthereumAddressManagerActorID
}
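// Illustration (not part of this file): the EAM is the built-in actor with ID
// 10, so the delegated addresses in its namespace are the familiar f410...
// addresses. A placeholder actor at nonce 0 with such an address passes this
// check and is promoted to an EthAccount on its first send.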
func checkBlockMessages(ctx context.Context, sm *stmgr.StateManager, cs *store.ChainStore, b *types.FullBlock, baseTs *types.TipSet) error {
{
var sigCids []cid.Cid // this is what we get for people not wanting the marshalcbor method on the cid type
var pubks [][]byte
for _, m := range b.BlsMessages {
sigCids = append(sigCids, m.Cid())
pubk, err := sm.GetBlsPublicKey(ctx, m.From, baseTs)
if err != nil {
return xerrors.Errorf("failed to load bls public to validate block: %w", err)
}
pubks = append(pubks, pubk)
}
if err := VerifyBlsAggregate(ctx, b.Header.BLSAggregate, sigCids, pubks); err != nil {
return xerrors.Errorf("bls aggregate signature was invalid: %w", err)
}
}
nonces := make(map[address.Address]uint64)
stateroot, _, err := sm.TipSetState(ctx, baseTs)
if err != nil {
return xerrors.Errorf("failed to compute tipsettate for %s: %w", baseTs.Key(), err)
}
st, err := state.LoadStateTree(cs.ActorStore(ctx), stateroot)
if err != nil {
return xerrors.Errorf("failed to load base state tree: %w", err)
}
nv := sm.GetNetworkVersion(ctx, b.Header.Height)
pl := vm.PricelistByEpoch(b.Header.Height)
var sumGasLimit int64
checkMsg := func(msg types.ChainMsg) error {
m := msg.VMMessage()
// Phase 1: syntactic validation, as defined in the spec
minGas := pl.OnChainMessage(msg.ChainLength())
if err := m.ValidForBlockInclusion(minGas.Total(), nv); err != nil {
return xerrors.Errorf("msg %s invalid for block inclusion: %w", m.Cid(), err)
}
// ValidForBlockInclusion checks that no single message exceeds BlockGasLimit,
// so the sum below cannot overflow.
sumGasLimit += m.GasLimit
if sumGasLimit > build.BlockGasLimit {
return xerrors.Errorf("block gas limit exceeded")
}
// Phase 2: (Partial) semantic validation:
// the sender exists and is an account actor, and the nonces make sense
var sender address.Address
if nv >= network.Version13 {
sender, err = st.LookupID(m.From)
if err != nil {
return xerrors.Errorf("failed to lookup sender %s: %w", m.From, err)
}
} else {
sender = m.From
}
if _, ok := nonces[sender]; !ok {
// `GetActor` does not validate that this is an account actor.
act, err := st.GetActor(sender)
if err != nil {
return xerrors.Errorf("failed to get actor: %w", err)
}
if !IsValidForSending(nv, act) {
return xerrors.New("Sender must be an account actor")
}
nonces[sender] = act.Nonce
}
if nonces[sender] != m.Nonce {
return xerrors.Errorf("wrong nonce (exp: %d, got: %d)", nonces[sender], m.Nonce)
}
nonces[sender]++
return nil
}
// Validate message arrays in a temporary blockstore.
tmpbs := bstore.NewMemory()
tmpstore := blockadt.WrapStore(ctx, cbor.NewCborStore(tmpbs))
bmArr := blockadt.MakeEmptyArray(tmpstore)
for i, m := range b.BlsMessages {
if err := checkMsg(m); err != nil {
return xerrors.Errorf("block had invalid bls message at index %d: %w", i, err)
}
c, err := store.PutMessage(ctx, tmpbs, m)
if err != nil {
return xerrors.Errorf("failed to store message %s: %w", m.Cid(), err)
}
k := cbg.CborCid(c)
if err := bmArr.Set(uint64(i), &k); err != nil {
return xerrors.Errorf("failed to put bls message at index %d: %w", i, err)
}
}
smArr := blockadt.MakeEmptyArray(tmpstore)
for i, m := range b.SecpkMessages {
if nv >= network.Version14 && !IsValidSecpkSigType(nv, m.Signature.Type) {
return xerrors.Errorf("block had invalid signed message at index %d: %w", i, err)
}
if err := checkMsg(m); err != nil {
return xerrors.Errorf("block had invalid secpk message at index %d: %w", i, err)
}
// `From` being an account actor is only validated inside the `vm.ResolveToDeterministicAddr` call
// in `StateManager.ResolveToDeterministicAddress` here (and not in `checkMsg`).
kaddr, err := sm.ResolveToDeterministicAddress(ctx, m.Message.From, baseTs)
if err != nil {
return xerrors.Errorf("failed to resolve key addr: %w", err)
}
if err := AuthenticateMessage(m, kaddr); err != nil {
return xerrors.Errorf("failed to validate signature: %w", err)
}
c, err := store.PutMessage(ctx, tmpbs, m)
if err != nil {
return xerrors.Errorf("failed to store message %s: %w", m.Cid(), err)
}
k := cbg.CborCid(c)
if err := smArr.Set(uint64(i), &k); err != nil {
return xerrors.Errorf("failed to put secpk message at index %d: %w", i, err)
}
}
bmroot, err := bmArr.Root()
if err != nil {
return xerrors.Errorf("failed to root bls msgs: %w", err)
}
smroot, err := smArr.Root()
if err != nil {
return xerrors.Errorf("failed to root secp msgs: %w", err)
}
mrcid, err := tmpstore.Put(ctx, &types.MsgMeta{
BlsMessages: bmroot,
SecpkMessages: smroot,
})
if err != nil {
return xerrors.Errorf("failed to put msg meta: %w", err)
}
if b.Header.Messages != mrcid {
return fmt.Errorf("messages didnt match message root in header")
}
// Finally, flush.
err = vm.Copy(ctx, tmpbs, cs.ChainBlockstore(), mrcid)
if err != nil {
return xerrors.Errorf("failed to flush:%w", err)
}
return nil
}
// CreateBlockHeader generates the block header from the block template of
// the block being proposed.
func CreateBlockHeader(ctx context.Context, sm *stmgr.StateManager, pts *types.TipSet,
bt *api.BlockTemplate) (*types.BlockHeader, []*types.Message, []*types.SignedMessage, error) {
st, recpts, err := sm.TipSetState(ctx, pts)
if err != nil {
return nil, nil, nil, xerrors.Errorf("failed to load tipset state: %w", err)
}
next := &types.BlockHeader{
Miner: bt.Miner,
Parents: bt.Parents.Cids(),
Ticket: bt.Ticket,
ElectionProof: bt.Eproof,
BeaconEntries: bt.BeaconValues,
Height: bt.Epoch,
Timestamp: bt.Timestamp,
WinPoStProof: bt.WinningPoStProof,
ParentStateRoot: st,
ParentMessageReceipts: recpts,
}
var blsMessages []*types.Message
var secpkMessages []*types.SignedMessage
var blsMsgCids, secpkMsgCids []cid.Cid
var blsSigs []crypto.Signature
nv := sm.GetNetworkVersion(ctx, bt.Epoch)
for _, msg := range bt.Messages {
if msg.Signature.Type == crypto.SigTypeBLS {
blsSigs = append(blsSigs, msg.Signature)
blsMessages = append(blsMessages, &msg.Message)
c, err := sm.ChainStore().PutMessage(ctx, &msg.Message)
if err != nil {
return nil, nil, nil, err
}
blsMsgCids = append(blsMsgCids, c)
} else if IsValidSecpkSigType(nv, msg.Signature.Type) {
c, err := sm.ChainStore().PutMessage(ctx, msg)
if err != nil {
return nil, nil, nil, err
}
secpkMsgCids = append(secpkMsgCids, c)
secpkMessages = append(secpkMessages, msg)
} else {
return nil, nil, nil, xerrors.Errorf("unknown sig type: %d", msg.Signature.Type)
}
}
store := sm.ChainStore().ActorStore(ctx)
blsmsgroot, err := ToMessagesArray(store, blsMsgCids)
if err != nil {
return nil, nil, nil, xerrors.Errorf("building bls amt: %w", err)
}
secpkmsgroot, err := ToMessagesArray(store, secpkMsgCids)
if err != nil {
return nil, nil, nil, xerrors.Errorf("building secpk amt: %w", err)
}
mmcid, err := store.Put(store.Context(), &types.MsgMeta{
BlsMessages: blsmsgroot,
SecpkMessages: secpkmsgroot,
})
if err != nil {
return nil, nil, nil, err
}
next.Messages = mmcid
aggSig, err := AggregateSignatures(blsSigs)
if err != nil {
return nil, nil, nil, err
}
next.BLSAggregate = aggSig
pweight, err := sm.ChainStore().Weight(ctx, pts)
if err != nil {
return nil, nil, nil, err
}
next.ParentWeight = pweight
baseFee, err := sm.ChainStore().ComputeBaseFee(ctx, pts)
if err != nil {
return nil, nil, nil, xerrors.Errorf("computing base fee: %w", err)
}
next.ParentBaseFee = baseFee
return next, blsMessages, secpkMessages, err
}
// Basic sanity-checks performed when a block is proposed locally.
func validateLocalBlock(ctx context.Context, msg *pubsub.Message) (pubsub.ValidationResult, string) {
stats.Record(ctx, metrics.BlockPublished.M(1))
if size := msg.Size(); size > 1<<20-1<<15 {
log.Errorf("ignoring oversize block (%dB)", size)
return pubsub.ValidationIgnore, "oversize_block"
}
blk, what, err := decodeAndCheckBlock(msg)
if err != nil {
log.Errorf("got invalid local block: %s", err)
return pubsub.ValidationIgnore, what
}
msg.ValidatorData = blk
stats.Record(ctx, metrics.BlockValidationSuccess.M(1))
return pubsub.ValidationAccept, ""
}
func decodeAndCheckBlock(msg *pubsub.Message) (*types.BlockMsg, string, error) {
blk, err := types.DecodeBlockMsg(msg.GetData())
if err != nil {
return nil, "invalid", xerrors.Errorf("error decoding block: %w", err)
}
if count := len(blk.BlsMessages) + len(blk.SecpkMessages); count > build.BlockMessageLimit {
return nil, "too_many_messages", fmt.Errorf("block contains too many messages (%d)", count)
}
// make sure we have a signature
if blk.Header.BlockSig == nil {
return nil, "missing_signature", fmt.Errorf("block without a signature")
}
return blk, "", nil
}
func validateMsgMeta(ctx context.Context, msg *types.BlockMsg) error {
// TODO there has to be a simpler way to do this without the blockstore dance
// block headers use adt0
store := blockadt.WrapStore(ctx, cbor.NewCborStore(bstore.NewMemory()))
bmArr := blockadt.MakeEmptyArray(store)
smArr := blockadt.MakeEmptyArray(store)
for i, m := range msg.BlsMessages {
c := cbg.CborCid(m)
if err := bmArr.Set(uint64(i), &c); err != nil {
return err
}
}
for i, m := range msg.SecpkMessages {
c := cbg.CborCid(m)
if err := smArr.Set(uint64(i), &c); err != nil {
return err
}
}
bmroot, err := bmArr.Root()
if err != nil {
return err
}
smroot, err := smArr.Root()
if err != nil {
return err
}
mrcid, err := store.Put(store.Context(), &types.MsgMeta{
BlsMessages: bmroot,
SecpkMessages: smroot,
})
if err != nil {
return err
}
if msg.Header.Messages != mrcid {
return fmt.Errorf("messages didn't match root cid in header")
}
return nil
}

View File

@ -1,8 +1,9 @@
package filcns
package consensus
import (
"context"
"sync/atomic"
"time"
"github.com/ipfs/go-cid"
cbor "github.com/ipfs/go-ipld-cbor"
@ -26,7 +27,6 @@ import (
"github.com/filecoin-project/lotus/blockstore"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/actors"
"github.com/filecoin-project/lotus/chain/actors/builtin"
"github.com/filecoin-project/lotus/chain/actors/builtin/cron"
"github.com/filecoin-project/lotus/chain/actors/builtin/reward"
@ -56,10 +56,12 @@ func NewActorRegistry() *vm.ActorRegistry {
return inv
}
type TipSetExecutor struct{}
type TipSetExecutor struct {
reward RewardFunc
}
func NewTipSetExecutor() *TipSetExecutor {
return &TipSetExecutor{}
func NewTipSetExecutor(r RewardFunc) *TipSetExecutor {
return &TipSetExecutor{reward: r}
}
func (t *TipSetExecutor) NewActorRegistry() *vm.ActorRegistry {
@ -113,6 +115,8 @@ func (t *TipSetExecutor) ApplyBlocks(ctx context.Context,
return sm.VMConstructor()(ctx, vmopt)
}
var cronGas int64
runCron := func(vmCron vm.Interface, epoch abi.ChainEpoch) error {
cronMsg := &types.Message{
To: cron.Address,
@ -130,6 +134,8 @@ func (t *TipSetExecutor) ApplyBlocks(ctx context.Context,
return xerrors.Errorf("running cron: %w", err)
}
cronGas += ret.GasUsed
if em != nil {
if err := em.MessageApplied(ctx, ts, cronMsg.Cid(), cronMsg, ret, true); err != nil {
return xerrors.Errorf("callback failed on cron message: %w", err)
@ -181,7 +187,9 @@ func (t *TipSetExecutor) ApplyBlocks(ctx context.Context,
}
}
partDone()
vmEarly := partDone()
earlyCronGas := cronGas
cronGas = 0
partDone = metrics.Timer(ctx, metrics.VMApplyMessages)
vmi, err := makeVm(pstate, epoch, ts.MinTimestamp())
@ -196,6 +204,8 @@ func (t *TipSetExecutor) ApplyBlocks(ctx context.Context,
processedMsgs = make(map[cid.Cid]struct{})
)
var msgGas int64
for _, b := range bms {
penalty := types.NewInt(0)
gasReward := big.Zero()
@ -210,6 +220,8 @@ func (t *TipSetExecutor) ApplyBlocks(ctx context.Context,
return cid.Undef, cid.Undef, err
}
msgGas += r.GasUsed
receipts = append(receipts, &r.MessageReceipt)
gasReward = big.Add(gasReward, r.GasCosts.MinerTip)
penalty = big.Add(penalty, r.GasCosts.MinerPenalty)
@ -227,50 +239,26 @@ func (t *TipSetExecutor) ApplyBlocks(ctx context.Context,
processedMsgs[m.Cid()] = struct{}{}
}
params, err := actors.SerializeParams(&reward.AwardBlockRewardParams{
params := &reward.AwardBlockRewardParams{
Miner: b.Miner,
Penalty: penalty,
GasReward: gasReward,
WinCount: b.WinCount,
})
if err != nil {
return cid.Undef, cid.Undef, xerrors.Errorf("failed to serialize award params: %w", err)
}
rwMsg := &types.Message{
From: builtin.SystemActorAddr,
To: reward.Address,
Nonce: uint64(epoch),
Value: types.NewInt(0),
GasFeeCap: types.NewInt(0),
GasPremium: types.NewInt(0),
GasLimit: 1 << 30,
Method: reward.Methods.AwardBlockReward,
Params: params,
}
ret, actErr := vmi.ApplyImplicitMessage(ctx, rwMsg)
if actErr != nil {
return cid.Undef, cid.Undef, xerrors.Errorf("failed to apply reward message for miner %s: %w", b.Miner, actErr)
}
if em != nil {
if err := em.MessageApplied(ctx, ts, rwMsg.Cid(), rwMsg, ret, true); err != nil {
return cid.Undef, cid.Undef, xerrors.Errorf("callback failed on reward message: %w", err)
rErr := t.reward(ctx, vmi, em, epoch, ts, params)
if rErr != nil {
return cid.Undef, cid.Undef, xerrors.Errorf("error applying reward: %w", rErr)
}
}
if ret.ExitCode != 0 {
return cid.Undef, cid.Undef, xerrors.Errorf("reward application message failed (exit %d): %s", ret.ExitCode, ret.ActorErr)
}
}
partDone()
vmMsg := partDone()
partDone = metrics.Timer(ctx, metrics.VMApplyCron)
if err := runCron(vmi, epoch); err != nil {
return cid.Cid{}, cid.Cid{}, err
}
partDone()
vmCron := partDone()
partDone = metrics.Timer(ctx, metrics.VMApplyFlush)
rectarr := blockadt.MakeEmptyArray(sm.ChainStore().ActorStore(ctx))
@ -306,6 +294,11 @@ func (t *TipSetExecutor) ApplyBlocks(ctx context.Context,
return cid.Undef, cid.Undef, xerrors.Errorf("vm flush failed: %w", err)
}
vmFlush := partDone()
partDone = func() time.Duration { return time.Duration(0) }
log.Infow("ApplyBlocks stats", "early", vmEarly, "earlyCronGas", earlyCronGas, "vmMsg", vmMsg, "msgGas", msgGas, "vmCron", vmCron, "cronGas", cronGas, "vmFlush", vmFlush, "epoch", epoch, "tsk", ts.Key())
stats.Record(ctx, metrics.VMSends.M(int64(atomic.LoadUint64(&vm.StatSends))),
metrics.VMApplied.M(int64(atomic.LoadUint64(&vm.StatApplied))))

View File

@ -4,46 +4,36 @@ import (
"bytes"
"context"
"errors"
"fmt"
"os"
"strings"
"time"
"github.com/hashicorp/go-multierror"
"github.com/ipfs/go-cid"
cbor "github.com/ipfs/go-ipld-cbor"
logging "github.com/ipfs/go-log/v2"
pubsub "github.com/libp2p/go-libp2p-pubsub"
"github.com/multiformats/go-varint"
cbg "github.com/whyrusleeping/cbor-gen"
"go.opencensus.io/stats"
"go.opencensus.io/trace"
"golang.org/x/xerrors"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
builtintypes "github.com/filecoin-project/go-state-types/builtin"
"github.com/filecoin-project/go-state-types/crypto"
"github.com/filecoin-project/go-state-types/network"
blockadt "github.com/filecoin-project/specs-actors/actors/util/adt"
"github.com/filecoin-project/specs-actors/v7/actors/runtime/proof"
bstore "github.com/filecoin-project/lotus/blockstore"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain"
"github.com/filecoin-project/lotus/chain/actors"
"github.com/filecoin-project/lotus/chain/actors/builtin"
"github.com/filecoin-project/lotus/chain/actors/builtin/power"
"github.com/filecoin-project/lotus/chain/actors/builtin/reward"
"github.com/filecoin-project/lotus/chain/beacon"
"github.com/filecoin-project/lotus/chain/consensus"
"github.com/filecoin-project/lotus/chain/rand"
"github.com/filecoin-project/lotus/chain/state"
"github.com/filecoin-project/lotus/chain/stmgr"
"github.com/filecoin-project/lotus/chain/store"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/chain/vm"
"github.com/filecoin-project/lotus/lib/async"
"github.com/filecoin-project/lotus/lib/sigs"
"github.com/filecoin-project/lotus/metrics"
"github.com/filecoin-project/lotus/storage/sealer/ffiwrapper"
"github.com/filecoin-project/lotus/storage/sealer/storiface"
)
@ -69,6 +59,39 @@ type FilecoinEC struct {
// the theoretical max height based on systime are quickly rejected
const MaxHeightDrift = 5
var RewardFunc = func(ctx context.Context, vmi vm.Interface, em stmgr.ExecMonitor,
epoch abi.ChainEpoch, ts *types.TipSet, params *reward.AwardBlockRewardParams) error {
ser, err := actors.SerializeParams(params)
if err != nil {
return xerrors.Errorf("failed to serialize award params: %w", err)
}
rwMsg := &types.Message{
From: builtin.SystemActorAddr,
To: reward.Address,
Nonce: uint64(epoch),
Value: types.NewInt(0),
GasFeeCap: types.NewInt(0),
GasPremium: types.NewInt(0),
GasLimit: 1 << 30,
Method: reward.Methods.AwardBlockReward,
Params: ser,
}
ret, actErr := vmi.ApplyImplicitMessage(ctx, rwMsg)
if actErr != nil {
return xerrors.Errorf("failed to apply reward message: %w", actErr)
}
if em != nil {
if err := em.MessageApplied(ctx, ts, rwMsg.Cid(), rwMsg, ret, true); err != nil {
return xerrors.Errorf("callback failed on reward message: %w", err)
}
}
if ret.ExitCode != 0 {
return xerrors.Errorf("reward application message failed (exit %d): %s", ret.ExitCode, ret.ActorErr)
}
return nil
}
func NewFilecoinExpectedConsensus(sm *stmgr.StateManager, beacon beacon.Schedule, verifier storiface.Verifier, genesis chain.Genesis) consensus.Consensus {
if build.InsecurePoStValidation {
log.Warn("*********************************************************************************************")
@ -127,17 +150,6 @@ func (filec *FilecoinEC) ValidateBlock(ctx context.Context, b *types.FullBlock)
log.Warn("Got block from the future, but within threshold", h.Timestamp, build.Clock.Now().Unix())
}
msgsCheck := async.Err(func() error {
if b.Cid() == build.WhitelistedBlock {
return nil
}
if err := filec.checkBlockMessages(ctx, b, baseTs); err != nil {
return xerrors.Errorf("block had invalid messages: %w", err)
}
return nil
})
minerCheck := async.Err(func() error {
if err := filec.minerIsValid(ctx, h.Miner, baseTs); err != nil {
return xerrors.Errorf("minerIsValid failed: %w", err)
@ -145,17 +157,6 @@ func (filec *FilecoinEC) ValidateBlock(ctx context.Context, b *types.FullBlock)
return nil
})
baseFeeCheck := async.Err(func() error {
baseFee, err := filec.store.ComputeBaseFee(ctx, baseTs)
if err != nil {
return xerrors.Errorf("computing base fee: %w", err)
}
if types.BigCmp(baseFee, b.Header.ParentBaseFee) != 0 {
return xerrors.Errorf("base fee doesn't match: %s (header) != %s (computed)",
b.Header.ParentBaseFee, baseFee)
}
return nil
})
pweight, err := filec.store.Weight(ctx, baseTs)
if err != nil {
return xerrors.Errorf("getting parent weight: %w", err)
@ -166,34 +167,6 @@ func (filec *FilecoinEC) ValidateBlock(ctx context.Context, b *types.FullBlock)
b.Header.ParentWeight, pweight)
}
stateRootCheck := async.Err(func() error {
stateroot, precp, err := filec.sm.TipSetState(ctx, baseTs)
if err != nil {
return xerrors.Errorf("get tipsetstate(%d, %s) failed: %w", h.Height, h.Parents, err)
}
if stateroot != h.ParentStateRoot {
msgs, err := filec.store.MessagesForTipset(ctx, baseTs)
if err != nil {
log.Error("failed to load messages for tipset during tipset state mismatch error: ", err)
} else {
log.Warn("Messages for tipset with mismatching state:")
for i, m := range msgs {
mm := m.VMMessage()
log.Warnf("Message[%d]: from=%s to=%s method=%d params=%x", i, mm.From, mm.To, mm.Method, mm.Params)
}
}
return xerrors.Errorf("parent state root did not match computed state (%s != %s)", h.ParentStateRoot, stateroot)
}
if precp != h.ParentMessageReceipts {
return xerrors.Errorf("parent receipts root did not match computed value (%s != %s)", precp, h.ParentMessageReceipts)
}
return nil
})
// Stuff that needs worker address
waddr, err := stmgr.GetMinerWorkerRaw(ctx, filec.sm, lbst, h.Miner)
if err != nil {
@ -255,10 +228,11 @@ func (filec *FilecoinEC) ValidateBlock(ctx context.Context, b *types.FullBlock)
})
blockSigCheck := async.Err(func() error {
if err := sigs.CheckBlockSignature(ctx, h, waddr); err != nil {
if err := verifyBlockSignature(ctx, h, waddr); err != nil {
return xerrors.Errorf("check block signature failed: %w", err)
}
return nil
})
beaconValuesCheck := async.Err(func() error {
@ -307,44 +281,17 @@ func (filec *FilecoinEC) ValidateBlock(ctx context.Context, b *types.FullBlock)
return nil
})
await := []async.ErrorFuture{
commonChecks := consensus.CommonBlkChecks(ctx, filec.sm, filec.store, b, baseTs)
await := append([]async.ErrorFuture{
minerCheck,
tktsCheck,
blockSigCheck,
beaconValuesCheck,
wproofCheck,
winnerCheck,
msgsCheck,
baseFeeCheck,
stateRootCheck,
}
}, commonChecks...)
var merr error
for _, fut := range await {
if err := fut.AwaitContext(ctx); err != nil {
merr = multierror.Append(merr, err)
}
}
if merr != nil {
mulErr := merr.(*multierror.Error)
mulErr.ErrorFormat = func(es []error) string {
if len(es) == 1 {
return fmt.Sprintf("1 error occurred:\n\t* %+v\n\n", es[0])
}
points := make([]string, len(es))
for i, err := range es {
points[i] = fmt.Sprintf("* %+v", err)
}
return fmt.Sprintf(
"%d errors occurred:\n\t%s\n\n",
len(es), strings.Join(points, "\n\t"))
}
return mulErr
}
return nil
return consensus.RunAsyncChecks(ctx, await)
}
func blockSanityChecks(h *types.BlockHeader) error {
@ -435,209 +382,6 @@ func (filec *FilecoinEC) VerifyWinningPoStProof(ctx context.Context, nv network.
return nil
}
func IsValidForSending(nv network.Version, act *types.Actor) bool {
// Before nv18 (Hygge), we only supported built-in account actors as senders.
//
// Note: this gate is probably superfluous, since:
// 1. Placeholder actors cannot be created before nv18.
// 2. EthAccount actors cannot be created before nv18.
// 3. Delegated addresses cannot be created before nv18.
//
// But it's a safeguard.
//
// Note 2: ad-hoc checks for network versions like this across the codebase
// will be problematic with networks with diverging version lineages
// (e.g. Hyperspace). We need to revisit this strategy entirely.
if nv < network.Version18 {
return builtin.IsAccountActor(act.Code)
}
// After nv18, we also support other kinds of senders.
if builtin.IsAccountActor(act.Code) || builtin.IsEthAccountActor(act.Code) {
return true
}
// Allow placeholder actors with a delegated address and nonce 0 to send a message.
// These will be converted to an EthAccount actor on first send.
if !builtin.IsPlaceholderActor(act.Code) || act.Nonce != 0 || act.Address == nil || act.Address.Protocol() != address.Delegated {
return false
}
// Only allow such actors to send if their delegated address is in the EAM's namespace.
id, _, err := varint.FromUvarint(act.Address.Payload())
return err == nil && id == builtintypes.EthereumAddressManagerActorID
}
// TODO: We should extract this somewhere else and make the message pool and miner use the same logic
func (filec *FilecoinEC) checkBlockMessages(ctx context.Context, b *types.FullBlock, baseTs *types.TipSet) error {
{
var sigCids []cid.Cid // this is what we get for people not wanting the marshalcbor method on the cid type
var pubks [][]byte
for _, m := range b.BlsMessages {
sigCids = append(sigCids, m.Cid())
pubk, err := filec.sm.GetBlsPublicKey(ctx, m.From, baseTs)
if err != nil {
return xerrors.Errorf("failed to load bls public to validate block: %w", err)
}
pubks = append(pubks, pubk)
}
if err := consensus.VerifyBlsAggregate(ctx, b.Header.BLSAggregate, sigCids, pubks); err != nil {
return xerrors.Errorf("bls aggregate signature was invalid: %w", err)
}
}
nonces := make(map[address.Address]uint64)
stateroot, _, err := filec.sm.TipSetState(ctx, baseTs)
if err != nil {
return xerrors.Errorf("failed to compute tipsettate for %s: %w", baseTs.Key(), err)
}
st, err := state.LoadStateTree(filec.store.ActorStore(ctx), stateroot)
if err != nil {
return xerrors.Errorf("failed to load base state tree: %w", err)
}
nv := filec.sm.GetNetworkVersion(ctx, b.Header.Height)
pl := vm.PricelistByEpoch(b.Header.Height)
var sumGasLimit int64
checkMsg := func(msg types.ChainMsg) error {
m := msg.VMMessage()
// Phase 1: syntactic validation, as defined in the spec
minGas := pl.OnChainMessage(msg.ChainLength())
if err := m.ValidForBlockInclusion(minGas.Total(), nv); err != nil {
return xerrors.Errorf("msg %s invalid for block inclusion: %w", m.Cid(), err)
}
// ValidForBlockInclusion checks if any single message does not exceed BlockGasLimit
// So below is overflow safe
sumGasLimit += m.GasLimit
if sumGasLimit > build.BlockGasLimit {
return xerrors.Errorf("block gas limit exceeded")
}
// Phase 2: (Partial) semantic validation:
// the sender exists and is an account actor, and the nonces make sense
var sender address.Address
if nv >= network.Version13 {
sender, err = st.LookupID(m.From)
if err != nil {
return xerrors.Errorf("failed to lookup sender %s: %w", m.From, err)
}
} else {
sender = m.From
}
if _, ok := nonces[sender]; !ok {
// `GetActor` does not validate that this is an account actor.
act, err := st.GetActor(sender)
if err != nil {
return xerrors.Errorf("failed to get actor: %w", err)
}
if !IsValidForSending(nv, act) {
return xerrors.New("Sender must be an account actor")
}
nonces[sender] = act.Nonce
}
if nonces[sender] != m.Nonce {
return xerrors.Errorf("wrong nonce (exp: %d, got: %d)", nonces[sender], m.Nonce)
}
nonces[sender]++
return nil
}
// Validate message arrays in a temporary blockstore.
tmpbs := bstore.NewMemory()
tmpstore := blockadt.WrapStore(ctx, cbor.NewCborStore(tmpbs))
bmArr := blockadt.MakeEmptyArray(tmpstore)
for i, m := range b.BlsMessages {
if err := checkMsg(m); err != nil {
return xerrors.Errorf("block had invalid bls message at index %d: %w", i, err)
}
c, err := store.PutMessage(ctx, tmpbs, m)
if err != nil {
return xerrors.Errorf("failed to store message %s: %w", m.Cid(), err)
}
k := cbg.CborCid(c)
if err := bmArr.Set(uint64(i), &k); err != nil {
return xerrors.Errorf("failed to put bls message at index %d: %w", i, err)
}
}
smArr := blockadt.MakeEmptyArray(tmpstore)
for i, m := range b.SecpkMessages {
if nv >= network.Version14 && !chain.IsValidSecpkSigType(nv, m.Signature.Type) {
return xerrors.Errorf("block had invalid signed message at index %d: %w", i, err)
}
if err := checkMsg(m); err != nil {
return xerrors.Errorf("block had invalid secpk message at index %d: %w", i, err)
}
// `From` being an account actor is only validated inside the `vm.ResolveToDeterministicAddr` call
// in `StateManager.ResolveToDeterministicAddress` here (and not in `checkMsg`).
kaddr, err := filec.sm.ResolveToDeterministicAddress(ctx, m.Message.From, baseTs)
if err != nil {
return xerrors.Errorf("failed to resolve key addr: %w", err)
}
if err := chain.AuthenticateMessage(m, kaddr); err != nil {
return xerrors.Errorf("failed to validate signature: %w", err)
}
c, err := store.PutMessage(ctx, tmpbs, m)
if err != nil {
return xerrors.Errorf("failed to store message %s: %w", m.Cid(), err)
}
k := cbg.CborCid(c)
if err := smArr.Set(uint64(i), &k); err != nil {
return xerrors.Errorf("failed to put secpk message at index %d: %w", i, err)
}
}
bmroot, err := bmArr.Root()
if err != nil {
return xerrors.Errorf("failed to root bls msgs: %w", err)
}
smroot, err := smArr.Root()
if err != nil {
return xerrors.Errorf("failed to root secp msgs: %w", err)
}
mrcid, err := tmpstore.Put(ctx, &types.MsgMeta{
BlsMessages: bmroot,
SecpkMessages: smroot,
})
if err != nil {
return xerrors.Errorf("failed to put msg meta: %w", err)
}
if b.Header.Messages != mrcid {
return fmt.Errorf("messages didnt match message root in header")
}
// Finally, flush.
err = vm.Copy(ctx, tmpbs, filec.store.ChainBlockstore(), mrcid)
if err != nil {
return xerrors.Errorf("failed to flush:%w", err)
}
return nil
}
func (filec *FilecoinEC) IsEpochBeyondCurrMax(epoch abi.ChainEpoch) bool {
if filec.genesis == nil {
return false
@ -693,140 +437,7 @@ func VerifyVRF(ctx context.Context, worker address.Address, vrfBase, vrfproof []
var ErrSoftFailure = errors.New("soft validation failure")
var ErrInsufficientPower = errors.New("incoming block's miner does not have minimum power")
func (filec *FilecoinEC) ValidateBlockPubsub(ctx context.Context, self bool, msg *pubsub.Message) (pubsub.ValidationResult, string) {
if self {
return filec.validateLocalBlock(ctx, msg)
}
// track validation time
begin := build.Clock.Now()
defer func() {
log.Debugf("block validation time: %s", build.Clock.Since(begin))
}()
stats.Record(ctx, metrics.BlockReceived.M(1))
recordFailureFlagPeer := func(what string) {
// bv.Validate will flag the peer in that case
panic(what)
}
blk, what, err := filec.decodeAndCheckBlock(msg)
if err != nil {
log.Error("got invalid block over pubsub: ", err)
recordFailureFlagPeer(what)
return pubsub.ValidationReject, what
}
// validate the block meta: the Message CID in the header must match the included messages
err = filec.validateMsgMeta(ctx, blk)
if err != nil {
log.Warnf("error validating message metadata: %s", err)
recordFailureFlagPeer("invalid_block_meta")
return pubsub.ValidationReject, "invalid_block_meta"
}
reject, err := filec.validateBlockHeader(ctx, blk.Header)
if err != nil {
if reject == "" {
log.Warn("ignoring block msg: ", err)
return pubsub.ValidationIgnore, reject
}
recordFailureFlagPeer(reject)
return pubsub.ValidationReject, reject
}
// all good, accept the block
msg.ValidatorData = blk
stats.Record(ctx, metrics.BlockValidationSuccess.M(1))
return pubsub.ValidationAccept, ""
}
func (filec *FilecoinEC) validateLocalBlock(ctx context.Context, msg *pubsub.Message) (pubsub.ValidationResult, string) {
stats.Record(ctx, metrics.BlockPublished.M(1))
if size := msg.Size(); size > 1<<20-1<<15 {
log.Errorf("ignoring oversize block (%dB)", size)
return pubsub.ValidationIgnore, "oversize_block"
}
blk, what, err := filec.decodeAndCheckBlock(msg)
if err != nil {
log.Errorf("got invalid local block: %s", err)
return pubsub.ValidationIgnore, what
}
msg.ValidatorData = blk
stats.Record(ctx, metrics.BlockValidationSuccess.M(1))
return pubsub.ValidationAccept, ""
}
func (filec *FilecoinEC) decodeAndCheckBlock(msg *pubsub.Message) (*types.BlockMsg, string, error) {
blk, err := types.DecodeBlockMsg(msg.GetData())
if err != nil {
return nil, "invalid", xerrors.Errorf("error decoding block: %w", err)
}
if count := len(blk.BlsMessages) + len(blk.SecpkMessages); count > build.BlockMessageLimit {
return nil, "too_many_messages", fmt.Errorf("block contains too many messages (%d)", count)
}
// make sure we have a signature
if blk.Header.BlockSig == nil {
return nil, "missing_signature", fmt.Errorf("block without a signature")
}
return blk, "", nil
}
func (filec *FilecoinEC) validateMsgMeta(ctx context.Context, msg *types.BlockMsg) error {
// TODO there has to be a simpler way to do this without the blockstore dance
// block headers use adt0
store := blockadt.WrapStore(ctx, cbor.NewCborStore(bstore.NewMemory()))
bmArr := blockadt.MakeEmptyArray(store)
smArr := blockadt.MakeEmptyArray(store)
for i, m := range msg.BlsMessages {
c := cbg.CborCid(m)
if err := bmArr.Set(uint64(i), &c); err != nil {
return err
}
}
for i, m := range msg.SecpkMessages {
c := cbg.CborCid(m)
if err := smArr.Set(uint64(i), &c); err != nil {
return err
}
}
bmroot, err := bmArr.Root()
if err != nil {
return err
}
smroot, err := smArr.Root()
if err != nil {
return err
}
mrcid, err := store.Put(store.Context(), &types.MsgMeta{
BlsMessages: bmroot,
SecpkMessages: smroot,
})
if err != nil {
return err
}
if msg.Header.Messages != mrcid {
return fmt.Errorf("messages didn't match root cid in header")
}
return nil
}
func (filec *FilecoinEC) validateBlockHeader(ctx context.Context, b *types.BlockHeader) (rejectReason string, err error) {
func (filec *FilecoinEC) ValidateBlockHeader(ctx context.Context, b *types.BlockHeader) (rejectReason string, err error) {
// we want to ensure that it is a block from a known miner; we reject blocks from unknown miners
// to prevent spam attacks.
@ -901,4 +512,27 @@ func (filec *FilecoinEC) isChainNearSynced() bool {
return build.Clock.Since(timestampTime) < 6*time.Hour
}
func verifyBlockSignature(ctx context.Context, h *types.BlockHeader,
addr address.Address) error {
return sigs.CheckBlockSignature(ctx, h, addr)
}
func signBlock(ctx context.Context, w api.Wallet,
addr address.Address, next *types.BlockHeader) error {
nosigbytes, err := next.SigningBytes()
if err != nil {
return xerrors.Errorf("failed to get signing bytes for block: %w", err)
}
sig, err := w.WalletSign(ctx, addr, nosigbytes, api.MsgMeta{
Type: api.MTBlock,
})
if err != nil {
return xerrors.Errorf("failed to sign new block: %w", err)
}
next.BlockSig = sig
return nil
}
var _ consensus.Consensus = &FilecoinEC{}

View File

@ -3,13 +3,9 @@ package filcns
import (
"context"
"github.com/ipfs/go-cid"
"golang.org/x/xerrors"
"github.com/filecoin-project/go-state-types/crypto"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/chain"
"github.com/filecoin-project/lotus/chain/consensus"
"github.com/filecoin-project/lotus/chain/stmgr"
"github.com/filecoin-project/lotus/chain/types"
@ -21,11 +17,6 @@ func (filec *FilecoinEC) CreateBlock(ctx context.Context, w api.Wallet, bt *api.
return nil, xerrors.Errorf("failed to load parent tipset: %w", err)
}
st, recpts, err := filec.sm.TipSetState(ctx, pts)
if err != nil {
return nil, xerrors.Errorf("failed to load tipset state: %w", err)
}
_, lbst, err := stmgr.GetLookbackTipSetForRound(ctx, filec.sm, pts, bt.Epoch)
if err != nil {
return nil, xerrors.Errorf("getting lookback miner actor state: %w", err)
@ -36,102 +27,15 @@ func (filec *FilecoinEC) CreateBlock(ctx context.Context, w api.Wallet, bt *api.
return nil, xerrors.Errorf("failed to get miner worker: %w", err)
}
next := &types.BlockHeader{
Miner: bt.Miner,
Parents: bt.Parents.Cids(),
Ticket: bt.Ticket,
ElectionProof: bt.Eproof,
BeaconEntries: bt.BeaconValues,
Height: bt.Epoch,
Timestamp: bt.Timestamp,
WinPoStProof: bt.WinningPoStProof,
ParentStateRoot: st,
ParentMessageReceipts: recpts,
}
var blsMessages []*types.Message
var secpkMessages []*types.SignedMessage
var blsMsgCids, secpkMsgCids []cid.Cid
var blsSigs []crypto.Signature
nv := filec.sm.GetNetworkVersion(ctx, bt.Epoch)
for _, msg := range bt.Messages {
if msg.Signature.Type == crypto.SigTypeBLS {
blsSigs = append(blsSigs, msg.Signature)
blsMessages = append(blsMessages, &msg.Message)
c, err := filec.sm.ChainStore().PutMessage(ctx, &msg.Message)
next, blsMessages, secpkMessages, err := consensus.CreateBlockHeader(ctx, filec.sm, pts, bt)
if err != nil {
return nil, err
return nil, xerrors.Errorf("failed to process messages from block template: %w", err)
}
blsMsgCids = append(blsMsgCids, c)
} else if chain.IsValidSecpkSigType(nv, msg.Signature.Type) {
c, err := filec.sm.ChainStore().PutMessage(ctx, msg)
if err != nil {
return nil, err
}
secpkMsgCids = append(secpkMsgCids, c)
secpkMessages = append(secpkMessages, msg)
} else {
return nil, xerrors.Errorf("unknown sig type: %d", msg.Signature.Type)
}
}
store := filec.sm.ChainStore().ActorStore(ctx)
blsmsgroot, err := consensus.ToMessagesArray(store, blsMsgCids)
if err != nil {
return nil, xerrors.Errorf("building bls amt: %w", err)
}
secpkmsgroot, err := consensus.ToMessagesArray(store, secpkMsgCids)
if err != nil {
return nil, xerrors.Errorf("building secpk amt: %w", err)
}
mmcid, err := store.Put(store.Context(), &types.MsgMeta{
BlsMessages: blsmsgroot,
SecpkMessages: secpkmsgroot,
})
if err != nil {
return nil, err
}
next.Messages = mmcid
aggSig, err := consensus.AggregateSignatures(blsSigs)
if err != nil {
return nil, err
}
next.BLSAggregate = aggSig
pweight, err := filec.sm.ChainStore().Weight(ctx, pts)
if err != nil {
return nil, err
}
next.ParentWeight = pweight
baseFee, err := filec.sm.ChainStore().ComputeBaseFee(ctx, pts)
if err != nil {
return nil, xerrors.Errorf("computing base fee: %w", err)
}
next.ParentBaseFee = baseFee
nosigbytes, err := next.SigningBytes()
if err != nil {
return nil, xerrors.Errorf("failed to get signing bytes for block: %w", err)
}
sig, err := w.WalletSign(ctx, worker, nosigbytes, api.MsgMeta{
Type: api.MTBlock,
})
if err != nil {
if err := signBlock(ctx, w, worker, next); err != nil {
return nil, xerrors.Errorf("failed to sign new block: %w", err)
}
next.BlockSig = sig
fullBlock := &types.FullBlock{
Header: next,
BlsMessages: blsMessages,

View File

@ -533,11 +533,10 @@ func UpgradeFaucetBurnRecovery(ctx context.Context, sm *stmgr.StateManager, _ st
MessageReceipt: *stmgr.MakeFakeRct(),
ActorErr: nil,
ExecutionTrace: types.ExecutionTrace{
Msg: fakeMsg,
MsgRct: stmgr.MakeFakeRct(),
Error: "",
Duration: 0,
GasCharges: nil,
Msg: types.MessageTrace{
To: fakeMsg.To,
From: fakeMsg.From,
},
Subcalls: subcalls,
},
Duration: 0,
@ -711,11 +710,10 @@ func splitGenesisMultisig0(ctx context.Context, em stmgr.ExecMonitor, addr addre
MessageReceipt: *stmgr.MakeFakeRct(),
ActorErr: nil,
ExecutionTrace: types.ExecutionTrace{
Msg: fakeMsg,
MsgRct: stmgr.MakeFakeRct(),
Error: "",
Duration: 0,
GasCharges: nil,
Msg: types.MessageTrace{
From: fakeMsg.From,
To: fakeMsg.To,
},
Subcalls: subcalls,
},
Duration: 0,

View File

@ -4,17 +4,99 @@ import (
"context"
pubsub "github.com/libp2p/go-libp2p-pubsub"
"go.opencensus.io/stats"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/actors/builtin/reward"
"github.com/filecoin-project/lotus/chain/stmgr"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/chain/vm"
"github.com/filecoin-project/lotus/metrics"
)
type Consensus interface {
// ValidateBlockHeader is called by peers when they receive a new block through the network.
//
// This is a fast sanity-check validation performed by the PubSub protocol before delivering
// the block to the syncer. It checks that the block has the right format and performs other
// lightweight, consensus-specific verifications, such as ensuring that the block is signed
// by a valid miner and includes all the data required for a full verification.
ValidateBlockHeader(ctx context.Context, b *types.BlockHeader) (rejectReason string, err error)
// ValidateBlock is called by the syncer to determine whether to accept a block.
//
// It performs all the checks needed by the syncer to accept
// the block (signature verifications, VRF checks, message validity, etc.)
ValidateBlock(ctx context.Context, b *types.FullBlock) (err error)
ValidateBlockPubsub(ctx context.Context, self bool, msg *pubsub.Message) (pubsub.ValidationResult, string)
// IsEpochBeyondCurrMax is used to configure the fork rules for longest-chain
// consensus protocols.
IsEpochBeyondCurrMax(epoch abi.ChainEpoch) bool
// CreateBlock implements all the logic required to propose and assemble a new Filecoin block.
//
// This function encapsulates all the consensus-specific actions required to propose a new
// block, such as the ordering of transactions, the inclusion of consensus proofs, and the
// signing of the block.
CreateBlock(ctx context.Context, w api.Wallet, bt *api.BlockTemplate) (*types.FullBlock, error)
}
// RewardFunc parametrizes how block rewards are paid out when a tipset is executed.
//
// Each consensus implementation can set its own reward function.
type RewardFunc func(ctx context.Context, vmi vm.Interface, em stmgr.ExecMonitor,
epoch abi.ChainEpoch, ts *types.TipSet, params *reward.AwardBlockRewardParams) error
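// Illustration (not part of this file): the expected-consensus implementation
// injects its reward logic when the tipset executor is constructed, e.g.
//
//	exec := consensus.NewTipSetExecutor(filcns.RewardFunc)
//	sm, err := stmgr.NewStateManager(cs, exec, sys, us, beacon, ds)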
// ValidateBlockPubsub implements the common checks performed by all consensus implementations
// when a block is received through the pubsub channel.
func ValidateBlockPubsub(ctx context.Context, cns Consensus, self bool, msg *pubsub.Message) (pubsub.ValidationResult, string) {
if self {
return validateLocalBlock(ctx, msg)
}
// track validation time
begin := build.Clock.Now()
defer func() {
log.Debugf("block validation time: %s", build.Clock.Since(begin))
}()
stats.Record(ctx, metrics.BlockReceived.M(1))
recordFailureFlagPeer := func(what string) {
// bv.Validate will flag the peer in that case
panic(what)
}
blk, what, err := decodeAndCheckBlock(msg)
if err != nil {
log.Error("got invalid block over pubsub: ", err)
recordFailureFlagPeer(what)
return pubsub.ValidationReject, what
}
// validate the block meta: the Message CID in the header must match the included messages
err = validateMsgMeta(ctx, blk)
if err != nil {
log.Warnf("error validating message metadata: %s", err)
recordFailureFlagPeer("invalid_block_meta")
return pubsub.ValidationReject, "invalid_block_meta"
}
reject, err := cns.ValidateBlockHeader(ctx, blk.Header)
if err != nil {
if reject == "" {
log.Warn("ignoring block msg: ", err)
return pubsub.ValidationIgnore, reject
}
recordFailureFlagPeer(reject)
return pubsub.ValidationReject, reject
}
// all good, accept the block
msg.ValidatorData = blk
stats.Record(ctx, metrics.BlockValidationSuccess.M(1))
return pubsub.ValidationAccept, ""
}
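With the shared path above, a consensus implementation only has to provide the light ValidateBlockHeader hook; the pubsub plumbing is common. A sketch of the delegation (a hedged illustration; this wiring is assumed, not part of the change):

package example

import (
	"context"

	pubsub "github.com/libp2p/go-libp2p-pubsub"

	"github.com/filecoin-project/lotus/chain/consensus"
)

// blockValidator adapts a Consensus implementation to a pubsub validator.
func blockValidator(cns consensus.Consensus, self bool) func(context.Context, *pubsub.Message) (pubsub.ValidationResult, string) {
	return func(ctx context.Context, msg *pubsub.Message) (pubsub.ValidationResult, string) {
		return consensus.ValidateBlockPubsub(ctx, cns, self, msg)
	}
}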

View File

@ -1,4 +1,4 @@
package chain
package consensus
import (
"golang.org/x/xerrors"

View File

@ -4,7 +4,7 @@ import (
"context"
"sync"
lru "github.com/hashicorp/golang-lru"
lru "github.com/hashicorp/golang-lru/v2"
"github.com/ipfs/go-cid"
"github.com/filecoin-project/lotus/api"
@ -14,14 +14,14 @@ type messageCache struct {
api EventAPI
blockMsgLk sync.Mutex
blockMsgCache *lru.ARCCache
blockMsgCache *lru.ARCCache[cid.Cid, *api.BlockMessages]
}
func newMessageCache(api EventAPI) *messageCache {
blsMsgCache, _ := lru.NewARC(500)
func newMessageCache(a EventAPI) *messageCache {
blsMsgCache, _ := lru.NewARC[cid.Cid, *api.BlockMessages](500)
return &messageCache{
api: api,
api: a,
blockMsgCache: blsMsgCache,
}
}
@ -30,14 +30,14 @@ func (c *messageCache) ChainGetBlockMessages(ctx context.Context, blkCid cid.Cid
c.blockMsgLk.Lock()
defer c.blockMsgLk.Unlock()
msgsI, ok := c.blockMsgCache.Get(blkCid)
msgs, ok := c.blockMsgCache.Get(blkCid)
var err error
if !ok {
msgsI, err = c.api.ChainGetBlockMessages(ctx, blkCid)
msgs, err = c.api.ChainGetBlockMessages(ctx, blkCid)
if err != nil {
return nil, err
}
c.blockMsgCache.Add(blkCid, msgsI)
c.blockMsgCache.Add(blkCid, msgs)
}
return msgsI.(*api.BlockMessages), nil
return msgs, nil
}

View File

@ -5,7 +5,7 @@ import (
"context"
"fmt"
"io"
"io/ioutil"
"os"
"sync/atomic"
"time"
@ -31,6 +31,7 @@ import (
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/actors/policy"
"github.com/filecoin-project/lotus/chain/beacon"
"github.com/filecoin-project/lotus/chain/consensus"
"github.com/filecoin-project/lotus/chain/consensus/filcns"
genesis2 "github.com/filecoin-project/lotus/chain/gen/genesis"
"github.com/filecoin-project/lotus/chain/rand"
@ -166,7 +167,7 @@ func NewGeneratorWithSectorsAndUpgradeSchedule(numSectors int, us stmgr.UpgradeS
maddr1 := genesis2.MinerAddress(0)
m1temp, err := ioutil.TempDir("", "preseal")
m1temp, err := os.MkdirTemp("", "preseal")
if err != nil {
return nil, err
}
@ -178,7 +179,7 @@ func NewGeneratorWithSectorsAndUpgradeSchedule(numSectors int, us stmgr.UpgradeS
maddr2 := genesis2.MinerAddress(1)
m2temp, err := ioutil.TempDir("", "preseal")
m2temp, err := os.MkdirTemp("", "preseal")
if err != nil {
return nil, err
}
@ -255,7 +256,7 @@ func NewGeneratorWithSectorsAndUpgradeSchedule(numSectors int, us stmgr.UpgradeS
//return nil, xerrors.Errorf("creating drand beacon: %w", err)
//}
sm, err := stmgr.NewStateManager(cs, filcns.NewTipSetExecutor(), sys, us, beac)
sm, err := stmgr.NewStateManager(cs, consensus.NewTipSetExecutor(filcns.RewardFunc), sys, us, beac, ds)
if err != nil {
return nil, xerrors.Errorf("initing stmgr: %w", err)
}
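The ioutil.TempDir calls above are replaced with their os equivalents, since io/ioutil has been deprecated since Go 1.16. The replacement is a drop-in (a generic sketch, separate from the preseal directories above):

package example

import "os"

func tempWorkDir() (string, func(), error) {
	// os.MkdirTemp has the same semantics ioutil.TempDir had.
	dir, err := os.MkdirTemp("", "preseal")
	if err != nil {
		return "", nil, err
	}
	// The caller is expected to invoke cleanup when done.
	cleanup := func() { _ = os.RemoveAll(dir) }
	return dir, cleanup, nil
}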

View File

@ -38,7 +38,7 @@ import (
"github.com/filecoin-project/lotus/chain/actors/builtin/reward"
"github.com/filecoin-project/lotus/chain/actors/builtin/system"
"github.com/filecoin-project/lotus/chain/actors/builtin/verifreg"
"github.com/filecoin-project/lotus/chain/consensus/filcns"
"github.com/filecoin-project/lotus/chain/consensus"
"github.com/filecoin-project/lotus/chain/state"
"github.com/filecoin-project/lotus/chain/store"
"github.com/filecoin-project/lotus/chain/types"
@ -486,7 +486,7 @@ func VerifyPreSealedData(ctx context.Context, cs *store.ChainStore, sys vm.Sysca
Epoch: 0,
Rand: &fakeRand{},
Bstore: cs.StateBlockstore(),
Actors: filcns.NewActorRegistry(),
Actors: consensus.NewActorRegistry(),
Syscalls: mkFakedSigSyscalls(sys),
CircSupplyCalc: csc,
NetworkVersion: nv,

View File

@ -42,7 +42,7 @@ import (
"github.com/filecoin-project/lotus/chain/actors/builtin/power"
"github.com/filecoin-project/lotus/chain/actors/builtin/reward"
"github.com/filecoin-project/lotus/chain/actors/policy"
"github.com/filecoin-project/lotus/chain/consensus/filcns"
"github.com/filecoin-project/lotus/chain/consensus"
"github.com/filecoin-project/lotus/chain/state"
"github.com/filecoin-project/lotus/chain/store"
"github.com/filecoin-project/lotus/chain/types"
@ -96,7 +96,7 @@ func SetupStorageMiners(ctx context.Context, cs *store.ChainStore, sys vm.Syscal
Epoch: 0,
Rand: &fakeRand{},
Bstore: cs.StateBlockstore(),
Actors: filcns.NewActorRegistry(),
Actors: consensus.NewActorRegistry(),
Syscalls: mkFakedSigSyscalls(sys),
CircSupplyCalc: csc,
NetworkVersion: nv,

View File

@ -12,7 +12,7 @@ import (
"time"
"github.com/hashicorp/go-multierror"
lru "github.com/hashicorp/golang-lru"
lru "github.com/hashicorp/golang-lru/v2"
"github.com/ipfs/go-cid"
"github.com/ipfs/go-datastore"
"github.com/ipfs/go-datastore/namespace"
@ -33,8 +33,7 @@ import (
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain"
"github.com/filecoin-project/lotus/chain/consensus/filcns"
"github.com/filecoin-project/lotus/chain/consensus"
"github.com/filecoin-project/lotus/chain/stmgr"
"github.com/filecoin-project/lotus/chain/store"
"github.com/filecoin-project/lotus/chain/types"
@ -160,7 +159,7 @@ type MessagePool struct {
// pruneCooldown is a channel used to allow a cooldown time between prunes
pruneCooldown chan struct{}
blsSigCache *lru.TwoQueueCache
blsSigCache *lru.TwoQueueCache[cid.Cid, crypto.Signature]
changes *lps.PubSub
@ -168,9 +167,9 @@ type MessagePool struct {
netName dtypes.NetworkName
sigValCache *lru.TwoQueueCache
sigValCache *lru.TwoQueueCache[string, struct{}]
nonceCache *lru.Cache
nonceCache *lru.Cache[nonceCacheKey, uint64]
evtTypes [3]journal.EventType
journal journal.Journal
@ -370,9 +369,9 @@ func (ms *msgSet) toSlice() []*types.SignedMessage {
}
func New(ctx context.Context, api Provider, ds dtypes.MetadataDS, us stmgr.UpgradeSchedule, netName dtypes.NetworkName, j journal.Journal) (*MessagePool, error) {
cache, _ := lru.New2Q(build.BlsSignatureCacheSize)
verifcache, _ := lru.New2Q(build.VerifSigCacheSize)
noncecache, _ := lru.New(256)
cache, _ := lru.New2Q[cid.Cid, crypto.Signature](build.BlsSignatureCacheSize)
verifcache, _ := lru.New2Q[string, struct{}](build.VerifSigCacheSize)
noncecache, _ := lru.New[nonceCacheKey, uint64](256)
cfg, err := loadConfig(ctx, ds)
if err != nil {
@ -798,7 +797,7 @@ func (mp *MessagePool) VerifyMsgSig(m *types.SignedMessage) error {
return nil
}
if err := chain.AuthenticateMessage(m, m.Message.From); err != nil {
if err := consensus.AuthenticateMessage(m, m.Message.From); err != nil {
return xerrors.Errorf("failed to validate signature: %w", err)
}
@ -866,7 +865,7 @@ func (mp *MessagePool) addTs(ctx context.Context, m *types.SignedMessage, curTs
nv := mp.api.StateNetworkVersion(ctx, epoch)
// TODO: I'm not thrilled about depending on filcns here, but I prefer this to duplicating logic
if !filcns.IsValidForSending(nv, senderAct) {
if !consensus.IsValidForSending(nv, senderAct) {
return false, xerrors.Errorf("sender actor %s is not a valid top-level sender", m.Message.From)
}
@ -1054,7 +1053,7 @@ func (mp *MessagePool) getStateNonce(ctx context.Context, addr address.Address,
n, ok := mp.nonceCache.Get(nk)
if ok {
return n.(uint64), nil
return n, nil
}
act, err := mp.api.GetActorAfter(addr, ts)
@ -1474,15 +1473,10 @@ func (mp *MessagePool) MessagesForBlocks(ctx context.Context, blks []*types.Bloc
}
func (mp *MessagePool) RecoverSig(msg *types.Message) *types.SignedMessage {
val, ok := mp.blsSigCache.Get(msg.Cid())
sig, ok := mp.blsSigCache.Get(msg.Cid())
if !ok {
return nil
}
sig, ok := val.(crypto.Signature)
if !ok {
log.Errorf("value in signature cache was not a signature (got %T)", val)
return nil
}
return &types.SignedMessage{
Message: *msg,
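
The caches above moved to hashicorp/golang-lru v2, whose generic API is what lets RecoverSig drop the `val.(crypto.Signature)` assertion. A minimal standalone sketch of the typed API (string/int type parameters stand in for the cid.Cid/crypto.Signature types used here):

```go
package main

import (
	"fmt"

	lru "github.com/hashicorp/golang-lru/v2"
)

func main() {
	// v2 caches are parameterized over key and value types, so Get returns
	// a typed value directly instead of an interface{} to type-assert.
	cache, _ := lru.New2Q[string, int](128)
	cache.Add("f1nonce", 42)
	if v, ok := cache.Get("f1nonce"); ok {
		fmt.Println(v + 1) // v is already an int
	}
}
```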

View File

@ -52,8 +52,8 @@ func (sm *StateManager) Call(ctx context.Context, msg *types.Message, ts *types.
}
// CallWithGas calculates the state for a given tipset, and then applies the given message on top of that state.
func (sm *StateManager) CallWithGas(ctx context.Context, msg *types.Message, priorMsgs []types.ChainMsg, ts *types.TipSet) (*api.InvocResult, error) {
return sm.callInternal(ctx, msg, priorMsgs, ts, cid.Undef, sm.GetNetworkVersion, true, true)
func (sm *StateManager) CallWithGas(ctx context.Context, msg *types.Message, priorMsgs []types.ChainMsg, ts *types.TipSet, applyTsMessages bool) (*api.InvocResult, error) {
return sm.callInternal(ctx, msg, priorMsgs, ts, cid.Undef, sm.GetNetworkVersion, true, applyTsMessages)
}
// CallAtStateAndVersion allows you to specify a message to execute on the given stateCid and network version.
@ -117,12 +117,22 @@ func (sm *StateManager) callInternal(ctx context.Context, msg *types.Message, pr
if stateCid == cid.Undef {
stateCid = ts.ParentState()
}
if applyTsMessages {
tsMsgs, err := sm.cs.MessagesForTipset(ctx, ts)
if err != nil {
return nil, xerrors.Errorf("failed to lookup messages for parent tipset: %w", err)
}
if applyTsMessages {
priorMsgs = append(tsMsgs, priorMsgs...)
} else {
var filteredTsMsgs []types.ChainMsg
for _, tsMsg := range tsMsgs {
//TODO: we should technically normalize the Filecoin address of 'from' before comparing here
if tsMsg.VMMessage().From == msg.VMMessage().From {
filteredTsMsgs = append(filteredTsMsgs, tsMsg)
}
}
priorMsgs = append(filteredTsMsgs, priorMsgs...)
}
// Technically, the tipset we're passing in here should be ts+1, but that may not exist.
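
When callers pass `applyTsMessages=false`, only the sender's own tipset messages are replayed, so that account's nonce sequence stays intact without executing everything else. A self-contained sketch of that filter (simplified types, not lotus's ChainMsg):

```go
package main

import "fmt"

type msg struct{ from string }

// filterBySender mirrors the applyTsMessages=false branch above: keep only
// prior tipset messages whose sender matches the call's sender.
func filterBySender(tsMsgs []msg, from string) []msg {
	var filtered []msg
	for _, m := range tsMsgs {
		if m.from == from {
			filtered = append(filtered, m)
		}
	}
	return filtered
}

func main() {
	msgs := []msg{{"f1alice"}, {"f1bob"}, {"f1alice"}}
	fmt.Println(filterBySender(msgs, "f1alice")) // [{f1alice} {f1alice}]
}
```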

View File

@ -8,6 +8,7 @@ import (
"go.opencensus.io/trace"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/chain/store"
"github.com/filecoin-project/lotus/chain/types"
)
@ -52,6 +53,22 @@ func (sm *StateManager) TipSetState(ctx context.Context, ts *types.TipSet) (st c
sm.stlk.Unlock()
if ts.Height() == 0 {
// NB: This is here because the process that executes blocks requires that the
// block miner reference a valid miner in the state tree. Unless we create some
// magical genesis miner, this won't work properly, so we short circuit here.
// This avoids the question of 'who gets paid the genesis block reward'.
// This also makes us not attempt to lookup the tipset state with
// tryLookupTipsetState, which would cause a very long, very slow walk.
return ts.Blocks()[0].ParentStateRoot, ts.Blocks()[0].ParentMessageReceipts, nil
}
// First, try to find the tipset in the current chain. If found, we can avoid re-executing
// it.
if st, rec, found := tryLookupTipsetState(ctx, sm.cs, ts); found {
return st, rec, nil
}
st, rec, err = sm.tsExec.ExecuteTipSet(ctx, sm, ts, sm.tsExecMonitor, false)
if err != nil {
return cid.Undef, cid.Undef, err
@ -60,6 +77,51 @@ func (sm *StateManager) TipSetState(ctx context.Context, ts *types.TipSet) (st c
return st, rec, nil
}
// Try to lookup a state & receipt CID for a given tipset by walking the chain instead of executing
// it. This will only successfully return the state/receipt CIDs if they're found in the state
// store.
//
// NOTE: This _won't_ recursively walk the receipt/state trees. It assumes that having the root
// implies having the rest of the tree. However, lotus generally makes that assumption anyways.
func tryLookupTipsetState(ctx context.Context, cs *store.ChainStore, ts *types.TipSet) (cid.Cid, cid.Cid, bool) {
nextTs, err := cs.GetTipsetByHeight(ctx, ts.Height()+1, nil, false)
if err != nil {
// Nothing to see here. The requested height may be beyond the current head.
return cid.Undef, cid.Undef, false
}
// Make sure we're on the correct fork.
if nextTs.Parents() != ts.Key() {
// Also nothing to see here. This just means that the requested tipset is on a
// different fork.
return cid.Undef, cid.Undef, false
}
stateCid := nextTs.ParentState()
receiptCid := nextTs.ParentMessageReceipts()
// Make sure we have the parent state.
if hasState, err := cs.StateBlockstore().Has(ctx, stateCid); err != nil {
log.Errorw("failed to lookup state-root in blockstore", "cid", stateCid, "error", err)
return cid.Undef, cid.Undef, false
} else if !hasState {
// We have the chain but don't have the state. It looks like we need to try
// executing?
return cid.Undef, cid.Undef, false
}
// Make sure we have the receipts.
if hasReceipts, err := cs.ChainBlockstore().Has(ctx, receiptCid); err != nil {
log.Errorw("failed to lookup receipts in blockstore", "cid", receiptCid, "error", err)
return cid.Undef, cid.Undef, false
} else if !hasReceipts {
// If we don't have the receipts, re-execute and try again.
return cid.Undef, cid.Undef, false
}
return stateCid, receiptCid, true
}
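
The trick here is that executing tipset N produces exactly the roots recorded in tipset N+1's headers, so a chain walk can substitute for re-execution. A toy illustration under that assumption (simplified types, not lotus's, and without the blockstore `Has` checks):

```go
package main

import "fmt"

type tipset struct {
	height      int
	key         string
	parents     string // key of the parent tipset
	parentState string // state root produced by executing the parent
}

// lookupState returns the state root for ts if a child tipset on the same
// fork is known, mirroring tryLookupTipsetState above.
func lookupState(byHeight map[int]tipset, ts tipset) (string, bool) {
	next, ok := byHeight[ts.height+1]
	if !ok || next.parents != ts.key {
		return "", false // beyond head, or on a different fork
	}
	return next.parentState, true
}

func main() {
	chain := map[int]tipset{
		10: {height: 10, key: "tsA", parents: "ts9", parentState: "root9"},
		11: {height: 11, key: "tsB", parents: "tsA", parentState: "rootA"},
	}
	st, ok := lookupState(chain, chain[10])
	fmt.Println(st, ok) // rootA true
}
```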
func (sm *StateManager) ExecutionTraceWithMonitor(ctx context.Context, ts *types.TipSet, em ExecMonitor) (cid.Cid, error) {
st, _, err := sm.tsExec.ExecuteTipSet(ctx, sm, ts, em, true)
return st, err

View File

@ -4,7 +4,9 @@ import (
"bytes"
"context"
"encoding/binary"
"os"
"sort"
"strings"
"sync"
"time"
@ -26,6 +28,9 @@ import (
"github.com/filecoin-project/lotus/chain/vm"
)
// EnvDisablePreMigrations when set to '1' stops pre-migrations from running
const EnvDisablePreMigrations = "LOTUS_DISABLE_PRE_MIGRATIONS"
// MigrationCache can be used to cache information used by a migration. This is primarily useful to
// "pre-compute" some migration state ahead of time, and make it accessible in the migration itself.
type MigrationCache interface {
@ -169,9 +174,16 @@ func (us UpgradeSchedule) GetNtwkVersion(e abi.ChainEpoch) (network.Version, err
func (sm *StateManager) HandleStateForks(ctx context.Context, root cid.Cid, height abi.ChainEpoch, cb ExecMonitor, ts *types.TipSet) (cid.Cid, error) {
retCid := root
var err error
u := sm.stateMigrations[height]
if u != nil && u.upgrade != nil {
migCid, ok, err := u.migrationResultCache.Get(ctx, root)
if err == nil && ok {
log.Infow("CACHED migration", "height", height, "from", root, "to", migCid)
return migCid, nil
} else if err != nil {
log.Errorw("failed to lookup previous migration result", "err", err)
}
startTime := time.Now()
log.Warnw("STARTING migration", "height", height, "from", root)
// Yes, we clone the cache, even for the final upgrade epoch. Why? Reverts. We may
@ -192,6 +204,11 @@ func (sm *StateManager) HandleStateForks(ctx context.Context, root cid.Cid, heig
"to", retCid,
"duration", time.Since(startTime),
)
// Only set if migration ran, we do not want a root => root mapping
if err := u.migrationResultCache.Store(ctx, root, retCid); err != nil {
log.Errorw("failed to store migration result", "err", err)
}
}
return retCid, nil
@ -218,6 +235,11 @@ func runPreMigration(ctx context.Context, sm *StateManager, fn PreMigrationFunc,
height := ts.Height()
parent := ts.ParentState()
if disabled := os.Getenv(EnvDisablePreMigrations); strings.TrimSpace(disabled) == "1" {
log.Warnw("SKIPPING pre-migration", "height", height)
return
}
startTime := time.Now()
log.Warn("STARTING pre-migration")
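
The guard above makes pre-migrations skippable at runtime. A standalone sketch of the same check (the env var name comes from the EnvDisablePreMigrations constant above; whitespace around the value is tolerated):

```go
package main

import (
	"fmt"
	"os"
	"strings"
)

// preMigrationsDisabled mirrors the runPreMigration guard: any value that
// trims to "1" disables pre-migration work entirely.
func preMigrationsDisabled() bool {
	return strings.TrimSpace(os.Getenv("LOTUS_DISABLE_PRE_MIGRATIONS")) == "1"
}

func main() {
	os.Setenv("LOTUS_DISABLE_PRE_MIGRATIONS", " 1 ")
	fmt.Println(preMigrationsDisabled()) // true
}
```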
@ -347,12 +369,11 @@ func DoTransfer(tree types.StateTree, from, to address.Address, amt abi.TokenAmo
// record the transfer in execution traces
cb(types.ExecutionTrace{
Msg: MakeFakeMsg(from, to, amt, 0),
MsgRct: MakeFakeRct(),
Error: "",
Duration: 0,
GasCharges: nil,
Subcalls: nil,
Msg: types.MessageTrace{
From: from,
To: to,
Value: amt,
},
})
}

View File

@ -5,10 +5,12 @@ import (
"context"
"fmt"
"io"
"os"
"sync"
"testing"
"github.com/ipfs/go-cid"
"github.com/ipfs/go-datastore"
ipldcbor "github.com/ipfs/go-ipld-cbor"
logging "github.com/ipfs/go-log/v2"
"github.com/stretchr/testify/require"
@ -31,8 +33,10 @@ import (
"github.com/filecoin-project/lotus/chain/actors/builtin"
_init "github.com/filecoin-project/lotus/chain/actors/builtin/init"
"github.com/filecoin-project/lotus/chain/actors/policy"
"github.com/filecoin-project/lotus/chain/consensus"
"github.com/filecoin-project/lotus/chain/consensus/filcns"
"github.com/filecoin-project/lotus/chain/gen"
"github.com/filecoin-project/lotus/chain/stmgr"
. "github.com/filecoin-project/lotus/chain/stmgr"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/chain/vm"
@ -128,7 +132,7 @@ func TestForkHeightTriggers(t *testing.T) {
}
sm, err := NewStateManager(
cg.ChainStore(), filcns.NewTipSetExecutor(), cg.StateManager().VMSys(), UpgradeSchedule{{
cg.ChainStore(), consensus.NewTipSetExecutor(filcns.RewardFunc), cg.StateManager().VMSys(), UpgradeSchedule{{
Network: network.Version1,
Height: testForkHeight,
Migration: func(ctx context.Context, sm *StateManager, cache MigrationCache, cb ExecMonitor,
@ -164,12 +168,12 @@ func TestForkHeightTriggers(t *testing.T) {
}
return st.Flush(ctx)
}}}, cg.BeaconSchedule())
}}}, cg.BeaconSchedule(), datastore.NewMapDatastore())
if err != nil {
t.Fatal(err)
}
inv := filcns.NewActorRegistry()
inv := consensus.NewActorRegistry()
registry := builtin.MakeRegistryLegacy([]rtt.VMActor{testActor{}})
inv.Register(actorstypes.Version0, nil, registry)
@ -274,7 +278,7 @@ func testForkRefuseCall(t *testing.T, nullsBefore, nullsAfter int) {
var migrationCount int
sm, err := NewStateManager(
cg.ChainStore(), filcns.NewTipSetExecutor(), cg.StateManager().VMSys(), UpgradeSchedule{{
cg.ChainStore(), consensus.NewTipSetExecutor(filcns.RewardFunc), cg.StateManager().VMSys(), UpgradeSchedule{{
Network: network.Version1,
Expensive: true,
Height: testForkHeight,
@ -282,12 +286,12 @@ func testForkRefuseCall(t *testing.T, nullsBefore, nullsAfter int) {
root cid.Cid, height abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
migrationCount++
return root, nil
}}}, cg.BeaconSchedule())
}}}, cg.BeaconSchedule(), datastore.NewMapDatastore())
if err != nil {
t.Fatal(err)
}
inv := filcns.NewActorRegistry()
inv := consensus.NewActorRegistry()
registry := builtin.MakeRegistryLegacy([]rtt.VMActor{testActor{}})
inv.Register(actorstypes.Version0, nil, registry)
@ -336,7 +340,7 @@ func testForkRefuseCall(t *testing.T, nullsBefore, nullsAfter int) {
currentHeight := ts.TipSet.TipSet().Height()
// CallWithGas calls on top of the given tipset.
ret, err := sm.CallWithGas(ctx, m, nil, ts.TipSet.TipSet())
ret, err := sm.CallWithGas(ctx, m, nil, ts.TipSet.TipSet(), true)
if parentHeight <= testForkHeight && currentHeight >= testForkHeight {
// If I had a fork, or I _will_ have a fork, it should fail.
require.Equal(t, ErrExpensiveFork, err)
@ -357,7 +361,7 @@ func testForkRefuseCall(t *testing.T, nullsBefore, nullsAfter int) {
// Calls without a tipset should walk back to the last non-fork tipset.
// We _verify_ that the migration wasn't run multiple times at the end of the
// test.
ret, err = sm.CallWithGas(ctx, m, nil, nil)
ret, err = sm.CallWithGas(ctx, m, nil, nil, true)
require.NoError(t, err)
require.True(t, ret.MsgRct.ExitCode.IsSuccess())
@ -412,7 +416,7 @@ func TestForkPreMigration(t *testing.T) {
counter := make(chan struct{}, 10)
sm, err := NewStateManager(
cg.ChainStore(), filcns.NewTipSetExecutor(), cg.StateManager().VMSys(), UpgradeSchedule{{
cg.ChainStore(), consensus.NewTipSetExecutor(filcns.RewardFunc), cg.StateManager().VMSys(), UpgradeSchedule{{
Network: network.Version1,
Height: testForkHeight,
Migration: func(ctx context.Context, sm *StateManager, cache MigrationCache, cb ExecMonitor,
@ -500,7 +504,7 @@ func TestForkPreMigration(t *testing.T) {
return nil
},
}}},
}, cg.BeaconSchedule())
}, cg.BeaconSchedule(), datastore.NewMapDatastore())
if err != nil {
t.Fatal(err)
}
@ -509,7 +513,7 @@ func TestForkPreMigration(t *testing.T) {
require.NoError(t, sm.Stop(context.Background()))
}()
inv := filcns.NewActorRegistry()
inv := consensus.NewActorRegistry()
registry := builtin.MakeRegistryLegacy([]rtt.VMActor{testActor{}})
inv.Register(actorstypes.Version0, nil, registry)
@ -534,3 +538,170 @@ func TestForkPreMigration(t *testing.T) {
// to this channel.
require.Equal(t, 6, len(counter))
}
func TestDisablePreMigration(t *testing.T) {
logging.SetAllLoggers(logging.LevelInfo)
cg, err := gen.NewGenerator()
require.NoError(t, err)
err = os.Setenv(EnvDisablePreMigrations, "1")
require.NoError(t, err)
defer func() {
err := os.Unsetenv(EnvDisablePreMigrations)
require.NoError(t, err)
}()
counter := make(chan struct{}, 10)
sm, err := NewStateManager(
cg.ChainStore(),
consensus.NewTipSetExecutor(filcns.RewardFunc),
cg.StateManager().VMSys(),
UpgradeSchedule{{
Network: network.Version1,
Height: testForkHeight,
Migration: func(_ context.Context, _ *StateManager, _ MigrationCache, _ ExecMonitor,
root cid.Cid, _ abi.ChainEpoch, _ *types.TipSet) (cid.Cid, error) {
counter <- struct{}{}
return root, nil
},
PreMigrations: []PreMigration{{
StartWithin: 20,
PreMigration: func(ctx context.Context, _ *StateManager, _ MigrationCache,
_ cid.Cid, _ abi.ChainEpoch, _ *types.TipSet) error {
panic("should be skipped")
},
}}},
},
cg.BeaconSchedule(),
datastore.NewMapDatastore(),
)
require.NoError(t, err)
require.NoError(t, sm.Start(context.Background()))
defer func() {
require.NoError(t, sm.Stop(context.Background()))
}()
inv := consensus.NewActorRegistry()
registry := builtin.MakeRegistryLegacy([]rtt.VMActor{testActor{}})
inv.Register(actorstypes.Version0, nil, registry)
sm.SetVMConstructor(func(ctx context.Context, vmopt *vm.VMOpts) (vm.Interface, error) {
nvm, err := vm.NewLegacyVM(ctx, vmopt)
require.NoError(t, err)
nvm.SetInvoker(inv)
return nvm, nil
})
cg.SetStateManager(sm)
for i := 0; i < 50; i++ {
_, err := cg.NextTipSet()
require.NoError(t, err)
}
require.Equal(t, 1, len(counter))
}
func TestMigrationCache(t *testing.T) {
logging.SetAllLoggers(logging.LevelInfo)
cg, err := gen.NewGenerator()
require.NoError(t, err)
counter := make(chan struct{}, 10)
metadataDs := datastore.NewMapDatastore()
sm, err := NewStateManager(
cg.ChainStore(),
consensus.NewTipSetExecutor(filcns.RewardFunc),
cg.StateManager().VMSys(),
UpgradeSchedule{{
Network: network.Version1,
Height: testForkHeight,
Migration: func(_ context.Context, _ *StateManager, _ MigrationCache, _ ExecMonitor,
root cid.Cid, _ abi.ChainEpoch, _ *types.TipSet) (cid.Cid, error) {
counter <- struct{}{}
return root, nil
}},
},
cg.BeaconSchedule(),
metadataDs,
)
require.NoError(t, err)
require.NoError(t, sm.Start(context.Background()))
defer func() {
require.NoError(t, sm.Stop(context.Background()))
}()
inv := consensus.NewActorRegistry()
registry := builtin.MakeRegistryLegacy([]rtt.VMActor{testActor{}})
inv.Register(actorstypes.Version0, nil, registry)
sm.SetVMConstructor(func(ctx context.Context, vmopt *vm.VMOpts) (vm.Interface, error) {
nvm, err := vm.NewLegacyVM(ctx, vmopt)
require.NoError(t, err)
nvm.SetInvoker(inv)
return nvm, nil
})
cg.SetStateManager(sm)
for i := 0; i < 50; i++ {
_, err := cg.NextTipSet()
require.NoError(t, err)
}
ts, err := cg.ChainStore().GetTipsetByHeight(context.Background(), testForkHeight, nil, false)
require.NoError(t, err)
root, _, err := stmgr.ComputeState(context.Background(), sm, testForkHeight+1, []*types.Message{}, ts)
require.NoError(t, err)
t.Log(root)
require.Equal(t, 1, len(counter))
{
sm, err := NewStateManager(
cg.ChainStore(),
consensus.NewTipSetExecutor(filcns.RewardFunc),
cg.StateManager().VMSys(),
UpgradeSchedule{{
Network: network.Version1,
Height: testForkHeight,
Migration: func(_ context.Context, _ *StateManager, _ MigrationCache, _ ExecMonitor,
root cid.Cid, _ abi.ChainEpoch, _ *types.TipSet) (cid.Cid, error) {
counter <- struct{}{}
return root, nil
}},
},
cg.BeaconSchedule(),
metadataDs,
)
require.NoError(t, err)
sm.SetVMConstructor(func(ctx context.Context, vmopt *vm.VMOpts) (vm.Interface, error) {
nvm, err := vm.NewLegacyVM(ctx, vmopt)
require.NoError(t, err)
nvm.SetInvoker(inv)
return nvm, nil
})
ctx := context.Background()
base, _, err := sm.ExecutionTrace(ctx, ts)
require.NoError(t, err)
_, err = sm.HandleStateForks(context.Background(), base, ts.Height(), nil, ts)
require.NoError(t, err)
// Should not have increased as we should be using the cached results in the metadataDs
require.Equal(t, 1, len(counter))
}
}

View File

@ -2,10 +2,13 @@ package stmgr
import (
"context"
"fmt"
"sync"
"github.com/ipfs/go-cid"
dstore "github.com/ipfs/go-datastore"
cbor "github.com/ipfs/go-ipld-cbor"
ipld "github.com/ipfs/go-ipld-format"
logging "github.com/ipfs/go-log/v2"
"golang.org/x/xerrors"
@ -54,6 +57,44 @@ type migration struct {
upgrade MigrationFunc
preMigrations []PreMigration
cache *nv16.MemMigrationCache
migrationResultCache *migrationResultCache
}
type migrationResultCache struct {
ds dstore.Batching
keyPrefix string
}
func (m *migrationResultCache) keyForMigration(root cid.Cid) dstore.Key {
kStr := fmt.Sprintf("%s/%s", m.keyPrefix, root)
return dstore.NewKey(kStr)
}
func (m *migrationResultCache) Get(ctx context.Context, root cid.Cid) (cid.Cid, bool, error) {
k := m.keyForMigration(root)
bs, err := m.ds.Get(ctx, k)
if ipld.IsNotFound(err) {
return cid.Undef, false, nil
} else if err != nil {
return cid.Undef, false, xerrors.Errorf("error loading migration result: %w", err)
}
c, err := cid.Parse(bs)
if err != nil {
return cid.Undef, false, xerrors.Errorf("error parsing migration result: %w", err)
}
return c, true, nil
}
func (m *migrationResultCache) Store(ctx context.Context, root cid.Cid, resultCid cid.Cid) error {
k := m.keyForMigration(root)
if err := m.ds.Put(ctx, k, resultCid.Bytes()); err != nil {
return err
}
return nil
}
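
The result cache is just a datastore mapping `<prefix>/<pre-migration root>` to the bytes of the post-migration root CID. A hedged sketch of the same round trip against an in-memory datastore (the nv18 prefix and the identity CID are illustrative stand-ins):

```go
package main

import (
	"context"
	"fmt"

	"github.com/ipfs/go-cid"
	dstore "github.com/ipfs/go-datastore"
)

func main() {
	ctx := context.Background()
	ds := dstore.NewMapDatastore()

	root, _ := cid.Parse("bafkqaaa") // identity CID, stands in for a state root
	k := dstore.NewKey(fmt.Sprintf("/migration-cache/nv18/%s", root))

	// Store: pre-migration root -> post-migration root (raw CID bytes)
	_ = ds.Put(ctx, k, root.Bytes())

	// Get: parse the stored bytes back into a CID
	if bs, err := ds.Get(ctx, k); err == nil {
		c, _ := cid.Cast(bs)
		fmt.Println("cached migration result:", c)
	}
}
```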
type Executor interface {
@ -90,7 +131,6 @@ type StateManager struct {
postCalicoVesting []msig0.State
genesisPledge abi.TokenAmount
genesisMarketFunds abi.TokenAmount
tsExec Executor
tsExecMonitor ExecMonitor
@ -103,7 +143,7 @@ type treeCache struct {
tree *state.StateTree
}
func NewStateManager(cs *store.ChainStore, exec Executor, sys vm.SyscallBuilder, us UpgradeSchedule, beacon beacon.Schedule) (*StateManager, error) {
func NewStateManager(cs *store.ChainStore, exec Executor, sys vm.SyscallBuilder, us UpgradeSchedule, beacon beacon.Schedule, metadataDs dstore.Batching) (*StateManager, error) {
// If we have upgrades, make sure they're in-order and make sense.
if err := us.Validate(); err != nil {
return nil, err
@ -122,12 +162,18 @@ func NewStateManager(cs *store.ChainStore, exec Executor, sys vm.SyscallBuilder,
upgrade: upgrade.Migration,
preMigrations: upgrade.PreMigrations,
cache: nv16.NewMemMigrationCache(),
migrationResultCache: &migrationResultCache{
keyPrefix: fmt.Sprintf("/migration-cache/nv%d", upgrade.Network),
ds: metadataDs,
},
}
stateMigrations[upgrade.Height] = migration
}
if upgrade.Expensive {
expensiveUpgrades[upgrade.Height] = struct{}{}
}
networkVersions = append(networkVersions, versionSpec{
networkVersion: lastVersion,
atOrBelow: upgrade.Height,
@ -155,8 +201,8 @@ func NewStateManager(cs *store.ChainStore, exec Executor, sys vm.SyscallBuilder,
}, nil
}
func NewStateManagerWithUpgradeScheduleAndMonitor(cs *store.ChainStore, exec Executor, sys vm.SyscallBuilder, us UpgradeSchedule, b beacon.Schedule, em ExecMonitor) (*StateManager, error) {
sm, err := NewStateManager(cs, exec, sys, us, b)
func NewStateManagerWithUpgradeScheduleAndMonitor(cs *store.ChainStore, exec Executor, sys vm.SyscallBuilder, us UpgradeSchedule, b beacon.Schedule, em ExecMonitor, metadataDs dstore.Batching) (*StateManager, error) {
sm, err := NewStateManager(cs, exec, sys, us, b, metadataDs)
if err != nil {
return nil, err
}

View File

@ -51,17 +51,13 @@ func (sm *StateManager) setupGenesisVestingSchedule(ctx context.Context) error {
return xerrors.Errorf("loading state tree: %w", err)
}
gmf, err := getFilMarketLocked(ctx, sTree)
if err != nil {
return xerrors.Errorf("setting up genesis market funds: %w", err)
}
gp, err := getFilPowerLocked(ctx, sTree)
if err != nil {
return xerrors.Errorf("setting up genesis pledge: %w", err)
}
sm.genesisMarketFunds = gmf
sm.genesisMsigLk.Lock()
defer sm.genesisMsigLk.Unlock()
sm.genesisPledge = gp
totalsByEpoch := make(map[abi.ChainEpoch]abi.TokenAmount)
@ -128,6 +124,8 @@ func (sm *StateManager) setupPostIgnitionVesting(ctx context.Context) error {
totalsByEpoch[sixYears] = big.NewInt(100_000_000)
totalsByEpoch[sixYears] = big.Add(totalsByEpoch[sixYears], big.NewInt(300_000_000))
sm.genesisMsigLk.Lock()
defer sm.genesisMsigLk.Unlock()
sm.postIgnitionVesting = make([]msig0.State, 0, len(totalsByEpoch))
for k, v := range totalsByEpoch {
ns := msig0.State{
@ -178,6 +176,9 @@ func (sm *StateManager) setupPostCalicoVesting(ctx context.Context) error {
totalsByEpoch[sixYears] = big.Add(totalsByEpoch[sixYears], big.NewInt(300_000_000))
totalsByEpoch[sixYears] = big.Add(totalsByEpoch[sixYears], big.NewInt(9_805_053))
sm.genesisMsigLk.Lock()
defer sm.genesisMsigLk.Unlock()
sm.postCalicoVesting = make([]msig0.State, 0, len(totalsByEpoch))
for k, v := range totalsByEpoch {
ns := msig0.State{
@ -198,21 +199,20 @@ func (sm *StateManager) setupPostCalicoVesting(ctx context.Context) error {
func (sm *StateManager) GetFilVested(ctx context.Context, height abi.ChainEpoch) (abi.TokenAmount, error) {
vf := big.Zero()
sm.genesisMsigLk.Lock()
defer sm.genesisMsigLk.Unlock()
// TODO: combine all this?
if sm.preIgnitionVesting == nil || sm.genesisPledge.IsZero() || sm.genesisMarketFunds.IsZero() {
if sm.preIgnitionVesting == nil || sm.genesisPledge.IsZero() {
err := sm.setupGenesisVestingSchedule(ctx)
if err != nil {
return vf, xerrors.Errorf("failed to setup pre-ignition vesting schedule: %w", err)
}
}
if sm.postIgnitionVesting == nil {
err := sm.setupPostIgnitionVesting(ctx)
if err != nil {
return vf, xerrors.Errorf("failed to setup post-ignition vesting schedule: %w", err)
}
}
if sm.postCalicoVesting == nil {
err := sm.setupPostCalicoVesting(ctx)
@ -246,8 +246,6 @@ func (sm *StateManager) GetFilVested(ctx context.Context, height abi.ChainEpoch)
if height <= build.UpgradeAssemblyHeight {
// continue to use preIgnitionGenInfos, nothing changed at the Ignition epoch
vf = big.Add(vf, sm.genesisPledge)
// continue to use preIgnitionGenInfos, nothing changed at the Ignition epoch
vf = big.Add(vf, sm.genesisMarketFunds)
}
return vf, nil
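
The vesting setup functions now assume the caller (GetFilVested) holds genesisMsigLk across the whole lazy-initialization sequence, instead of each setup function taking the lock mid-flight. A generic sketch of that check-then-initialize-under-one-lock pattern:

```go
package main

import (
	"fmt"
	"sync"
)

type vestingState struct {
	mu    sync.Mutex
	sched []int // nil until first use
}

// get initializes the schedule at most once; the caller-side lock covers
// both the nil check and the initialization, so concurrent callers can
// never observe a half-built schedule.
func (v *vestingState) get() []int {
	v.mu.Lock()
	defer v.mu.Unlock()
	if v.sched == nil {
		v.sched = []int{183, 365, 730} // stand-in for the real setup work
	}
	return v.sched
}

func main() {
	var v vestingState
	fmt.Println(v.get())
}
```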

View File

@ -4,8 +4,8 @@ import (
"context"
"os"
"strconv"
"sync"
lru "github.com/hashicorp/golang-lru"
"golang.org/x/xerrors"
"github.com/filecoin-project/go-state-types/abi"
@ -13,7 +13,7 @@ import (
"github.com/filecoin-project/lotus/chain/types"
)
var DefaultChainIndexCacheSize = 32 << 10
var DefaultChainIndexCacheSize = 32 << 15
func init() {
if s := os.Getenv("LOTUS_CHAIN_INDEX_CACHE"); s != "" {
@ -27,7 +27,8 @@ func init() {
}
type ChainIndex struct {
skipCache *lru.ARCCache
indexCacheLk sync.Mutex
indexCache map[types.TipSetKey]*lbEntry
loadTipSet loadTipSetFunc
@ -36,17 +37,14 @@ type ChainIndex struct {
type loadTipSetFunc func(context.Context, types.TipSetKey) (*types.TipSet, error)
func NewChainIndex(lts loadTipSetFunc) *ChainIndex {
sc, _ := lru.NewARC(DefaultChainIndexCacheSize)
return &ChainIndex{
skipCache: sc,
indexCache: make(map[types.TipSetKey]*lbEntry, DefaultChainIndexCacheSize),
loadTipSet: lts,
skipLength: 20,
}
}
type lbEntry struct {
ts *types.TipSet
parentHeight abi.ChainEpoch
targetHeight abi.ChainEpoch
target types.TipSetKey
}
@ -58,25 +56,36 @@ func (ci *ChainIndex) GetTipsetByHeight(ctx context.Context, from *types.TipSet,
rounded, err := ci.roundDown(ctx, from)
if err != nil {
return nil, err
return nil, xerrors.Errorf("failed to round down: %w", err)
}
ci.indexCacheLk.Lock()
defer ci.indexCacheLk.Unlock()
cur := rounded.Key()
for {
cval, ok := ci.skipCache.Get(cur)
lbe, ok := ci.indexCache[cur]
if !ok {
fc, err := ci.fillCache(ctx, cur)
if err != nil {
return nil, err
return nil, xerrors.Errorf("failed to fill cache: %w", err)
}
cval = fc
lbe = fc
}
lbe := cval.(*lbEntry)
if lbe.ts.Height() == to || lbe.parentHeight < to {
return lbe.ts, nil
} else if to > lbe.targetHeight {
return ci.walkBack(ctx, lbe.ts, to)
if to == lbe.targetHeight {
ts, err := ci.loadTipSet(ctx, lbe.target)
if err != nil {
return nil, xerrors.Errorf("failed to load tipset: %w", err)
}
return ts, nil
}
if to > lbe.targetHeight {
ts, err := ci.loadTipSet(ctx, cur)
if err != nil {
return nil, xerrors.Errorf("failed to load tipset: %w", err)
}
return ci.walkBack(ctx, ts, to)
}
cur = lbe.target
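
The rewritten index no longer pins `*types.TipSet` values in the cache; each entry holds only the key of its skip target, and tipsets are re-loaded on demand. A simplified walk over such an index (strings stand in for tipset keys):

```go
package main

import "fmt"

type entry struct {
	targetHeight int
	target       string // key of the tipset skipLength epochs back
}

// walk follows skip pointers down toward height `to`, mirroring the loop
// above; the real code falls back to walkBack when `to` lands strictly
// between skip targets.
func walk(index map[string]entry, cur string, to int) string {
	for {
		e := index[cur]
		if to >= e.targetHeight {
			return e.target
		}
		cur = e.target
	}
}

func main() {
	index := map[string]entry{
		"ts40": {targetHeight: 20, target: "ts20"},
		"ts20": {targetHeight: 0, target: "ts0"},
	}
	fmt.Println(walk(index, "ts40", 0)) // ts0
}
```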
@ -87,16 +96,17 @@ func (ci *ChainIndex) GetTipsetByHeightWithoutCache(ctx context.Context, from *t
return ci.walkBack(ctx, from, to)
}
// Caller must hold indexCacheLk
func (ci *ChainIndex) fillCache(ctx context.Context, tsk types.TipSetKey) (*lbEntry, error) {
ts, err := ci.loadTipSet(ctx, tsk)
if err != nil {
return nil, err
return nil, xerrors.Errorf("failed to load tipset: %w", err)
}
if ts.Height() == 0 {
return &lbEntry{
ts: ts,
parentHeight: 0,
targetHeight: 0,
target: tsk,
}, nil
}
@ -124,12 +134,10 @@ func (ci *ChainIndex) fillCache(ctx context.Context, tsk types.TipSetKey) (*lbEn
}
lbe := &lbEntry{
ts: ts,
parentHeight: parent.Height(),
targetHeight: skipTarget.Height(),
target: skipTarget.Key(),
}
ci.skipCache.Add(tsk, lbe)
ci.indexCache[tsk] = lbe
return lbe, nil
}
@ -144,7 +152,7 @@ func (ci *ChainIndex) roundDown(ctx context.Context, ts *types.TipSet) (*types.T
rounded, err := ci.walkBack(ctx, ts, target)
if err != nil {
return nil, err
return nil, xerrors.Errorf("failed to walk back: %w", err)
}
return rounded, nil
@ -164,7 +172,7 @@ func (ci *ChainIndex) walkBack(ctx context.Context, from *types.TipSet, to abi.C
for {
pts, err := ci.loadTipSet(ctx, ts.Parents())
if err != nil {
return nil, err
return nil, xerrors.Errorf("failed to load tipset: %w", err)
}
if to > pts.Height() {

View File

@ -207,9 +207,7 @@ type mmCids struct {
}
func (cs *ChainStore) ReadMsgMetaCids(ctx context.Context, mmc cid.Cid) ([]cid.Cid, []cid.Cid, error) {
o, ok := cs.mmCache.Get(mmc)
if ok {
mmcids := o.(*mmCids)
if mmcids, ok := cs.mmCache.Get(mmc); ok {
return mmcids.bls, mmcids.secpk, nil
}
@ -229,7 +227,7 @@ func (cs *ChainStore) ReadMsgMetaCids(ctx context.Context, mmc cid.Cid) ([]cid.C
return nil, nil, xerrors.Errorf("loading secpk message cids for block: %w", err)
}
cs.mmCache.Add(mmc, &mmCids{
cs.mmCache.Add(mmc, mmCids{
bls: blscids,
secpk: secpkcids,
})
@ -237,6 +235,26 @@ func (cs *ChainStore) ReadMsgMetaCids(ctx context.Context, mmc cid.Cid) ([]cid.C
return blscids, secpkcids, nil
}
func (cs *ChainStore) ReadReceipts(ctx context.Context, root cid.Cid) ([]types.MessageReceipt, error) {
a, err := blockadt.AsArray(cs.ActorStore(ctx), root)
if err != nil {
return nil, err
}
receipts := make([]types.MessageReceipt, 0, a.Length())
var rcpt types.MessageReceipt
if err := a.ForEach(&rcpt, func(i int64) error {
if int64(len(receipts)) != i {
return xerrors.Errorf("missing receipt %d", i)
}
receipts = append(receipts, rcpt)
return nil
}); err != nil {
return nil, err
}
return receipts, nil
}
func (cs *ChainStore) MessagesForBlock(ctx context.Context, b *types.BlockHeader) ([]*types.Message, []*types.SignedMessage, error) {
blscids, secpkcids, err := cs.ReadMsgMetaCids(ctx, b.Messages)
if err != nil {

View File

@ -3,7 +3,10 @@ package store
import (
"bytes"
"context"
"fmt"
"io"
"sync"
"time"
"github.com/ipfs/go-cid"
blocks "github.com/ipfs/go-libipfs/blocks"
@ -12,6 +15,8 @@ import (
carv2 "github.com/ipld/go-car/v2"
mh "github.com/multiformats/go-multihash"
cbg "github.com/whyrusleeping/cbor-gen"
"go.uber.org/atomic"
"golang.org/x/sync/errgroup"
"golang.org/x/xerrors"
"github.com/filecoin-project/go-state-types/abi"
@ -132,6 +137,423 @@ func (cs *ChainStore) Import(ctx context.Context, r io.Reader) (*types.TipSet, e
return root, nil
}
type walkSchedTaskType int
const (
finishTask walkSchedTaskType = -1
blockTask walkSchedTaskType = iota
messageTask
receiptTask
stateTask
dagTask
)
func (t walkSchedTaskType) String() string {
switch t {
case finishTask:
return "finish"
case blockTask:
return "block"
case messageTask:
return "message"
case receiptTask:
return "receipt"
case stateTask:
return "state"
case dagTask:
return "dag"
}
panic(fmt.Sprintf("unknown task %d", t))
}
type walkTask struct {
c cid.Cid
taskType walkSchedTaskType
}
// an ever-growing FIFO
type taskFifo struct {
in chan walkTask
out chan walkTask
fifo []walkTask
}
type taskResult struct {
c cid.Cid
b blocks.Block
}
func newTaskFifo(bufferLen int) *taskFifo {
f := taskFifo{
in: make(chan walkTask, bufferLen),
out: make(chan walkTask, bufferLen),
fifo: make([]walkTask, 0),
}
go f.run()
return &f
}
func (f *taskFifo) Close() error {
close(f.in)
return nil
}
func (f *taskFifo) run() {
for {
if len(f.fifo) > 0 {
// we have items in slice
// try to put next out or read something in.
// blocks if nothing works.
next := f.fifo[0]
select {
case f.out <- next:
f.fifo = f.fifo[1:]
case elem, ok := <-f.in:
if !ok {
// drain and close out.
for _, elem := range f.fifo {
f.out <- elem
}
close(f.out)
return
}
f.fifo = append(f.fifo, elem)
}
} else {
// no elements in fifo to put out.
// Try to read in and block.
// When done, try to put out or add to fifo.
select {
case elem, ok := <-f.in:
if !ok {
close(f.out)
return
}
select {
case f.out <- elem:
default:
f.fifo = append(f.fifo, elem)
}
}
}
}
}
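
A standalone demo of the unbounded-FIFO pattern above: a goroutine shuttles items from `in` to `out`, spilling into a slice so senders never block on a full channel, and closing `in` drains the backlog before `out` is closed. Ints stand in for walkTask:

```go
package main

import "fmt"

type fifo struct {
	in, out chan int
	buf     []int
}

func newFifo(n int) *fifo {
	f := &fifo{in: make(chan int, n), out: make(chan int, n)}
	go f.run()
	return f
}

func (f *fifo) run() {
	for {
		if len(f.buf) > 0 {
			select {
			case f.out <- f.buf[0]: // hand out the oldest buffered item
				f.buf = f.buf[1:]
			case v, ok := <-f.in:
				if !ok { // closed: drain the backlog, then close out
					for _, v := range f.buf {
						f.out <- v
					}
					close(f.out)
					return
				}
				f.buf = append(f.buf, v)
			}
		} else {
			v, ok := <-f.in
			if !ok {
				close(f.out)
				return
			}
			select {
			case f.out <- v:
			default: // out is full: buffer instead of blocking the sender
				f.buf = append(f.buf, v)
			}
		}
	}
}

func main() {
	f := newFifo(1)
	for i := 0; i < 5; i++ {
		f.in <- i // never blocks indefinitely, even with a tiny buffer
	}
	close(f.in)
	for v := range f.out {
		fmt.Println(v)
	}
}
```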
type walkSchedulerConfig struct {
numWorkers int
head *types.TipSet // Tipset to start walking from.
tail *types.TipSet // Tipset to end at.
includeMessages bool
includeReceipts bool
includeState bool
}
type walkScheduler struct {
ctx context.Context
cancel context.CancelFunc
store bstore.Blockstore
cfg walkSchedulerConfig
writer io.Writer
workerTasks *taskFifo
totalTasks atomic.Int64
results chan taskResult
writeErrorChan chan error
// tracks number of inflight tasks
//taskWg sync.WaitGroup
// launches workers and collects errors if any occur
workers *errgroup.Group
// set of CIDs already exported
seen sync.Map
}
func newWalkScheduler(ctx context.Context, store bstore.Blockstore, cfg walkSchedulerConfig, w io.Writer) (*walkScheduler, error) {
ctx, cancel := context.WithCancel(ctx)
workers, ctx := errgroup.WithContext(ctx)
s := &walkScheduler{
ctx: ctx,
cancel: cancel,
store: store,
cfg: cfg,
writer: w,
results: make(chan taskResult, cfg.numWorkers*64),
workerTasks: newTaskFifo(cfg.numWorkers * 64),
writeErrorChan: make(chan error, 1),
workers: workers,
}
go func() {
defer close(s.writeErrorChan)
for r := range s.results {
// Write
if err := carutil.LdWrite(s.writer, r.c.Bytes(), r.b.RawData()); err != nil {
// abort operations
cancel()
s.writeErrorChan <- err
}
}
}()
// workers
for i := 0; i < cfg.numWorkers; i++ {
f := func(n int) func() error {
return func() error {
return s.workerFunc(n)
}
}(i)
s.workers.Go(f)
}
s.totalTasks.Add(int64(len(cfg.head.Blocks())))
for _, b := range cfg.head.Blocks() {
select {
case <-ctx.Done():
log.Errorw("context done while sending root tasks", ctx.Err())
cancel() // kill workers
return nil, ctx.Err()
case s.workerTasks.in <- walkTask{
c: b.Cid(),
taskType: blockTask,
}:
}
}
return s, nil
}
func (s *walkScheduler) Wait() error {
err := s.workers.Wait()
// all workers done. One would have reached genesis and notified the
// rest to exit. Yet, there might be some pending tasks in the queue,
// so we need to run a "single worker".
if err != nil {
log.Errorw("export workers finished with error", "error", err)
}
for {
if n := s.totalTasks.Load(); n == 0 {
break // finally fully done
}
select {
case task := <-s.workerTasks.out:
s.totalTasks.Add(-1)
if err != nil {
continue // just drain if errors happened.
}
err = s.processTask(task, 0)
}
}
close(s.results)
errWrite := <-s.writeErrorChan
if errWrite != nil {
log.Errorw("error writing to CAR file", "error", err)
return errWrite
}
s.workerTasks.Close() //nolint:errcheck
return err
}
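
Worker lifecycle management in the scheduler rests on errgroup: the derived context is cancelled on the first worker error, and Wait collects it. A minimal standalone sketch of that fan-out pattern:

```go
package main

import (
	"context"
	"fmt"

	"golang.org/x/sync/errgroup"
)

func main() {
	g, ctx := errgroup.WithContext(context.Background())

	tasks := make(chan int, 8)
	for i := 0; i < 8; i++ {
		tasks <- i
	}
	close(tasks)

	// Three workers drain the shared queue; any returned error cancels ctx
	// for the others, and g.Wait() reports the first one.
	for w := 0; w < 3; w++ {
		g.Go(func() error {
			for t := range tasks {
				select {
				case <-ctx.Done():
					return ctx.Err()
				default:
					fmt.Println("processed", t)
				}
			}
			return nil
		})
	}
	if err := g.Wait(); err != nil {
		fmt.Println("export failed:", err)
	}
}
```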
func (s *walkScheduler) enqueueIfNew(task walkTask) {
if task.c.Prefix().MhType == mh.IDENTITY {
//log.Infow("ignored", "cid", todo.c.String())
return
}
if task.c.Prefix().Codec != cid.Raw && task.c.Prefix().Codec != cid.DagCBOR {
//log.Infow("ignored", "cid", todo.c.String())
return
}
if _, loaded := s.seen.LoadOrStore(task.c, struct{}{}); loaded {
// we already had it on the map
return
}
log.Debugw("enqueue", "type", task.taskType.String(), "cid", task.c.String())
s.totalTasks.Add(1)
s.workerTasks.in <- task
}
func (s *walkScheduler) sendFinish(workerN int) error {
log.Infow("worker finished work", "worker", workerN)
s.totalTasks.Add(1)
s.workerTasks.in <- walkTask{
taskType: finishTask,
}
return nil
}
func (s *walkScheduler) workerFunc(workerN int) error {
log.Infow("starting worker", "worker", workerN)
for t := range s.workerTasks.out {
s.totalTasks.Add(-1)
select {
case <-s.ctx.Done():
return s.ctx.Err()
default:
// A worker reached genesis, so we wind down and let others do
// the same. Exit.
if t.taskType == finishTask {
return s.sendFinish(workerN)
}
}
err := s.processTask(t, workerN)
if err != nil {
return err
}
// continue
}
return nil
}
func (s *walkScheduler) processTask(t walkTask, workerN int) error {
if t.taskType == finishTask {
return nil
}
blk, err := s.store.Get(s.ctx, t.c)
if err != nil {
return xerrors.Errorf("writing object to car, bs.Get: %w", err)
}
s.results <- taskResult{
c: t.c,
b: blk,
}
// extract relevant dags to walk from the block
if t.taskType == blockTask {
blk := t.c
data, err := s.store.Get(s.ctx, blk)
if err != nil {
return err
}
var b types.BlockHeader
if err := b.UnmarshalCBOR(bytes.NewBuffer(data.RawData())); err != nil {
return xerrors.Errorf("unmarshalling block header (cid=%s): %w", blk, err)
}
if b.Height%1_000 == 0 {
log.Infow("block export", "height", b.Height)
}
if b.Height == 0 {
log.Info("exporting genesis block")
for i := range b.Parents {
s.enqueueIfNew(walkTask{
c: b.Parents[i],
taskType: dagTask,
})
}
s.enqueueIfNew(walkTask{
c: b.ParentStateRoot,
taskType: stateTask,
})
return s.sendFinish(workerN)
}
// enqueue block parents
for i := range b.Parents {
s.enqueueIfNew(walkTask{
c: b.Parents[i],
taskType: blockTask,
})
}
if s.cfg.tail.Height() >= b.Height {
log.Debugw("tail reached: only blocks will be exported from now until genesis", "cid", blk.String())
return nil
}
if s.cfg.includeMessages {
// enqueue block messages
s.enqueueIfNew(walkTask{
c: b.Messages,
taskType: messageTask,
})
}
if s.cfg.includeReceipts {
// enqueue block receipts
s.enqueueIfNew(walkTask{
c: b.ParentMessageReceipts,
taskType: receiptTask,
})
}
if s.cfg.includeState {
s.enqueueIfNew(walkTask{
c: b.ParentStateRoot,
taskType: stateTask,
})
}
return nil
}
// Not a chain-block: we scan for CIDs in the raw block-data
return cbg.ScanForLinks(bytes.NewReader(blk.RawData()), func(c cid.Cid) {
if t.c.Prefix().Codec != cid.DagCBOR || t.c.Prefix().MhType == mh.IDENTITY {
return
}
s.enqueueIfNew(walkTask{
c: c,
taskType: dagTask,
})
})
}
func (cs *ChainStore) ExportRange(
ctx context.Context,
w io.Writer,
head, tail *types.TipSet,
messages, receipts, stateroots bool,
workers int) error {
h := &car.CarHeader{
Roots: head.Cids(),
Version: 1,
}
if err := car.WriteHeader(h, w); err != nil {
return xerrors.Errorf("failed to write car header: %s", err)
}
start := time.Now()
log.Infow("walking snapshot range",
"head", head.Key(),
"tail", tail.Key(),
"messages", messages,
"receipts", receipts,
"stateroots",
stateroots,
"workers", workers)
cfg := walkSchedulerConfig{
numWorkers: workers,
head: head,
tail: tail,
includeMessages: messages,
includeState: stateroots,
includeReceipts: receipts,
}
pw, err := newWalkScheduler(ctx, cs.UnionStore(), cfg, w)
if err != nil {
return err
}
// wait until all workers are done.
err = pw.Wait()
if err != nil {
log.Errorw("walker scheduler", "error", err)
return err
}
log.Infow("walking snapshot range complete", "duration", time.Since(start), "success", err == nil)
return nil
}
func (cs *ChainStore) WalkSnapshot(ctx context.Context, ts *types.TipSet, inclRecentRoots abi.ChainEpoch, skipOldMsgs, skipMsgReceipts bool, cb func(cid.Cid) error) error {
if ts == nil {
ts = cs.GetHeaviestTipSet()

View File

@ -11,7 +11,7 @@ import (
"sync"
"time"
lru "github.com/hashicorp/golang-lru"
lru "github.com/hashicorp/golang-lru/v2"
"github.com/ipfs/go-cid"
dstore "github.com/ipfs/go-datastore"
"github.com/ipfs/go-datastore/query"
@ -120,8 +120,8 @@ type ChainStore struct {
reorgCh chan<- reorg
reorgNotifeeCh chan ReorgNotifee
mmCache *lru.ARCCache // msg meta cache (mh.Messages -> secp, bls []cid)
tsCache *lru.ARCCache
mmCache *lru.ARCCache[cid.Cid, mmCids]
tsCache *lru.ARCCache[types.TipSetKey, *types.TipSet]
evtTypes [1]journal.EventType
journal journal.Journal
@ -133,8 +133,8 @@ type ChainStore struct {
}
func NewChainStore(chainBs bstore.Blockstore, stateBs bstore.Blockstore, ds dstore.Batching, weight WeightFunc, j journal.Journal) *ChainStore {
c, _ := lru.NewARC(DefaultMsgMetaCacheSize)
tsc, _ := lru.NewARC(DefaultTipSetCacheSize)
c, _ := lru.NewARC[cid.Cid, mmCids](DefaultMsgMetaCacheSize)
tsc, _ := lru.NewARC[types.TipSetKey, *types.TipSet](DefaultTipSetCacheSize)
if j == nil {
j = journal.NilJournal()
}
@ -818,9 +818,8 @@ func (cs *ChainStore) GetBlock(ctx context.Context, c cid.Cid) (*types.BlockHead
}
func (cs *ChainStore) LoadTipSet(ctx context.Context, tsk types.TipSetKey) (*types.TipSet, error) {
v, ok := cs.tsCache.Get(tsk)
if ok {
return v.(*types.TipSet), nil
if ts, ok := cs.tsCache.Get(tsk); ok {
return ts, nil
}
// Fetch tipset block headers from blockstore in parallel

View File

@ -15,6 +15,7 @@ import (
"github.com/filecoin-project/lotus/blockstore"
"github.com/filecoin-project/lotus/chain/actors/policy"
"github.com/filecoin-project/lotus/chain/consensus"
"github.com/filecoin-project/lotus/chain/consensus/filcns"
"github.com/filecoin-project/lotus/chain/gen"
"github.com/filecoin-project/lotus/chain/stmgr"
@ -195,7 +196,8 @@ func TestChainExportImportFull(t *testing.T) {
}
nbs := blockstore.NewMemorySync()
cs := store.NewChainStore(nbs, nbs, datastore.NewMapDatastore(), filcns.Weight, nil)
ds := datastore.NewMapDatastore()
cs := store.NewChainStore(nbs, nbs, ds, filcns.Weight, nil)
defer cs.Close() //nolint:errcheck
root, err := cs.Import(context.TODO(), buf)
@ -212,7 +214,7 @@ func TestChainExportImportFull(t *testing.T) {
t.Fatal("imported chain differed from exported chain")
}
sm, err := stmgr.NewStateManager(cs, filcns.NewTipSetExecutor(), nil, filcns.DefaultUpgradeSchedule(), cg.BeaconSchedule())
sm, err := stmgr.NewStateManager(cs, consensus.NewTipSetExecutor(filcns.RewardFunc), nil, filcns.DefaultUpgradeSchedule(), cg.BeaconSchedule(), ds)
if err != nil {
t.Fatal(err)
}

View File

@ -7,11 +7,12 @@ import (
"sync"
"time"
lru "github.com/hashicorp/golang-lru"
lru "github.com/hashicorp/golang-lru/v2"
bserv "github.com/ipfs/go-blockservice"
"github.com/ipfs/go-cid"
blocks "github.com/ipfs/go-libipfs/blocks"
logging "github.com/ipfs/go-log/v2"
"github.com/ipni/storetheindex/announce/message"
pubsub "github.com/libp2p/go-libp2p-pubsub"
"github.com/libp2p/go-libp2p/core/connmgr"
"github.com/libp2p/go-libp2p/core/peer"
@ -20,7 +21,6 @@ import (
"golang.org/x/xerrors"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-legs/dtsync"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain"
@ -217,7 +217,7 @@ func fetchCids(
type BlockValidator struct {
self peer.ID
peers *lru.TwoQueueCache
peers *lru.TwoQueueCache[peer.ID, int]
killThresh int
@ -231,7 +231,7 @@ type BlockValidator struct {
}
func NewBlockValidator(self peer.ID, chain *store.ChainStore, cns consensus.Consensus, blacklist func(peer.ID)) *BlockValidator {
p, _ := lru.New2Q(4096)
p, _ := lru.New2Q[peer.ID, int](4096)
return &BlockValidator{
self: self,
peers: p,
@ -244,21 +244,19 @@ func NewBlockValidator(self peer.ID, chain *store.ChainStore, cns consensus.Cons
}
func (bv *BlockValidator) flagPeer(p peer.ID) {
v, ok := bv.peers.Get(p)
val, ok := bv.peers.Get(p)
if !ok {
bv.peers.Add(p, int(1))
bv.peers.Add(p, 1)
return
}
val := v.(int)
if val >= bv.killThresh {
log.Warnf("blacklisting peer %s", p)
bv.blacklist(p)
return
}
bv.peers.Add(p, v.(int)+1)
bv.peers.Add(p, val+1)
}
func (bv *BlockValidator) Validate(ctx context.Context, pid peer.ID, msg *pubsub.Message) (res pubsub.ValidationResult) {
@ -273,7 +271,7 @@ func (bv *BlockValidator) Validate(ctx context.Context, pid peer.ID, msg *pubsub
}()
var what string
res, what = bv.consensus.ValidateBlockPubsub(ctx, pid == bv.self, msg)
res, what = consensus.ValidateBlockPubsub(ctx, bv.consensus, pid == bv.self, msg)
if res == pubsub.ValidationAccept {
// it's a good block! make sure we've only seen it once
if count := bv.recvBlocks.add(msg.ValidatorData.(*types.BlockMsg).Cid()); count > 0 {
@ -293,11 +291,11 @@ func (bv *BlockValidator) Validate(ctx context.Context, pid peer.ID, msg *pubsub
}
type blockReceiptCache struct {
blocks *lru.TwoQueueCache
blocks *lru.TwoQueueCache[cid.Cid, int]
}
func newBlockReceiptCache() *blockReceiptCache {
c, _ := lru.New2Q(8192)
c, _ := lru.New2Q[cid.Cid, int](8192)
return &blockReceiptCache{
blocks: c,
@ -307,12 +305,12 @@ func newBlockReceiptCache() *blockReceiptCache {
func (brc *blockReceiptCache) add(bcid cid.Cid) int {
val, ok := brc.blocks.Get(bcid)
if !ok {
brc.blocks.Add(bcid, int(1))
brc.blocks.Add(bcid, 1)
return 0
}
brc.blocks.Add(bcid, val.(int)+1)
return val.(int)
brc.blocks.Add(bcid, val+1)
return val
}
type MessageValidator struct {
@ -466,13 +464,13 @@ type peerMsgInfo struct {
type IndexerMessageValidator struct {
self peer.ID
peerCache *lru.TwoQueueCache
peerCache *lru.TwoQueueCache[address.Address, *peerMsgInfo]
chainApi full.ChainModuleAPI
stateApi full.StateModuleAPI
}
func NewIndexerMessageValidator(self peer.ID, chainApi full.ChainModuleAPI, stateApi full.StateModuleAPI) *IndexerMessageValidator {
peerCache, _ := lru.New2Q(8192)
peerCache, _ := lru.New2Q[address.Address, *peerMsgInfo](8192)
return &IndexerMessageValidator{
self: self,
@ -497,7 +495,7 @@ func (v *IndexerMessageValidator) Validate(ctx context.Context, pid peer.ID, msg
return pubsub.ValidationIgnore
}
idxrMsg := dtsync.Message{}
idxrMsg := message.Message{}
err := idxrMsg.UnmarshalCBOR(bytes.NewBuffer(msg.Data))
if err != nil {
log.Errorw("Could not decode indexer pubsub message", "err", err)
@ -515,15 +513,12 @@ func (v *IndexerMessageValidator) Validate(ctx context.Context, pid peer.ID, msg
return pubsub.ValidationReject
}
minerID := minerAddr.String()
msgCid := idxrMsg.Cid
var msgInfo *peerMsgInfo
val, ok := v.peerCache.Get(minerID)
msgInfo, ok := v.peerCache.Get(minerAddr)
if !ok {
msgInfo = &peerMsgInfo{}
} else {
msgInfo = val.(*peerMsgInfo)
}
// Lock this peer's message info.
@ -544,7 +539,7 @@ func (v *IndexerMessageValidator) Validate(ctx context.Context, pid peer.ID, msg
// Check that the miner ID maps to the peer that sent the message.
err = v.authenticateMessage(ctx, minerAddr, originPeer)
if err != nil {
log.Warnw("cannot authenticate messsage", "err", err, "peer", originPeer, "minerID", minerID)
log.Warnw("cannot authenticate messsage", "err", err, "peer", originPeer, "minerID", minerAddr)
stats.Record(ctx, metrics.IndexerMessageValidationFailure.M(1))
return pubsub.ValidationReject
}
@ -554,7 +549,7 @@ func (v *IndexerMessageValidator) Validate(ctx context.Context, pid peer.ID, msg
// messages from the same peer are handled concurrently, there is a
// small chance that one msgInfo could replace the other here when
// the info is first cached. This is OK, so no need to prevent it.
v.peerCache.Add(minerID, msgInfo)
v.peerCache.Add(minerAddr, msgInfo)
}
}

View File

@ -9,12 +9,12 @@ import (
"github.com/golang/mock/gomock"
"github.com/ipfs/go-cid"
blocks "github.com/ipfs/go-libipfs/blocks"
"github.com/ipni/storetheindex/announce/message"
pubsub "github.com/libp2p/go-libp2p-pubsub"
pb "github.com/libp2p/go-libp2p-pubsub/pb"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-legs/dtsync"
"github.com/filecoin-project/lotus/api/mocks"
"github.com/filecoin-project/lotus/chain/types"
@ -105,7 +105,7 @@ func TestIndexerMessageValidator_Validate(t *testing.T) {
mc := gomock.NewController(t)
node := mocks.NewMockFullNode(mc)
subject := NewIndexerMessageValidator(peer.ID(tc.selfPID), node, node)
message := dtsync.Message{
message := message.Message{
Cid: validCid,
Addrs: nil,
ExtraData: tc.extraData,

View File

@ -7,6 +7,7 @@ import (
"io"
"math"
"sort"
time "time"
cid "github.com/ipfs/go-cid"
cbg "github.com/whyrusleeping/cbor-gen"
@ -15,6 +16,7 @@ import (
address "github.com/filecoin-project/go-address"
abi "github.com/filecoin-project/go-state-types/abi"
crypto "github.com/filecoin-project/go-state-types/crypto"
exitcode "github.com/filecoin-project/go-state-types/exitcode"
proof "github.com/filecoin-project/go-state-types/proof"
)
@ -338,7 +340,7 @@ func (t *BlockHeader) UnmarshalCBOR(r io.Reader) (err error) {
case cbg.MajNegativeInt:
extraI = int64(extra)
if extraI < 0 {
return fmt.Errorf("int64 negative oveflow")
return fmt.Errorf("int64 negative overflow")
}
extraI = -1 - extraI
default:
@ -616,7 +618,7 @@ func (t *ElectionProof) UnmarshalCBOR(r io.Reader) (err error) {
case cbg.MajNegativeInt:
extraI = int64(extra)
if extraI < 0 {
return fmt.Errorf("int64 negative oveflow")
return fmt.Errorf("int64 negative overflow")
}
extraI = -1 - extraI
default:
@ -826,7 +828,7 @@ func (t *Message) UnmarshalCBOR(r io.Reader) (err error) {
case cbg.MajNegativeInt:
extraI = int64(extra)
if extraI < 0 {
return fmt.Errorf("int64 negative oveflow")
return fmt.Errorf("int64 negative overflow")
}
extraI = -1 - extraI
default:
@ -1589,7 +1591,7 @@ func (t *ExpTipSet) UnmarshalCBOR(r io.Reader) (err error) {
case cbg.MajNegativeInt:
extraI = int64(extra)
if extraI < 0 {
return fmt.Errorf("int64 negative oveflow")
return fmt.Errorf("int64 negative overflow")
}
extraI = -1 - extraI
default:
@ -2078,3 +2080,659 @@ func (t *EventEntry) UnmarshalCBOR(r io.Reader) (err error) {
}
return nil
}
var lengthBufGasTrace = []byte{133}
func (t *GasTrace) MarshalCBOR(w io.Writer) error {
if t == nil {
_, err := w.Write(cbg.CborNull)
return err
}
cw := cbg.NewCborWriter(w)
if _, err := cw.Write(lengthBufGasTrace); err != nil {
return err
}
// t.Name (string) (string)
if len(t.Name) > cbg.MaxLength {
return xerrors.Errorf("Value in field t.Name was too long")
}
if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Name))); err != nil {
return err
}
if _, err := io.WriteString(w, string(t.Name)); err != nil {
return err
}
// t.TotalGas (int64) (int64)
if t.TotalGas >= 0 {
if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.TotalGas)); err != nil {
return err
}
} else {
if err := cw.WriteMajorTypeHeader(cbg.MajNegativeInt, uint64(-t.TotalGas-1)); err != nil {
return err
}
}
// t.ComputeGas (int64) (int64)
if t.ComputeGas >= 0 {
if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.ComputeGas)); err != nil {
return err
}
} else {
if err := cw.WriteMajorTypeHeader(cbg.MajNegativeInt, uint64(-t.ComputeGas-1)); err != nil {
return err
}
}
// t.StorageGas (int64) (int64)
if t.StorageGas >= 0 {
if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.StorageGas)); err != nil {
return err
}
} else {
if err := cw.WriteMajorTypeHeader(cbg.MajNegativeInt, uint64(-t.StorageGas-1)); err != nil {
return err
}
}
// t.TimeTaken (time.Duration) (int64)
if t.TimeTaken >= 0 {
if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.TimeTaken)); err != nil {
return err
}
} else {
if err := cw.WriteMajorTypeHeader(cbg.MajNegativeInt, uint64(-t.TimeTaken-1)); err != nil {
return err
}
}
return nil
}
func (t *GasTrace) UnmarshalCBOR(r io.Reader) (err error) {
*t = GasTrace{}
cr := cbg.NewCborReader(r)
maj, extra, err := cr.ReadHeader()
if err != nil {
return err
}
defer func() {
if err == io.EOF {
err = io.ErrUnexpectedEOF
}
}()
if maj != cbg.MajArray {
return fmt.Errorf("cbor input should be of type array")
}
if extra != 5 {
return fmt.Errorf("cbor input had wrong number of fields")
}
// t.Name (string) (string)
{
sval, err := cbg.ReadString(cr)
if err != nil {
return err
}
t.Name = string(sval)
}
// t.TotalGas (int64) (int64)
{
maj, extra, err := cr.ReadHeader()
var extraI int64
if err != nil {
return err
}
switch maj {
case cbg.MajUnsignedInt:
extraI = int64(extra)
if extraI < 0 {
return fmt.Errorf("int64 positive overflow")
}
case cbg.MajNegativeInt:
extraI = int64(extra)
if extraI < 0 {
return fmt.Errorf("int64 negative overflow")
}
extraI = -1 - extraI
default:
return fmt.Errorf("wrong type for int64 field: %d", maj)
}
t.TotalGas = int64(extraI)
}
// t.ComputeGas (int64) (int64)
{
maj, extra, err := cr.ReadHeader()
var extraI int64
if err != nil {
return err
}
switch maj {
case cbg.MajUnsignedInt:
extraI = int64(extra)
if extraI < 0 {
return fmt.Errorf("int64 positive overflow")
}
case cbg.MajNegativeInt:
extraI = int64(extra)
if extraI < 0 {
return fmt.Errorf("int64 negative overflow")
}
extraI = -1 - extraI
default:
return fmt.Errorf("wrong type for int64 field: %d", maj)
}
t.ComputeGas = int64(extraI)
}
// t.StorageGas (int64) (int64)
{
maj, extra, err := cr.ReadHeader()
var extraI int64
if err != nil {
return err
}
switch maj {
case cbg.MajUnsignedInt:
extraI = int64(extra)
if extraI < 0 {
return fmt.Errorf("int64 positive overflow")
}
case cbg.MajNegativeInt:
extraI = int64(extra)
if extraI < 0 {
return fmt.Errorf("int64 negative overflow")
}
extraI = -1 - extraI
default:
return fmt.Errorf("wrong type for int64 field: %d", maj)
}
t.StorageGas = int64(extraI)
}
// t.TimeTaken (time.Duration) (int64)
{
maj, extra, err := cr.ReadHeader()
var extraI int64
if err != nil {
return err
}
switch maj {
case cbg.MajUnsignedInt:
extraI = int64(extra)
if extraI < 0 {
return fmt.Errorf("int64 positive overflow")
}
case cbg.MajNegativeInt:
extraI = int64(extra)
if extraI < 0 {
return fmt.Errorf("int64 negative overflow")
}
extraI = -1 - extraI
default:
return fmt.Errorf("wrong type for int64 field: %d", maj)
}
t.TimeTaken = time.Duration(extraI)
}
return nil
}
var lengthBufMessageTrace = []byte{134}
func (t *MessageTrace) MarshalCBOR(w io.Writer) error {
if t == nil {
_, err := w.Write(cbg.CborNull)
return err
}
cw := cbg.NewCborWriter(w)
if _, err := cw.Write(lengthBufMessageTrace); err != nil {
return err
}
// t.From (address.Address) (struct)
if err := t.From.MarshalCBOR(cw); err != nil {
return err
}
// t.To (address.Address) (struct)
if err := t.To.MarshalCBOR(cw); err != nil {
return err
}
// t.Value (big.Int) (struct)
if err := t.Value.MarshalCBOR(cw); err != nil {
return err
}
// t.Method (abi.MethodNum) (uint64)
if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.Method)); err != nil {
return err
}
// t.Params ([]uint8) (slice)
if len(t.Params) > cbg.ByteArrayMaxLen {
return xerrors.Errorf("Byte array in field t.Params was too long")
}
if err := cw.WriteMajorTypeHeader(cbg.MajByteString, uint64(len(t.Params))); err != nil {
return err
}
if _, err := cw.Write(t.Params[:]); err != nil {
return err
}
// t.ParamsCodec (uint64) (uint64)
if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.ParamsCodec)); err != nil {
return err
}
return nil
}
func (t *MessageTrace) UnmarshalCBOR(r io.Reader) (err error) {
*t = MessageTrace{}
cr := cbg.NewCborReader(r)
maj, extra, err := cr.ReadHeader()
if err != nil {
return err
}
defer func() {
if err == io.EOF {
err = io.ErrUnexpectedEOF
}
}()
if maj != cbg.MajArray {
return fmt.Errorf("cbor input should be of type array")
}
if extra != 6 {
return fmt.Errorf("cbor input had wrong number of fields")
}
// t.From (address.Address) (struct)
{
if err := t.From.UnmarshalCBOR(cr); err != nil {
return xerrors.Errorf("unmarshaling t.From: %w", err)
}
}
// t.To (address.Address) (struct)
{
if err := t.To.UnmarshalCBOR(cr); err != nil {
return xerrors.Errorf("unmarshaling t.To: %w", err)
}
}
// t.Value (big.Int) (struct)
{
if err := t.Value.UnmarshalCBOR(cr); err != nil {
return xerrors.Errorf("unmarshaling t.Value: %w", err)
}
}
// t.Method (abi.MethodNum) (uint64)
{
maj, extra, err = cr.ReadHeader()
if err != nil {
return err
}
if maj != cbg.MajUnsignedInt {
return fmt.Errorf("wrong type for uint64 field")
}
t.Method = abi.MethodNum(extra)
}
// t.Params ([]uint8) (slice)
maj, extra, err = cr.ReadHeader()
if err != nil {
return err
}
if extra > cbg.ByteArrayMaxLen {
return fmt.Errorf("t.Params: byte array too large (%d)", extra)
}
if maj != cbg.MajByteString {
return fmt.Errorf("expected byte array")
}
if extra > 0 {
t.Params = make([]uint8, extra)
}
if _, err := io.ReadFull(cr, t.Params[:]); err != nil {
return err
}
// t.ParamsCodec (uint64) (uint64)
{
maj, extra, err = cr.ReadHeader()
if err != nil {
return err
}
if maj != cbg.MajUnsignedInt {
return fmt.Errorf("wrong type for uint64 field")
}
t.ParamsCodec = uint64(extra)
}
return nil
}
var lengthBufReturnTrace = []byte{131}
func (t *ReturnTrace) MarshalCBOR(w io.Writer) error {
if t == nil {
_, err := w.Write(cbg.CborNull)
return err
}
cw := cbg.NewCborWriter(w)
if _, err := cw.Write(lengthBufReturnTrace); err != nil {
return err
}
// t.ExitCode (exitcode.ExitCode) (int64)
if t.ExitCode >= 0 {
if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.ExitCode)); err != nil {
return err
}
} else {
if err := cw.WriteMajorTypeHeader(cbg.MajNegativeInt, uint64(-t.ExitCode-1)); err != nil {
return err
}
}
// t.Return ([]uint8) (slice)
if len(t.Return) > cbg.ByteArrayMaxLen {
return xerrors.Errorf("Byte array in field t.Return was too long")
}
if err := cw.WriteMajorTypeHeader(cbg.MajByteString, uint64(len(t.Return))); err != nil {
return err
}
if _, err := cw.Write(t.Return[:]); err != nil {
return err
}
// t.ReturnCodec (uint64) (uint64)
if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.ReturnCodec)); err != nil {
return err
}
return nil
}
func (t *ReturnTrace) UnmarshalCBOR(r io.Reader) (err error) {
*t = ReturnTrace{}
cr := cbg.NewCborReader(r)
maj, extra, err := cr.ReadHeader()
if err != nil {
return err
}
defer func() {
if err == io.EOF {
err = io.ErrUnexpectedEOF
}
}()
if maj != cbg.MajArray {
return fmt.Errorf("cbor input should be of type array")
}
if extra != 3 {
return fmt.Errorf("cbor input had wrong number of fields")
}
// t.ExitCode (exitcode.ExitCode) (int64)
{
maj, extra, err := cr.ReadHeader()
var extraI int64
if err != nil {
return err
}
switch maj {
case cbg.MajUnsignedInt:
extraI = int64(extra)
if extraI < 0 {
return fmt.Errorf("int64 positive overflow")
}
case cbg.MajNegativeInt:
extraI = int64(extra)
if extraI < 0 {
return fmt.Errorf("int64 negative overflow")
}
extraI = -1 - extraI
default:
return fmt.Errorf("wrong type for int64 field: %d", maj)
}
t.ExitCode = exitcode.ExitCode(extraI)
}
// t.Return ([]uint8) (slice)
maj, extra, err = cr.ReadHeader()
if err != nil {
return err
}
if extra > cbg.ByteArrayMaxLen {
return fmt.Errorf("t.Return: byte array too large (%d)", extra)
}
if maj != cbg.MajByteString {
return fmt.Errorf("expected byte array")
}
if extra > 0 {
t.Return = make([]uint8, extra)
}
if _, err := io.ReadFull(cr, t.Return[:]); err != nil {
return err
}
// t.ReturnCodec (uint64) (uint64)
{
maj, extra, err = cr.ReadHeader()
if err != nil {
return err
}
if maj != cbg.MajUnsignedInt {
return fmt.Errorf("wrong type for uint64 field")
}
t.ReturnCodec = uint64(extra)
}
return nil
}
var lengthBufExecutionTrace = []byte{132}
func (t *ExecutionTrace) MarshalCBOR(w io.Writer) error {
if t == nil {
_, err := w.Write(cbg.CborNull)
return err
}
cw := cbg.NewCborWriter(w)
if _, err := cw.Write(lengthBufExecutionTrace); err != nil {
return err
}
// t.Msg (types.MessageTrace) (struct)
if err := t.Msg.MarshalCBOR(cw); err != nil {
return err
}
// t.MsgRct (types.ReturnTrace) (struct)
if err := t.MsgRct.MarshalCBOR(cw); err != nil {
return err
}
// t.GasCharges ([]*types.GasTrace) (slice)
if len(t.GasCharges) > 1000000000 {
return xerrors.Errorf("Slice value in field t.GasCharges was too long")
}
if err := cw.WriteMajorTypeHeader(cbg.MajArray, uint64(len(t.GasCharges))); err != nil {
return err
}
for _, v := range t.GasCharges {
if err := v.MarshalCBOR(cw); err != nil {
return err
}
}
// t.Subcalls ([]types.ExecutionTrace) (slice)
if len(t.Subcalls) > 1000000000 {
return xerrors.Errorf("Slice value in field t.Subcalls was too long")
}
if err := cw.WriteMajorTypeHeader(cbg.MajArray, uint64(len(t.Subcalls))); err != nil {
return err
}
for _, v := range t.Subcalls {
if err := v.MarshalCBOR(cw); err != nil {
return err
}
}
return nil
}
func (t *ExecutionTrace) UnmarshalCBOR(r io.Reader) (err error) {
*t = ExecutionTrace{}
cr := cbg.NewCborReader(r)
maj, extra, err := cr.ReadHeader()
if err != nil {
return err
}
defer func() {
if err == io.EOF {
err = io.ErrUnexpectedEOF
}
}()
if maj != cbg.MajArray {
return fmt.Errorf("cbor input should be of type array")
}
if extra != 4 {
return fmt.Errorf("cbor input had wrong number of fields")
}
// t.Msg (types.MessageTrace) (struct)
{
if err := t.Msg.UnmarshalCBOR(cr); err != nil {
return xerrors.Errorf("unmarshaling t.Msg: %w", err)
}
}
// t.MsgRct (types.ReturnTrace) (struct)
{
if err := t.MsgRct.UnmarshalCBOR(cr); err != nil {
return xerrors.Errorf("unmarshaling t.MsgRct: %w", err)
}
}
// t.GasCharges ([]*types.GasTrace) (slice)
maj, extra, err = cr.ReadHeader()
if err != nil {
return err
}
if extra > 1000000000 {
return fmt.Errorf("t.GasCharges: array too large (%d)", extra)
}
if maj != cbg.MajArray {
return fmt.Errorf("expected cbor array")
}
if extra > 0 {
t.GasCharges = make([]*GasTrace, extra)
}
for i := 0; i < int(extra); i++ {
var v GasTrace
if err := v.UnmarshalCBOR(cr); err != nil {
return err
}
t.GasCharges[i] = &v
}
// t.Subcalls ([]types.ExecutionTrace) (slice)
maj, extra, err = cr.ReadHeader()
if err != nil {
return err
}
if extra > 1000000000 {
return fmt.Errorf("t.Subcalls: array too large (%d)", extra)
}
if maj != cbg.MajArray {
return fmt.Errorf("expected cbor array")
}
if extra > 0 {
t.Subcalls = make([]ExecutionTrace, extra)
}
for i := 0; i < int(extra); i++ {
var v ExecutionTrace
if err := v.UnmarshalCBOR(cr); err != nil {
return err
}
t.Subcalls[i] = v
}
return nil
}
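Taken together, the generated pairs above are symmetric: MarshalCBOR writes the fixed-length array header (the lengthBuf constants) followed by each field in declaration order, and UnmarshalCBOR verifies that header before reading the fields back. A minimal round-trip sketch against these codecs, assuming it sits in the same types package; the field values are made up for illustration:

package types

import (
	"bytes"
	"fmt"

	"github.com/filecoin-project/go-address"
	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/go-state-types/exitcode"
)

// roundTripExecutionTrace encodes a trace and decodes it back, exercising
// the generated MarshalCBOR/UnmarshalCBOR pair (illustrative helper only).
func roundTripExecutionTrace() error {
	from, _ := address.NewIDAddress(100)
	to, _ := address.NewIDAddress(101)
	in := ExecutionTrace{
		Msg:    MessageTrace{From: from, To: to, Value: abi.NewTokenAmount(0)},
		MsgRct: ReturnTrace{ExitCode: exitcode.Ok, Return: []byte{0x01}},
	}

	var buf bytes.Buffer
	if err := in.MarshalCBOR(&buf); err != nil {
		return fmt.Errorf("encode: %w", err)
	}
	var out ExecutionTrace
	if err := out.UnmarshalCBOR(&buf); err != nil {
		return fmt.Errorf("decode: %w", err)
	}
	if out.MsgRct.ExitCode != in.MsgRct.ExitCode || !bytes.Equal(out.MsgRct.Return, in.MsgRct.Return) {
		return fmt.Errorf("round trip mismatch")
	}
	return nil
}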

View File

@ -44,14 +44,6 @@ type EthTx struct {
S EthBigInt `json:"s"`
}
func (tx *EthTx) Reward(blkBaseFee big.Int) EthBigInt {
availablePriorityFee := big.Sub(big.Int(tx.MaxFeePerGas), blkBaseFee)
if big.Cmp(big.Int(tx.MaxPriorityFeePerGas), availablePriorityFee) <= 0 {
return tx.MaxPriorityFeePerGas
}
return EthBigInt(availablePriorityFee)
}
type EthTxArgs struct {
ChainID int `json:"chainId"`
Nonce int `json:"nonce"`

View File

@ -295,17 +295,21 @@ func EthAddressFromPubKey(pubk []byte) ([]byte, error) {
return ethAddr, nil
}
var maskedIDPrefix = [20 - 8]byte{0xff}
func IsEthAddress(addr address.Address) bool {
if addr.Protocol() != address.Delegated {
return false
}
payload := addr.Payload()
namespace, _, err := varint.FromUvarint(payload)
namespace, offset, err := varint.FromUvarint(payload)
if err != nil {
return false
}
return namespace == builtintypes.EthereumAddressManagerActorID
payload = payload[offset:]
return namespace == builtintypes.EthereumAddressManagerActorID && len(payload) == 20 && !bytes.HasPrefix(payload, maskedIDPrefix[:])
}
func EthAddressFromFilecoinAddress(addr address.Address) (EthAddress, error) {
@ -326,9 +330,17 @@ func EthAddressFromFilecoinAddress(addr address.Address) (EthAddress, error) {
return EthAddress{}, xerrors.Errorf("invalid delegated address namespace in: %s", addr)
}
payload = payload[n:]
if namespace == builtintypes.EthereumAddressManagerActorID {
return CastEthAddress(payload)
if namespace != builtintypes.EthereumAddressManagerActorID {
return EthAddress{}, ErrInvalidAddress
}
ethAddr, err := CastEthAddress(payload)
if err != nil {
return EthAddress{}, err
}
if ethAddr.IsMaskedID() {
return EthAddress{}, xerrors.Errorf("f410f addresses cannot embed masked-ID payloads: %s", ethAddr)
}
return ethAddr, nil
}
return EthAddress{}, ErrInvalidAddress
}
@ -376,8 +388,7 @@ func (ea *EthAddress) UnmarshalJSON(b []byte) error {
}
func (ea EthAddress) IsMaskedID() bool {
idmask := [12]byte{0xff}
return bytes.Equal(ea[:12], idmask[:])
return bytes.HasPrefix(ea[:], maskedIDPrefix[:])
}
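The payload being rejected above has a fixed shape: a masked ID address is 0xff, eleven zero bytes, then the actor ID as a big-endian uint64, which is exactly what the shared maskedIDPrefix check detects. A small sketch constructing one (the helper name is hypothetical, not part of this diff):

package ethtypes

import "encoding/binary"

// makeMaskedIDAddress builds the 20-byte masked-ID form of an actor ID:
// 0xff || 11 zero bytes || 8-byte big-endian ID (hypothetical helper,
// for illustration only).
func makeMaskedIDAddress(id uint64) EthAddress {
	var ea EthAddress
	ea[0] = 0xff
	binary.BigEndian.PutUint64(ea[12:], id)
	return ea
}

Here makeMaskedIDAddress(100).IsMaskedID() reports true, and the new TestMaskedIDInF4 case below drives the same payload through EthAddressFromFilecoinAddress to confirm it is rejected.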
func (ea EthAddress) ToFilecoinAddress() (address.Address, error) {

View File

@ -9,6 +9,7 @@ import (
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/big"
"github.com/filecoin-project/go-state-types/builtin"
)
type TestCase struct {
@ -178,6 +179,20 @@ func TestParseEthAddr(t *testing.T) {
}
}
func TestMaskedIDInF4(t *testing.T) {
addr, err := address.NewIDAddress(100)
require.NoError(t, err)
eaddr, err := EthAddressFromFilecoinAddress(addr)
require.NoError(t, err)
badaddr, err := address.NewDelegatedAddress(builtin.EthereumAddressManagerActorID, eaddr[:])
require.NoError(t, err)
_, err = EthAddressFromFilecoinAddress(badaddr)
require.Error(t, err)
}
func TestUnmarshalEthCall(t *testing.T) {
data := `{"from":"0x4D6D86b31a112a05A473c4aE84afaF873f632325","to":"0xFe01CC39f5Ae8553D6914DBb9dC27D219fa22D7f","gas":"0x5","gasPrice":"0x6","value":"0x123","data":""}`

View File

@ -2,44 +2,41 @@ package types
import (
"encoding/json"
"fmt"
"regexp"
"runtime"
"strings"
"time"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/exitcode"
)
type ExecutionTrace struct {
Msg *Message
MsgRct *MessageReceipt
Error string
Duration time.Duration
GasCharges []*GasTrace
Subcalls []ExecutionTrace
}
type GasTrace struct {
Name string
Location []Loc `json:"loc"`
TotalGas int64 `json:"tg"`
ComputeGas int64 `json:"cg"`
StorageGas int64 `json:"sg"`
TotalVirtualGas int64 `json:"vtg"`
VirtualComputeGas int64 `json:"vcg"`
VirtualStorageGas int64 `json:"vsg"`
TimeTaken time.Duration `json:"tt"`
Extra interface{} `json:"ex,omitempty"`
Callers []uintptr `json:"-"`
}
type Loc struct {
File string
Line int
Function string
}
type MessageTrace struct {
From address.Address
To address.Address
Value abi.TokenAmount
Method abi.MethodNum
Params []byte
ParamsCodec uint64
}
type ReturnTrace struct {
ExitCode exitcode.ExitCode
Return []byte
ReturnCodec uint64
}
type ExecutionTrace struct {
Msg MessageTrace
MsgRct ReturnTrace
GasCharges []*GasTrace `cborgen:"maxlen=1000000000"`
Subcalls []ExecutionTrace `cborgen:"maxlen=1000000000"`
}
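The cborgen:"maxlen=1000000000" tags are what emit the matching bounds checks in the generated codecs earlier in this diff. A sketch of the generator invocation, assuming whyrusleeping/cbor-gen; the output path and exact type list are illustrative:

package main

import (
	gen "github.com/whyrusleeping/cbor-gen"

	"github.com/filecoin-project/lotus/chain/types"
)

func main() {
	// Tuple encoders serialize each struct as a fixed-length CBOR array,
	// which is where the lengthBuf array headers come from.
	if err := gen.WriteTupleEncodersToFile("./chain/types/cbor_gen.go", "types",
		types.MessageTrace{},
		types.ReturnTrace{},
		types.ExecutionTrace{},
	); err != nil {
		panic(err)
	}
}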
func (et ExecutionTrace) SumGas() GasTrace {
@ -52,71 +49,13 @@ func SumGas(charges []*GasTrace) GasTrace {
out.TotalGas += gc.TotalGas
out.ComputeGas += gc.ComputeGas
out.StorageGas += gc.StorageGas
out.TotalVirtualGas += gc.TotalVirtualGas
out.VirtualComputeGas += gc.VirtualComputeGas
out.VirtualStorageGas += gc.VirtualStorageGas
}
return out
}
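SumGas above aggregates one flat slice of charges; totalling an entire call tree also means descending into Subcalls. A recursive sketch under that reading (the helper is not part of this diff):

// sumGasRecursive totals gas charges for a trace and all of its subcalls
// (hypothetical helper; SumGas in the diff covers a single charge slice).
func sumGasRecursive(et ExecutionTrace) GasTrace {
	out := SumGas(et.GasCharges)
	for _, sub := range et.Subcalls {
		s := sumGasRecursive(sub)
		out.TotalGas += s.TotalGas
		out.ComputeGas += s.ComputeGas
		out.StorageGas += s.StorageGas
	}
	return out
}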
func (l Loc) Show() bool {
ignorePrefix := []string{
"reflect.",
"github.com/filecoin-project/lotus/chain/vm.(*Invoker).transform",
"github.com/filecoin-project/go-amt-ipld/",
}
for _, pre := range ignorePrefix {
if strings.HasPrefix(l.Function, pre) {
return false
}
}
return true
}
func (l Loc) String() string {
file := strings.Split(l.File, "/")
fn := strings.Split(l.Function, "/")
var fnpkg string
if len(fn) > 2 {
fnpkg = strings.Join(fn[len(fn)-2:], "/")
} else {
fnpkg = l.Function
}
return fmt.Sprintf("%s@%s:%d", fnpkg, file[len(file)-1], l.Line)
}
var importantRegex = regexp.MustCompile(`github.com/filecoin-project/specs-actors/(v\d+/)?actors/builtin`)
func (l Loc) Important() bool {
return importantRegex.MatchString(l.Function)
}
func (gt *GasTrace) MarshalJSON() ([]byte, error) {
type GasTraceCopy GasTrace
if len(gt.Location) == 0 {
if len(gt.Callers) != 0 {
frames := runtime.CallersFrames(gt.Callers)
for {
frame, more := frames.Next()
if frame.Function == "github.com/filecoin-project/lotus/chain/vm.(*VM).ApplyMessage" {
break
}
l := Loc{
File: frame.File,
Line: frame.Line,
Function: frame.Function,
}
gt.Location = append(gt.Location, l)
if !more {
break
}
}
}
}
cpy := (*GasTraceCopy)(gt)
return json.Marshal(cpy)
}

View File

@ -207,6 +207,10 @@ func (m *Message) ValidForBlockInclusion(minGas int64, version network.Version)
return xerrors.Errorf("'GasLimit' field cannot be greater than a block's gas limit (%d > %d)", m.GasLimit, build.BlockGasLimit)
}
if m.GasLimit <= 0 {
return xerrors.Errorf("'GasLimit' field %d must be positive", m.GasLimit)
}
// since prices might vary with time, this is technically semantic validation
if m.GasLimit < minGas {
return xerrors.Errorf("'GasLimit' field cannot be less than the cost of storing a message on chain %d < %d", m.GasLimit, minGas)
@ -215,4 +219,17 @@ func (m *Message) ValidForBlockInclusion(minGas int64, version network.Version)
return nil
}
// EffectiveGasPremium returns the effective gas premium claimable by the miner
// given the supplied base fee.
//
// Filecoin clamps the effective premium to GasFeeCap - BaseFee whenever that
// difference is lower than the specified GasPremium.
func (m *Message) EffectiveGasPremium(baseFee abi.TokenAmount) abi.TokenAmount {
available := big.Sub(m.GasFeeCap, baseFee)
if big.Cmp(m.GasPremium, available) <= 0 {
return m.GasPremium
}
return available
}
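A quick sketch of the clamp with made-up numbers, assuming the surrounding types package:

package types

import (
	"fmt"

	"github.com/filecoin-project/go-state-types/abi"
)

// exampleEffectiveGasPremium is illustrative only.
func exampleEffectiveGasPremium() {
	m := &Message{
		GasFeeCap:  abi.NewTokenAmount(100),
		GasPremium: abi.NewTokenAmount(30),
	}
	// Base fee 90: only 100-90=10 is available, so the premium clamps to 10.
	fmt.Println(m.EffectiveGasPremium(abi.NewTokenAmount(90)))
	// Base fee 50: 100-50=50 exceeds the premium, so the full 30 applies.
	fmt.Println(m.EffectiveGasPremium(abi.NewTokenAmount(50)))
}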
const TestGasLimit = 100e6

Some files were not shown because too many files have changed in this diff.